linux/arch/arm64/include/asm/cpufeature.h
/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_CPUFEATURE_H
#define __ASM_CPUFEATURE_H

#include <asm/cpucaps.h>
#include <asm/hwcap.h>
#include <asm/sysreg.h>

/*
 * In the arm64 world (as in the ARM world), elf_hwcap is used both internally
 * in the kernel and for user space to keep track of which optional features
 * are supported by the current system. So let's map feature 'x' to HWCAP_x.
 * Note that HWCAP_x constants are bit fields so we need to take the log.
 */

#define MAX_CPU_FEATURES        (8 * sizeof(elf_hwcap))
#define cpu_feature(x)          ilog2(HWCAP_ ## x)

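/*
 * Illustrative example: assuming HWCAP_AES is defined as (1 << 3) in the
 * hwcap headers, cpu_feature(AES) evaluates to 3, the bit number that
 * cpu_have_feature() tests in elf_hwcap.
 */
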
#ifndef __ASSEMBLY__

#include <linux/bug.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>

/*
 * CPU feature register tracking
 *
 * The safe value of a CPUID feature field is dependent on the implications
 * of the values assigned to it by the architecture. Based on the relationship
 * between the values, the features are classified into 3 types - LOWER_SAFE,
 * HIGHER_SAFE and EXACT.
 *
 * The lowest value of all the CPUs is chosen for LOWER_SAFE and highest
 * for HIGHER_SAFE. It is expected that all CPUs have the same value for
 * a field when EXACT is specified, failing which, the safe value specified
 * in the table is chosen.
 */

enum ftr_type {
        FTR_EXACT,      /* Use a predefined safe value */
        FTR_LOWER_SAFE, /* Smaller value is safe */
        FTR_HIGHER_SAFE,/* Bigger value is safe */
};

#define FTR_STRICT      true    /* SANITY check strict matching required */
#define FTR_NONSTRICT   false   /* SANITY check ignored */

#define FTR_SIGNED      true    /* Value should be treated as signed */
#define FTR_UNSIGNED    false   /* Value should be treated as unsigned */

#define FTR_VISIBLE     true    /* Feature visible to the user space */
#define FTR_HIDDEN      false   /* Feature is hidden from the user */

struct arm64_ftr_bits {
        bool            sign;   /* Value is signed ? */
        bool            visible;
        bool            strict; /* CPU Sanity check: strict matching required ? */
        enum ftr_type   type;
        u8              shift;
        u8              width;
        s64             safe_val; /* safe value for FTR_EXACT features */
};
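
/*
 * Illustrative example (the real tables live in cpufeature.c): a signed,
 * user-visible, strictly checked 4-bit FTR_LOWER_SAFE field at bits [19:16]
 * of an ID register could be described as:
 *
 *      {
 *              .sign           = FTR_SIGNED,
 *              .visible        = FTR_VISIBLE,
 *              .strict         = FTR_STRICT,
 *              .type           = FTR_LOWER_SAFE,
 *              .shift          = 16,
 *              .width          = 4,
 *              .safe_val       = 0,
 *      }
 */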

/*
 * @arm64_ftr_reg - Feature register
 * @strict_mask         Bits which should match across all CPUs for sanity.
 * @sys_val             Safe value across the CPUs (system view)
 */
struct arm64_ftr_reg {
        const char                      *name;
        u64                             strict_mask;
        u64                             user_mask;
        u64                             sys_val;
        u64                             user_val;
        const struct arm64_ftr_bits     *ftr_bits;
};

extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;

/* scope of capability check */
enum {
        SCOPE_SYSTEM,           /* check against the system-wide, sanitised register values */
        SCOPE_LOCAL_CPU,        /* check against the registers of the local CPU only */
};

struct arm64_cpu_capabilities {
        const char *desc;
        u16 capability;
        int def_scope;                  /* default scope */
        bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
        int (*enable)(void *);          /* Called on all active CPUs */
        union {
                struct {        /* To be used for erratum handling only */
                        u32 midr_model;
                        u32 midr_range_min, midr_range_max;
                };

                struct {        /* Feature register checking */
                        u32 sys_reg;
                        u8 field_pos;
                        u8 min_field_value;
                        u8 hwcap_type;
                        bool sign;
                        unsigned long hwcap;
                };
        };
};
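
/*
 * Illustrative example (the real tables live in cpufeature.c and
 * cpu_errata.c, and the matches() callbacks such as has_cpuid_feature are
 * defined there): a system-wide feature capability derived from an ID
 * register field would look roughly like:
 *
 *      {
 *              .desc = "32-bit EL0 Support",
 *              .capability = ARM64_HAS_32BIT_EL0,
 *              .def_scope = SCOPE_SYSTEM,
 *              .matches = has_cpuid_feature,
 *              .sys_reg = SYS_ID_AA64PFR0_EL1,
 *              .sign = FTR_UNSIGNED,
 *              .field_pos = ID_AA64PFR0_EL0_SHIFT,
 *              .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
 *      }
 */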

extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready;

bool this_cpu_has_cap(unsigned int cap);

static inline bool cpu_have_feature(unsigned int num)
{
        return elf_hwcap & (1UL << num);
}

/* System capability check for constant caps */
static inline bool __cpus_have_const_cap(int num)
{
        if (num >= ARM64_NCAPS)
                return false;
        return static_branch_unlikely(&cpu_hwcap_keys[num]);
}

static inline bool cpus_have_cap(unsigned int num)
{
        if (num >= ARM64_NCAPS)
                return false;
        return test_bit(num, cpu_hwcaps);
}

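/*
 * Until arm64_const_caps_ready is set (i.e. before the static keys above
 * have been initialised during boot), fall back to testing the cpu_hwcaps
 * bitmap; afterwards the check is a patched static branch.
 */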
static inline bool cpus_have_const_cap(int num)
{
        if (static_branch_likely(&arm64_const_caps_ready))
                return __cpus_have_const_cap(num);
        else
                return cpus_have_cap(num);
}

static inline void cpus_set_cap(unsigned int num)
{
        if (num >= ARM64_NCAPS) {
                pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
                        num, ARM64_NCAPS);
        } else {
                __set_bit(num, cpu_hwcaps);
        }
}

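/*
 * The extraction helpers below shift the requested field to the top of the
 * 64-bit value and then shift it back down, so the result is sign- or
 * zero-extended as appropriate.
 *
 * Worked example (illustrative values): features = 0x1200, field = 8,
 * width = 4:
 *      features << (64 - 4 - 8) == 0x2000000000000000
 *      ... >> (64 - 4)          == 0x2
 * If the same field held 0xf, unsigned extraction would return 15 while
 * signed extraction would return -1.
 */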
static inline int __attribute_const__
cpuid_feature_extract_signed_field_width(u64 features, int field, int width)
{
        return (s64)(features << (64 - width - field)) >> (64 - width);
}

static inline int __attribute_const__
cpuid_feature_extract_signed_field(u64 features, int field)
{
        return cpuid_feature_extract_signed_field_width(features, field, 4);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
        return (u64)(features << (64 - width - field)) >> (64 - width);
}

static inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
        return cpuid_feature_extract_unsigned_field_width(features, field, 4);
}

static inline u64 arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
{
        return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
}

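/*
 * Register value exposed to user space (e.g. for emulated ID register
 * reads): user-visible fields take the sanitised system value, hidden
 * fields take the safe user_val.
 */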
static inline u64 arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
{
        return (reg->user_val | (reg->sys_val & reg->user_mask));
}

static inline int __attribute_const__
cpuid_feature_extract_field_width(u64 features, int field, int width, bool sign)
{
        return (sign) ?
                cpuid_feature_extract_signed_field_width(features, field, width) :
                cpuid_feature_extract_unsigned_field_width(features, field, width);
}

static inline int __attribute_const__
cpuid_feature_extract_field(u64 features, int field, bool sign)
{
        return cpuid_feature_extract_field_width(features, field, 4, sign);
}

static inline s64 arm64_ftr_value(const struct arm64_ftr_bits *ftrp, u64 val)
{
        return (s64)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
}

static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
{
        return cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL_SHIFT) == 0x1 ||
                cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
}

static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
{
        u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);

        return val == ID_AA64PFR0_EL0_32BIT_64BIT;
}

void __init setup_cpu_features(void);

void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
                            const char *info);
void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
void check_local_cpu_capabilities(void);

void update_cpu_errata_workarounds(void);
void __init enable_errata_workarounds(void);
void verify_local_cpu_errata_workarounds(void);

u64 read_sanitised_ftr_reg(u32 id);

static inline bool cpu_supports_mixed_endian_el0(void)
{
        return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

static inline bool system_supports_32bit_el0(void)
{
        return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
}

static inline bool system_supports_mixed_endian_el0(void)
{
        return id_aa64mmfr0_mixed_endian_el0(read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1));
}

static inline bool system_supports_fpsimd(void)
{
        return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}

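/*
 * The TTBR0-based software PAN emulation is only used when the CPU does not
 * implement hardware PAN.
 */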
static inline bool system_uses_ttbr0_pan(void)
{
        return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
                !cpus_have_const_cap(ARM64_HAS_PAN);
}

#endif /* __ASSEMBLY__ */

#endif