linux/arch/arm64/kernel/cpufeature.c
   1/*
   2 * Contains CPU feature definitions
   3 *
   4 * Copyright (C) 2015 ARM Ltd.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  17 */
  18
  19#define pr_fmt(fmt) "CPU features: " fmt
  20
  21#include <linux/bsearch.h>
  22#include <linux/cpumask.h>
  23#include <linux/sort.h>
  24#include <linux/stop_machine.h>
  25#include <linux/types.h>
  26#include <linux/mm.h>
  27#include <asm/cpu.h>
  28#include <asm/cpufeature.h>
  29#include <asm/cpu_ops.h>
  30#include <asm/fpsimd.h>
  31#include <asm/mmu_context.h>
  32#include <asm/processor.h>
  33#include <asm/sysreg.h>
  34#include <asm/traps.h>
  35#include <asm/virt.h>
  36
  37unsigned long elf_hwcap __read_mostly;
  38EXPORT_SYMBOL_GPL(elf_hwcap);
  39
  40#ifdef CONFIG_COMPAT
  41#define COMPAT_ELF_HWCAP_DEFAULT        \
  42                                (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
  43                                 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
  44                                 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
  45                                 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
  46                                 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
  47                                 COMPAT_HWCAP_LPAE)
  48unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
  49unsigned int compat_elf_hwcap2 __read_mostly;
  50#endif
  51
  52DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
  53EXPORT_SYMBOL(cpu_hwcaps);
  54
  55/*
  56 * Flag to indicate if we have computed the system wide
  57 * capabilities based on the boot time active CPUs. This
  58 * will be used to determine if a new booting CPU should
  59 * go through the verification process to make sure that it
  60 * supports the system capabilities, without using a hotplug
  61 * notifier.
  62 */
  63static bool sys_caps_initialised;
  64
  65static inline void set_sys_caps_initialised(void)
  66{
  67        sys_caps_initialised = true;
  68}
  69
  70static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
  71{
  72        /* file-wide pr_fmt adds "CPU features: " prefix */
  73        pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
  74        return 0;
  75}
  76
  77static struct notifier_block cpu_hwcaps_notifier = {
  78        .notifier_call = dump_cpu_hwcaps
  79};
  80
  81static int __init register_cpu_hwcaps_dumper(void)
  82{
  83        atomic_notifier_chain_register(&panic_notifier_list,
  84                                       &cpu_hwcaps_notifier);
  85        return 0;
  86}
  87__initcall(register_cpu_hwcaps_dumper);
  88
  89DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
  90EXPORT_SYMBOL(cpu_hwcap_keys);
  91
  92#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
  93        {                                               \
  94                .sign = SIGNED,                         \
  95                .visible = VISIBLE,                     \
  96                .strict = STRICT,                       \
  97                .type = TYPE,                           \
  98                .shift = SHIFT,                         \
  99                .width = WIDTH,                         \
 100                .safe_val = SAFE_VAL,                   \
 101        }
 102
 103/* Define a feature with unsigned values */
 104#define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 105        __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
 106
 107/* Define a feature with a signed value */
 108#define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 109        __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
 110
 111#define ARM64_FTR_END                                   \
 112        {                                               \
 113                .width = 0,                             \
 114        }
 115
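/*
 * As a worked example of the macros above, an entry such as
 *
 *     ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE,
 *                    ID_AA64ISAR0_AES_SHIFT, 4, 0)
 *
 * describes an unsigned 4-bit field: it is exposed to userspace via the
 * MRS emulation (VISIBLE), a mismatch between CPUs trips the sanity
 * check and taints the kernel (STRICT), and the sanitised system-wide
 * value is the lowest value seen on any CPU (LOWER_SAFE). The SAFE_VAL
 * of 0 is what userspace sees for hidden fields and what FTR_EXACT
 * fields fall back to when CPUs disagree.
 */
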
 116/* meta feature for alternatives */
 117static bool __maybe_unused
 118cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
 119
 120
 121/*
 122 * NOTE: Any changes to the visibility of features should be kept in
 123 * sync with the documentation of the CPU feature register ABI.
 124 */
 125static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 126        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
 127        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
 128        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
 129        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
 130        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
 131        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
 132        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
 133        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
 134        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
 135        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
 136        ARM64_FTR_END,
 137};
 138
 139static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 140        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
 141        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
 142        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
 143        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
 144        ARM64_FTR_END,
 145};
 146
 147static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 148        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
 149                                   FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
 150        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
 151        S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
 152        S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
 153        /* Linux doesn't care about the EL3 */
 154        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
 155        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
 156        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
 157        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
 158        ARM64_FTR_END,
 159};
 160
 161static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
 162        S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
 163        S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
 164        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
 165        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
 166        /* Linux shouldn't care about secure memory */
 167        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
 168        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
 169        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
 170        /*
 171         * Differing PARange is fine as long as all peripherals and memory are mapped
 172         * within the minimum PARange of all CPUs
 173         */
 174        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
 175        ARM64_FTR_END,
 176};
 177
 178static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 179        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
 180        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
 181        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
 182        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
 183        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
 184        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
 185        ARM64_FTR_END,
 186};
 187
 188static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
 189        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
 190        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
 191        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
 192        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
 193        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
 194        ARM64_FTR_END,
 195};
 196
 197static const struct arm64_ftr_bits ftr_ctr[] = {
 198        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1),   /* RAO */
 199        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),     /* CWG */
 200        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),      /* ERG */
 201        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),      /* DminLine */
 202        /*
 203         * Linux can handle differing I-cache policies. Userspace JITs will
 204         * make use of *minLine.
 205         * If we have differing I-cache policies, report it as the weakest - VIPT.
 206         */
 207        ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT),       /* L1Ip */
 208        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),       /* IminLine */
 209        ARM64_FTR_END,
 210};
 211
 212struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
 213        .name           = "SYS_CTR_EL0",
 214        .ftr_bits       = ftr_ctr
 215};
 216
 217static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
 218        S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf),   /* InnerShr */
 219        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),       /* FCSE */
 220        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),    /* AuxReg */
 221        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),       /* TCM */
 222        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),       /* ShareLvl */
 223        S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf),    /* OuterShr */
 224        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),        /* PMSA */
 225        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),        /* VMSA */
 226        ARM64_FTR_END,
 227};
 228
 229static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
 230        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
 231        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
 232        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
 233        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
 234        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
 235        /*
 236         * We can instantiate multiple PMU instances with different levels
 237         * of support.
 238         */
 239        S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
 240        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
 241        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
 242        ARM64_FTR_END,
 243};
 244
 245static const struct arm64_ftr_bits ftr_mvfr2[] = {
 246        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),                /* FPMisc */
 247        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),                /* SIMDMisc */
 248        ARM64_FTR_END,
 249};
 250
 251static const struct arm64_ftr_bits ftr_dczid[] = {
 252        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1),            /* DZP */
 253        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),       /* BS */
 254        ARM64_FTR_END,
 255};
 256
 257
 258static const struct arm64_ftr_bits ftr_id_isar5[] = {
 259        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
 260        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
 261        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
 262        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
 263        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
 264        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
 265        ARM64_FTR_END,
 266};
 267
 268static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
 269        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),        /* ac2 */
 270        ARM64_FTR_END,
 271};
 272
 273static const struct arm64_ftr_bits ftr_id_pfr0[] = {
 274        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),               /* State3 */
 275        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),                /* State2 */
 276        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),                /* State1 */
 277        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),                /* State0 */
 278        ARM64_FTR_END,
 279};
 280
 281static const struct arm64_ftr_bits ftr_id_dfr0[] = {
 282        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
 283        S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),   /* PerfMon */
 284        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
 285        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
 286        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
 287        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
 288        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
 289        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
 290        ARM64_FTR_END,
 291};
 292
 293static const struct arm64_ftr_bits ftr_zcr[] = {
 294        ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
 295                ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0),        /* LEN */
 296        ARM64_FTR_END,
 297};
 298
 299/*
 300 * Common ftr bits for a 32bit register with all hidden, strict
 301 * attributes, with 4bit feature fields and a default safe value of
 302 * 0. Covers the following 32bit registers:
 303 * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
 304 */
 305static const struct arm64_ftr_bits ftr_generic_32bits[] = {
 306        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
 307        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
 308        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
 309        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
 310        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
 311        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
 312        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
 313        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
 314        ARM64_FTR_END,
 315};
 316
 317/* Table for a single 32bit feature value */
 318static const struct arm64_ftr_bits ftr_single32[] = {
 319        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
 320        ARM64_FTR_END,
 321};
 322
 323static const struct arm64_ftr_bits ftr_raz[] = {
 324        ARM64_FTR_END,
 325};
 326
 327#define ARM64_FTR_REG(id, table) {              \
 328        .sys_id = id,                           \
 329        .reg =  &(struct arm64_ftr_reg){        \
 330                .name = #id,                    \
 331                .ftr_bits = &((table)[0]),      \
 332        }}
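
/*
 * For example,
 *
 *     ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0)
 *
 * expands, roughly, to
 *
 *     { .sys_id = SYS_ID_AA64ISAR0_EL1,
 *       .reg = &(struct arm64_ftr_reg){
 *            .name = "SYS_ID_AA64ISAR0_EL1",
 *            .ftr_bits = &ftr_id_aa64isar0[0],
 *       } }
 *
 * i.e. each table entry pairs a sys_reg() encoding with an anonymous
 * arm64_ftr_reg carrying the per-field descriptions above.
 */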
 333
 334static const struct __ftr_reg_entry {
 335        u32                     sys_id;
 336        struct arm64_ftr_reg    *reg;
 337} arm64_ftr_regs[] = {
 338
 339        /* Op1 = 0, CRn = 0, CRm = 1 */
 340        ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
 341        ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
 342        ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
 343        ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
 344        ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
 345        ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
 346        ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
 347
 348        /* Op1 = 0, CRn = 0, CRm = 2 */
 349        ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
 350        ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
 351        ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
 352        ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
 353        ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
 354        ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
 355        ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
 356
 357        /* Op1 = 0, CRn = 0, CRm = 3 */
 358        ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
 359        ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
 360        ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
 361
 362        /* Op1 = 0, CRn = 0, CRm = 4 */
 363        ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
 364        ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
 365        ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
 366
 367        /* Op1 = 0, CRn = 0, CRm = 5 */
 368        ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
 369        ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),
 370
 371        /* Op1 = 0, CRn = 0, CRm = 6 */
 372        ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
 373        ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
 374
 375        /* Op1 = 0, CRn = 0, CRm = 7 */
 376        ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
 377        ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
 378        ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
 379
 380        /* Op1 = 0, CRn = 1, CRm = 2 */
 381        ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
 382
 383        /* Op1 = 3, CRn = 0, CRm = 0 */
 384        { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
 385        ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
 386
 387        /* Op1 = 3, CRn = 14, CRm = 0 */
 388        ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
 389};
 390
 391static int search_cmp_ftr_reg(const void *id, const void *regp)
 392{
 393        return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
 394}
 395
 396/*
 397 * get_arm64_ftr_reg - Lookup a feature register entry using its
 398 * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 399 * ascending order of sys_id, we use binary search to find a matching
 400 * entry.
 401 *
 402 * returns - Upon success, a matching ftr_reg entry for id.
 403 *         - NULL on failure. It is up to the caller to decide
 404 *           the impact of a failure.
 405 */
 406static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
 407{
 408        const struct __ftr_reg_entry *ret;
 409
 410        ret = bsearch((const void *)(unsigned long)sys_id,
 411                        arm64_ftr_regs,
 412                        ARRAY_SIZE(arm64_ftr_regs),
 413                        sizeof(arm64_ftr_regs[0]),
 414                        search_cmp_ftr_reg);
 415        if (ret)
 416                return ret->reg;
 417        return NULL;
 418}
 419
 420static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
 421                               s64 ftr_val)
 422{
 423        u64 mask = arm64_ftr_mask(ftrp);
 424
 425        reg &= ~mask;
 426        reg |= (ftr_val << ftrp->shift) & mask;
 427        return reg;
 428}
 429
 430static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
 431                                s64 cur)
 432{
 433        s64 ret = 0;
 434
 435        switch (ftrp->type) {
 436        case FTR_EXACT:
 437                ret = ftrp->safe_val;
 438                break;
 439        case FTR_LOWER_SAFE:
 440                ret = new < cur ? new : cur;
 441                break;
 442        case FTR_HIGHER_SAFE:
 443                ret = new > cur ? new : cur;
 444                break;
 445        default:
 446                BUG();
 447        }
 448
 449        return ret;
 450}
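
/*
 * For example, with FTR_LOWER_SAFE, if the boot CPU reports a field
 * value of 2 and a later CPU reports 1, the sanitised system-wide value
 * becomes 1; with FTR_HIGHER_SAFE the larger value wins; with FTR_EXACT
 * any disagreement is resolved to ftrp->safe_val.
 */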
 451
 452static void __init sort_ftr_regs(void)
 453{
 454        int i;
 455
 456        /* Check that the array is sorted so that we can do the binary search */
 457        for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
 458                BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
 459}
 460
 461/*
 462 * Initialise the CPU feature register from Boot CPU values.
 463 * Also initialises the strict_mask for the register.
 464 * Any bits that are not covered by an arm64_ftr_bits entry are considered
 465 * RES0 for the system-wide value, and must strictly match.
 466 */
 467static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
 468{
 469        u64 val = 0;
 470        u64 strict_mask = ~0x0ULL;
 471        u64 user_mask = 0;
 472        u64 valid_mask = 0;
 473
 474        const struct arm64_ftr_bits *ftrp;
 475        struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
 476
 477        BUG_ON(!reg);
 478
 479        for (ftrp  = reg->ftr_bits; ftrp->width; ftrp++) {
 480                u64 ftr_mask = arm64_ftr_mask(ftrp);
 481                s64 ftr_new = arm64_ftr_value(ftrp, new);
 482
 483                val = arm64_ftr_set_value(ftrp, val, ftr_new);
 484
 485                valid_mask |= ftr_mask;
 486                if (!ftrp->strict)
 487                        strict_mask &= ~ftr_mask;
 488                if (ftrp->visible)
 489                        user_mask |= ftr_mask;
 490                else
 491                        reg->user_val = arm64_ftr_set_value(ftrp,
 492                                                            reg->user_val,
 493                                                            ftrp->safe_val);
 494        }
 495
 496        val &= valid_mask;
 497
 498        reg->sys_val = val;
 499        reg->strict_mask = strict_mask;
 500        reg->user_mask = user_mask;
 501}
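
/*
 * On return, for the given register:
 *  - sys_val holds the boot CPU's value, restricted to the fields that
 *    have an arm64_ftr_bits entry (everything else is treated as RES0),
 *  - strict_mask covers every bit except fields marked FTR_NONSTRICT,
 *  - user_mask covers the FTR_VISIBLE fields, while hidden fields are
 *    reported to userspace as their safe_val via user_val.
 */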
 502
 503void __init init_cpu_features(struct cpuinfo_arm64 *info)
 504{
 505        /* Before we start using the table, make sure it is sorted */
 506        sort_ftr_regs();
 507
 508        init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
 509        init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
 510        init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
 511        init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
 512        init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
 513        init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
 514        init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
 515        init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
 516        init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
 517        init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
 518        init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
 519        init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
 520        init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
 521
 522        if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
 523                init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
 524                init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
 525                init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
 526                init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
 527                init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
 528                init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
 529                init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
 530                init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
 531                init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
 532                init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
 533                init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
 534                init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
 535                init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
 536                init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
 537                init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
 538                init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
 539        }
 540
 541        if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
 542                init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
 543                sve_init_vq_map();
 544        }
 545}
 546
 547static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
 548{
 549        const struct arm64_ftr_bits *ftrp;
 550
 551        for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
 552                s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
 553                s64 ftr_new = arm64_ftr_value(ftrp, new);
 554
 555                if (ftr_cur == ftr_new)
 556                        continue;
 557                /* Find a safe value */
 558                ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
 559                reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
 560        }
 561
 562}
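
/*
 * For example, if the boot CPU reported ID_AA64ISAR0_EL1.ATOMICS = 2 but
 * a secondary CPU reports 0, the ATOMICS field is FTR_LOWER_SAFE, so the
 * sanitised system-wide value drops to 0 and the LSE atomics capability
 * is never advertised, even though some CPUs implement it.
 */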
 563
 564static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
 565{
 566        struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
 567
 568        BUG_ON(!regp);
 569        update_cpu_ftr_reg(regp, val);
 570        if ((boot & regp->strict_mask) == (val & regp->strict_mask))
 571                return 0;
 572        pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
 573                        regp->name, boot, cpu, val);
 574        return 1;
 575}
 576
 577/*
 578 * Update system wide CPU feature registers with the values from a
 579 * non-boot CPU. Also performs SANITY checks to make sure that there
 580 * aren't any insane variations from that of the boot CPU.
 581 */
 582void update_cpu_features(int cpu,
 583                         struct cpuinfo_arm64 *info,
 584                         struct cpuinfo_arm64 *boot)
 585{
 586        int taint = 0;
 587
 588        /*
 589         * The kernel can handle differing I-cache policies, but otherwise
 590         * caches should look identical. Userspace JITs will make use of
 591         * *minLine.
 592         */
 593        taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
 594                                      info->reg_ctr, boot->reg_ctr);
 595
 596        /*
 597         * Userspace may perform DC ZVA instructions. Mismatched block sizes
 598         * could result in too much or too little memory being zeroed if a
 599         * process is preempted and migrated between CPUs.
 600         */
 601        taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
 602                                      info->reg_dczid, boot->reg_dczid);
 603
 604        /* If different, timekeeping will be broken (especially with KVM) */
 605        taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
 606                                      info->reg_cntfrq, boot->reg_cntfrq);
 607
 608        /*
 609         * The kernel uses self-hosted debug features and expects CPUs to
 610         * support identical debug features. We presently need CTX_CMPs, WRPs,
 611         * and BRPs to be identical.
 612         * ID_AA64DFR1 is currently RES0.
 613         */
 614        taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
 615                                      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
 616        taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
 617                                      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
 618        /*
 619         * Even in big.LITTLE, processors should be identical instruction-set
 620         * wise.
 621         */
 622        taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
 623                                      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
 624        taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
 625                                      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
 626
 627        /*
 628         * Differing PARange support is fine as long as all peripherals and
 629         * memory are mapped within the minimum PARange of all CPUs.
 630         * Linux should not care about secure memory.
 631         */
 632        taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
 633                                      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
 634        taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
 635                                      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
 636        taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
 637                                      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
 638
 639        /*
 640         * EL3 is not our concern.
 641         * ID_AA64PFR1 is currently RES0.
 642         */
 643        taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
 644                                      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
 645        taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
 646                                      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
 647
 648        taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
 649                                      info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
 650
 651        /*
 652         * If we have AArch32, we care about 32-bit features for compat.
 653         * If the system doesn't support AArch32, don't update them.
 654         */
 655        if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
 656                id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
 657
 658                taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
 659                                        info->reg_id_dfr0, boot->reg_id_dfr0);
 660                taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
 661                                        info->reg_id_isar0, boot->reg_id_isar0);
 662                taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
 663                                        info->reg_id_isar1, boot->reg_id_isar1);
 664                taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
 665                                        info->reg_id_isar2, boot->reg_id_isar2);
 666                taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
 667                                        info->reg_id_isar3, boot->reg_id_isar3);
 668                taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
 669                                        info->reg_id_isar4, boot->reg_id_isar4);
 670                taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
 671                                        info->reg_id_isar5, boot->reg_id_isar5);
 672
 673                /*
 674                 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
 675                 * ACTLR formats could differ across CPUs and therefore would have to
 676                 * be trapped for virtualization anyway.
 677                 */
 678                taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
 679                                        info->reg_id_mmfr0, boot->reg_id_mmfr0);
 680                taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
 681                                        info->reg_id_mmfr1, boot->reg_id_mmfr1);
 682                taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
 683                                        info->reg_id_mmfr2, boot->reg_id_mmfr2);
 684                taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
 685                                        info->reg_id_mmfr3, boot->reg_id_mmfr3);
 686                taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
 687                                        info->reg_id_pfr0, boot->reg_id_pfr0);
 688                taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
 689                                        info->reg_id_pfr1, boot->reg_id_pfr1);
 690                taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
 691                                        info->reg_mvfr0, boot->reg_mvfr0);
 692                taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
 693                                        info->reg_mvfr1, boot->reg_mvfr1);
 694                taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
 695                                        info->reg_mvfr2, boot->reg_mvfr2);
 696        }
 697
 698        if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
 699                taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
 700                                        info->reg_zcr, boot->reg_zcr);
 701
 702                /* Probe vector lengths, unless we already gave up on SVE */
 703                if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
 704                    !sys_caps_initialised)
 705                        sve_update_vq_map();
 706        }
 707
 708        /*
 709         * Mismatched CPU features are a recipe for disaster. Don't even
 710         * pretend to support them.
 711         */
 712        if (taint) {
 713                pr_warn_once("Unsupported CPU feature variation detected.\n");
 714                add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
 715        }
 716}
 717
 718u64 read_sanitised_ftr_reg(u32 id)
 719{
 720        struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
 721
 722        /* We shouldn't get a request for an unsupported register */
 723        BUG_ON(!regp);
 724        return regp->sys_val;
 725}
 726
 727#define read_sysreg_case(r)     \
 728        case r:         return read_sysreg_s(r)
 729
 730/*
 731 * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
 732 * Read the system register on the current CPU
 733 */
 734static u64 __read_sysreg_by_encoding(u32 sys_id)
 735{
 736        switch (sys_id) {
 737        read_sysreg_case(SYS_ID_PFR0_EL1);
 738        read_sysreg_case(SYS_ID_PFR1_EL1);
 739        read_sysreg_case(SYS_ID_DFR0_EL1);
 740        read_sysreg_case(SYS_ID_MMFR0_EL1);
 741        read_sysreg_case(SYS_ID_MMFR1_EL1);
 742        read_sysreg_case(SYS_ID_MMFR2_EL1);
 743        read_sysreg_case(SYS_ID_MMFR3_EL1);
 744        read_sysreg_case(SYS_ID_ISAR0_EL1);
 745        read_sysreg_case(SYS_ID_ISAR1_EL1);
 746        read_sysreg_case(SYS_ID_ISAR2_EL1);
 747        read_sysreg_case(SYS_ID_ISAR3_EL1);
 748        read_sysreg_case(SYS_ID_ISAR4_EL1);
 749        read_sysreg_case(SYS_ID_ISAR5_EL1);
 750        read_sysreg_case(SYS_MVFR0_EL1);
 751        read_sysreg_case(SYS_MVFR1_EL1);
 752        read_sysreg_case(SYS_MVFR2_EL1);
 753
 754        read_sysreg_case(SYS_ID_AA64PFR0_EL1);
 755        read_sysreg_case(SYS_ID_AA64PFR1_EL1);
 756        read_sysreg_case(SYS_ID_AA64DFR0_EL1);
 757        read_sysreg_case(SYS_ID_AA64DFR1_EL1);
 758        read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
 759        read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
 760        read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
 761        read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
 762        read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
 763
 764        read_sysreg_case(SYS_CNTFRQ_EL0);
 765        read_sysreg_case(SYS_CTR_EL0);
 766        read_sysreg_case(SYS_DCZID_EL0);
 767
 768        default:
 769                BUG();
 770                return 0;
 771        }
 772}
 773
 774#include <linux/irqchip/arm-gic-v3.h>
 775
 776static bool
 777feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
 778{
 779        int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
 780
 781        return val >= entry->min_field_value;
 782}
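
/*
 * The extraction above honours the field's signedness: the FP field of
 * ID_AA64PFR0_EL1, for instance, is signed, so the "not implemented"
 * encoding of 0xf is read as -1 and compares below a min_field_value of
 * 0, whereas unsigned fields simply grow with each feature revision.
 */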
 783
 784static bool
 785has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
 786{
 787        u64 val;
 788
 789        WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
 790        if (scope == SCOPE_SYSTEM)
 791                val = read_sanitised_ftr_reg(entry->sys_reg);
 792        else
 793                val = __read_sysreg_by_encoding(entry->sys_reg);
 794
 795        return feature_matches(val, entry);
 796}
 797
 798static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
 799{
 800        bool has_sre;
 801
 802        if (!has_cpuid_feature(entry, scope))
 803                return false;
 804
 805        has_sre = gic_enable_sre();
 806        if (!has_sre)
 807                pr_warn_once("%s present but disabled by higher exception level\n",
 808                             entry->desc);
 809
 810        return has_sre;
 811}
 812
 813static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
 814{
 815        u32 midr = read_cpuid_id();
 816
 817        /* Cavium ThunderX pass 1.x and 2.x */
 818        return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
 819                MIDR_CPU_VAR_REV(0, 0),
 820                MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
 821}
 822
 823static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
 824{
 825        return is_kernel_in_hyp_mode();
 826}
 827
 828static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
 829                           int __unused)
 830{
 831        phys_addr_t idmap_addr = __pa_symbol(__hyp_idmap_text_start);
 832
 833        /*
 834         * Activate the lower HYP offset only if:
 835         * - the idmap doesn't clash with it,
 836         * - the kernel is not running at EL2.
 837         */
 838        return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
 839}
 840
 841static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
 842{
 843        u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 844
 845        return cpuid_feature_extract_signed_field(pfr0,
 846                                        ID_AA64PFR0_FP_SHIFT) < 0;
 847}
 848
 849static const struct arm64_cpu_capabilities arm64_features[] = {
 850        {
 851                .desc = "GIC system register CPU interface",
 852                .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
 853                .def_scope = SCOPE_SYSTEM,
 854                .matches = has_useable_gicv3_cpuif,
 855                .sys_reg = SYS_ID_AA64PFR0_EL1,
 856                .field_pos = ID_AA64PFR0_GIC_SHIFT,
 857                .sign = FTR_UNSIGNED,
 858                .min_field_value = 1,
 859        },
 860#ifdef CONFIG_ARM64_PAN
 861        {
 862                .desc = "Privileged Access Never",
 863                .capability = ARM64_HAS_PAN,
 864                .def_scope = SCOPE_SYSTEM,
 865                .matches = has_cpuid_feature,
 866                .sys_reg = SYS_ID_AA64MMFR1_EL1,
 867                .field_pos = ID_AA64MMFR1_PAN_SHIFT,
 868                .sign = FTR_UNSIGNED,
 869                .min_field_value = 1,
 870                .enable = cpu_enable_pan,
 871        },
 872#endif /* CONFIG_ARM64_PAN */
 873#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
 874        {
 875                .desc = "LSE atomic instructions",
 876                .capability = ARM64_HAS_LSE_ATOMICS,
 877                .def_scope = SCOPE_SYSTEM,
 878                .matches = has_cpuid_feature,
 879                .sys_reg = SYS_ID_AA64ISAR0_EL1,
 880                .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
 881                .sign = FTR_UNSIGNED,
 882                .min_field_value = 2,
 883        },
 884#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
 885        {
 886                .desc = "Software prefetching using PRFM",
 887                .capability = ARM64_HAS_NO_HW_PREFETCH,
 888                .def_scope = SCOPE_SYSTEM,
 889                .matches = has_no_hw_prefetch,
 890        },
 891#ifdef CONFIG_ARM64_UAO
 892        {
 893                .desc = "User Access Override",
 894                .capability = ARM64_HAS_UAO,
 895                .def_scope = SCOPE_SYSTEM,
 896                .matches = has_cpuid_feature,
 897                .sys_reg = SYS_ID_AA64MMFR2_EL1,
 898                .field_pos = ID_AA64MMFR2_UAO_SHIFT,
 899                .min_field_value = 1,
 900                /*
 901                 * We rely on stop_machine() calling uao_thread_switch() to set
 902                 * UAO immediately after patching.
 903                 */
 904        },
 905#endif /* CONFIG_ARM64_UAO */
 906#ifdef CONFIG_ARM64_PAN
 907        {
 908                .capability = ARM64_ALT_PAN_NOT_UAO,
 909                .def_scope = SCOPE_SYSTEM,
 910                .matches = cpufeature_pan_not_uao,
 911        },
 912#endif /* CONFIG_ARM64_PAN */
 913        {
 914                .desc = "Virtualization Host Extensions",
 915                .capability = ARM64_HAS_VIRT_HOST_EXTN,
 916                .def_scope = SCOPE_SYSTEM,
 917                .matches = runs_at_el2,
 918        },
 919        {
 920                .desc = "32-bit EL0 Support",
 921                .capability = ARM64_HAS_32BIT_EL0,
 922                .def_scope = SCOPE_SYSTEM,
 923                .matches = has_cpuid_feature,
 924                .sys_reg = SYS_ID_AA64PFR0_EL1,
 925                .sign = FTR_UNSIGNED,
 926                .field_pos = ID_AA64PFR0_EL0_SHIFT,
 927                .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
 928        },
 929        {
 930                .desc = "Reduced HYP mapping offset",
 931                .capability = ARM64_HYP_OFFSET_LOW,
 932                .def_scope = SCOPE_SYSTEM,
 933                .matches = hyp_offset_low,
 934        },
 935        {
 936                /* FP/SIMD is not implemented */
 937                .capability = ARM64_HAS_NO_FPSIMD,
 938                .def_scope = SCOPE_SYSTEM,
 939                .min_field_value = 0,
 940                .matches = has_no_fpsimd,
 941        },
 942#ifdef CONFIG_ARM64_PMEM
 943        {
 944                .desc = "Data cache clean to Point of Persistence",
 945                .capability = ARM64_HAS_DCPOP,
 946                .def_scope = SCOPE_SYSTEM,
 947                .matches = has_cpuid_feature,
 948                .sys_reg = SYS_ID_AA64ISAR1_EL1,
 949                .field_pos = ID_AA64ISAR1_DPB_SHIFT,
 950                .min_field_value = 1,
 951        },
 952#endif
 953#ifdef CONFIG_ARM64_SVE
 954        {
 955                .desc = "Scalable Vector Extension",
 956                .capability = ARM64_SVE,
 957                .def_scope = SCOPE_SYSTEM,
 958                .sys_reg = SYS_ID_AA64PFR0_EL1,
 959                .sign = FTR_UNSIGNED,
 960                .field_pos = ID_AA64PFR0_SVE_SHIFT,
 961                .min_field_value = ID_AA64PFR0_SVE,
 962                .matches = has_cpuid_feature,
 963                .enable = sve_kernel_enable,
 964        },
 965#endif /* CONFIG_ARM64_SVE */
 966        {},
 967};
 968
 969#define HWCAP_CAP(reg, field, s, min_value, type, cap)  \
 970        {                                                       \
 971                .desc = #cap,                                   \
 972                .def_scope = SCOPE_SYSTEM,                      \
 973                .matches = has_cpuid_feature,                   \
 974                .sys_reg = reg,                                 \
 975                .field_pos = field,                             \
 976                .sign = s,                                      \
 977                .min_field_value = min_value,                   \
 978                .hwcap_type = type,                             \
 979                .hwcap = cap,                                   \
 980        }
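
/*
 * For example,
 *
 *     HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT,
 *               FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES)
 *
 * builds a capability that is matched with has_cpuid_feature() against
 * the sanitised system-wide ID_AA64ISAR0_EL1 value; if the AES field is
 * at least 1, HWCAP_AES is set in elf_hwcap and becomes visible to
 * userspace via AT_HWCAP and /proc/cpuinfo.
 */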
 981
 982static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 983        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
 984        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
 985        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
 986        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
 987        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
 988        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
 989        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
 990        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
 991        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
 992        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
 993        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
 994        HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
 995        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
 996        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
 997        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
 998        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
 999        HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
1000        HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
1001        HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
1002        HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
1003#ifdef CONFIG_ARM64_SVE
1004        HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
1005#endif
1006        {},
1007};
1008
1009static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
1010#ifdef CONFIG_COMPAT
1011        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
1012        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
1013        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
1014        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
1015        HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
1016#endif
1017        {},
1018};
1019
1020static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
1021{
1022        switch (cap->hwcap_type) {
1023        case CAP_HWCAP:
1024                elf_hwcap |= cap->hwcap;
1025                break;
1026#ifdef CONFIG_COMPAT
1027        case CAP_COMPAT_HWCAP:
1028                compat_elf_hwcap |= (u32)cap->hwcap;
1029                break;
1030        case CAP_COMPAT_HWCAP2:
1031                compat_elf_hwcap2 |= (u32)cap->hwcap;
1032                break;
1033#endif
1034        default:
1035                WARN_ON(1);
1036                break;
1037        }
1038}
1039
1040/* Check if we have a particular HWCAP enabled */
1041static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
1042{
1043        bool rc;
1044
1045        switch (cap->hwcap_type) {
1046        case CAP_HWCAP:
1047                rc = (elf_hwcap & cap->hwcap) != 0;
1048                break;
1049#ifdef CONFIG_COMPAT
1050        case CAP_COMPAT_HWCAP:
1051                rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
1052                break;
1053        case CAP_COMPAT_HWCAP2:
1054                rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
1055                break;
1056#endif
1057        default:
1058                WARN_ON(1);
1059                rc = false;
1060        }
1061
1062        return rc;
1063}
1064
1065static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
1066{
1067        /* We support emulation of accesses to CPU ID feature registers */
1068        elf_hwcap |= HWCAP_CPUID;
1069        for (; hwcaps->matches; hwcaps++)
1070                if (hwcaps->matches(hwcaps, hwcaps->def_scope))
1071                        cap_set_elf_hwcap(hwcaps);
1072}
1073
1074void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1075                            const char *info)
1076{
1077        for (; caps->matches; caps++) {
1078                if (!caps->matches(caps, caps->def_scope))
1079                        continue;
1080
1081                if (!cpus_have_cap(caps->capability) && caps->desc)
1082                        pr_info("%s %s\n", info, caps->desc);
1083                cpus_set_cap(caps->capability);
1084        }
1085}
1086
1087/*
1088 * Run through the enabled capabilities and call their enable() callback
1089 * on all active CPUs.
1090 */
1091void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
1092{
1093        for (; caps->matches; caps++) {
1094                unsigned int num = caps->capability;
1095
1096                if (!cpus_have_cap(num))
1097                        continue;
1098
1099                /* Ensure cpus_have_const_cap(num) works */
1100                static_branch_enable(&cpu_hwcap_keys[num]);
1101
1102                if (caps->enable) {
1103                        /*
1104                         * Use stop_machine() as it schedules the work allowing
1105                         * us to modify PSTATE, instead of on_each_cpu() which
1106                         * uses an IPI, giving us a PSTATE that disappears when
1107                         * we return.
1108                         */
1109                        stop_machine(caps->enable, NULL, cpu_online_mask);
1110                }
1111        }
1112}
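
/*
 * Once the static key for a capability has been enabled here, a sketch
 * of the typical fast-path check elsewhere in the kernel is:
 *
 *     if (cpus_have_const_cap(ARM64_HAS_PAN))
 *             do_something();     // patched static branch, no bitmap read
 *
 * which avoids re-reading the cpu_hwcaps bitmap on every call.
 */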
1113
1114/*
1115 * Check for CPU features that are used in early boot
1116 * based on the Boot CPU value.
1117 */
1118static void check_early_cpu_features(void)
1119{
1120        verify_cpu_run_el();
1121        verify_cpu_asid_bits();
1122}
1123
1124static void
1125verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
1126{
1127
1128        for (; caps->matches; caps++)
1129                if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
1130                        pr_crit("CPU%d: missing HWCAP: %s\n",
1131                                        smp_processor_id(), caps->desc);
1132                        cpu_die_early();
1133                }
1134}
1135
1136static void
1137verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
1138{
1139        for (; caps->matches; caps++) {
1140                if (!cpus_have_cap(caps->capability))
1141                        continue;
1142                /*
1143                 * If the new CPU misses an advertised feature, we cannot proceed
1144                 * any further; park the CPU.
1145                 */
1146                if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
1147                        pr_crit("CPU%d: missing feature: %s\n",
1148                                        smp_processor_id(), caps->desc);
1149                        cpu_die_early();
1150                }
1151                if (caps->enable)
1152                        caps->enable(NULL);
1153        }
1154}
1155
1156static void verify_sve_features(void)
1157{
1158        u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
1159        u64 zcr = read_zcr_features();
1160
1161        unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
1162        unsigned int len = zcr & ZCR_ELx_LEN_MASK;
1163
1164        if (len < safe_len || sve_verify_vq_map()) {
1165                pr_crit("CPU%d: SVE: required vector length(s) missing\n",
1166                        smp_processor_id());
1167                cpu_die_early();
1168        }
1169
1170        /* Add checks on other ZCR bits here if necessary */
1171}
1172
1173/*
1174 * Run through the enabled system capabilities and enable() them on this CPU.
1175 * The capabilities were decided based on the available CPUs at the boot time.
1176 * Any new CPU should match the system wide status of the capability. If the
1177 * new CPU doesn't have a capability which the system now has enabled, we
1178 * cannot do anything to fix it up and could cause unexpected failures. So
1179 * we park the CPU.
1180 */
1181static void verify_local_cpu_capabilities(void)
1182{
1183        verify_local_cpu_errata_workarounds();
1184        verify_local_cpu_features(arm64_features);
1185        verify_local_elf_hwcaps(arm64_elf_hwcaps);
1186
1187        if (system_supports_32bit_el0())
1188                verify_local_elf_hwcaps(compat_elf_hwcaps);
1189
1190        if (system_supports_sve())
1191                verify_sve_features();
1192}
1193
1194void check_local_cpu_capabilities(void)
1195{
1196        /*
1197         * All secondary CPUs should conform to the early CPU features
1198         * in use by the kernel based on boot CPU.
1199         */
1200        check_early_cpu_features();
1201
1202        /*
1203         * If we haven't finalised the system capabilities, this CPU gets
1204         * a chance to update the errata workarounds.
1205         * Otherwise, this CPU should verify that it has all the system
1206         * advertised capabilities.
1207         */
1208        if (!sys_caps_initialised)
1209                update_cpu_errata_workarounds();
1210        else
1211                verify_local_cpu_capabilities();
1212}
1213
1214static void __init setup_feature_capabilities(void)
1215{
1216        update_cpu_capabilities(arm64_features, "detected feature:");
1217        enable_cpu_capabilities(arm64_features);
1218}
1219
1220DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
1221EXPORT_SYMBOL(arm64_const_caps_ready);
1222
1223static void __init mark_const_caps_ready(void)
1224{
1225        static_branch_enable(&arm64_const_caps_ready);
1226}
1227
1228/*
1229 * Check if the current CPU has a given feature capability.
1230 * Should be called from non-preemptible context.
1231 */
1232static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
1233                               unsigned int cap)
1234{
1235        const struct arm64_cpu_capabilities *caps;
1236
1237        if (WARN_ON(preemptible()))
1238                return false;
1239
1240        for (caps = cap_array; caps->matches; caps++)
1241                if (caps->capability == cap && caps->matches)
1242                        return caps->matches(caps, SCOPE_LOCAL_CPU);
1243
1244        return false;
1245}
1246
1247extern const struct arm64_cpu_capabilities arm64_errata[];
1248
1249bool this_cpu_has_cap(unsigned int cap)
1250{
1251        return (__this_cpu_has_cap(arm64_features, cap) ||
1252                __this_cpu_has_cap(arm64_errata, cap));
1253}
1254
1255void __init setup_cpu_features(void)
1256{
1257        u32 cwg;
1258        int cls;
1259
1260        /* Set the CPU feature capabilities */
1261        setup_feature_capabilities();
1262        enable_errata_workarounds();
1263        mark_const_caps_ready();
1264        setup_elf_hwcaps(arm64_elf_hwcaps);
1265
1266        if (system_supports_32bit_el0())
1267                setup_elf_hwcaps(compat_elf_hwcaps);
1268
1269        sve_setup();
1270
1271        /* Advertise that we have computed the system capabilities */
1272        set_sys_caps_initialised();
1273
1274        /*
1275         * Check for sane CTR_EL0.CWG value.
1276         */
1277        cwg = cache_type_cwg();
1278        cls = cache_line_size();
1279        if (!cwg)
1280                pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
1281                        cls);
1282        if (L1_CACHE_BYTES < cls)
1283                pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
1284                        L1_CACHE_BYTES, cls);
1285}
1286
1287static bool __maybe_unused
1288cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
1289{
1290        return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
1291}
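
/*
 * ARM64_ALT_PAN_NOT_UAO is consumed by assembly alternatives: the
 * uaccess enable/disable sequences only need to toggle PSTATE.PAN on
 * CPUs that have PAN but lack UAO, since UAO-capable CPUs use the
 * unprivileged load/store instructions for user accesses instead.
 */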
1292
1293/*
1294 * We emulate only the following system register space.
1295 * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
1296 * See Table C5-6 System instruction encodings for System register accesses,
1297 * ARMv8 ARM (ARM DDI 0487A.f) for more details.
1298 */
1299static inline bool __attribute_const__ is_emulated(u32 id)
1300{
1301        return (sys_reg_Op0(id) == 0x3 &&
1302                sys_reg_CRn(id) == 0x0 &&
1303                sys_reg_Op1(id) == 0x0 &&
1304                (sys_reg_CRm(id) == 0 ||
1305                 ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
1306}
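
/*
 * For example, SYS_ID_AA64ISAR0_EL1 (Op0 = 3, Op1 = 0, CRn = 0, CRm = 6)
 * falls within the emulated space and is served from the sanitised
 * register state, while SYS_MIDR_EL1 (CRm = 0) is handled separately by
 * emulate_id_reg() below.
 */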
1307
1308/*
1309 * With CRm == 0, reg should be one of :
1310 * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
1311 */
1312static inline int emulate_id_reg(u32 id, u64 *valp)
1313{
1314        switch (id) {
1315        case SYS_MIDR_EL1:
1316                *valp = read_cpuid_id();
1317                break;
1318        case SYS_MPIDR_EL1:
1319                *valp = SYS_MPIDR_SAFE_VAL;
1320                break;
1321        case SYS_REVIDR_EL1:
1322                /* IMPLEMENTATION DEFINED values are emulated with 0 */
1323                *valp = 0;
1324                break;
1325        default:
1326                return -EINVAL;
1327        }
1328
1329        return 0;
1330}
1331
1332static int emulate_sys_reg(u32 id, u64 *valp)
1333{
1334        struct arm64_ftr_reg *regp;
1335
1336        if (!is_emulated(id))
1337                return -EINVAL;
1338
1339        if (sys_reg_CRm(id) == 0)
1340                return emulate_id_reg(id, valp);
1341
1342        regp = get_arm64_ftr_reg(id);
1343        if (regp)
1344                *valp = arm64_ftr_reg_user_value(regp);
1345        else
1346                /*
1347                 * The untracked registers are either IMPLEMENTATION DEFINED
1348                 * (e.g, ID_AFR0_EL1) or reserved RAZ.
1349                 */
1350                *valp = 0;
1351        return 0;
1352}
1353
1354static int emulate_mrs(struct pt_regs *regs, u32 insn)
1355{
1356        int rc;
1357        u32 sys_reg, dst;
1358        u64 val;
1359
1360        /*
1361         * sys_reg values are defined as used in the mrs/msr instructions;
1362         * shift the imm value left by 5 to get the sys_reg() encoding.
1363         */
1364        sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
1365        rc = emulate_sys_reg(sys_reg, &val);
1366        if (!rc) {
1367                dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
1368                pt_regs_write_reg(regs, dst, val);
1369                arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
1370        }
1371
1372        return rc;
1373}
1374
1375static struct undef_hook mrs_hook = {
1376        .instr_mask = 0xfff00000,
1377        .instr_val  = 0xd5300000,
1378        .pstate_mask = COMPAT_PSR_MODE_MASK,
1379        .pstate_val = PSR_MODE_EL0t,
1380        .fn = emulate_mrs,
1381};
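
/*
 * The mask/value pair above matches MRS (read system register)
 * instructions, whose top twelve bits are 0xd53. So when a user task
 * executes, say,
 *
 *     mrs     x0, ID_AA64ISAR0_EL1
 *
 * the register is not accessible at EL0, the resulting undefined
 * instruction exception invokes emulate_mrs(), and x0 is written with
 * the sanitised, userspace-visible view of the register.
 */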
1382
1383static int __init enable_mrs_emulation(void)
1384{
1385        register_undef_hook(&mrs_hook);
1386        return 0;
1387}
1388
1389core_initcall(enable_mrs_emulation);
1390