// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */
#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

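/*
 * Match if the CPU's MIDR falls in the capability's affected range and its
 * REVIDR does not mark the erratum as already fixed for that revision.
 */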
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

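/*
 * Qualcomm Kryo parts reuse Falkor errata but encode extra information in
 * the low bits of the part number, so compare only the implementor, the
 * architecture field and the top nibble of the part number.
 */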
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

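/*
 * Detect CPUs whose cache type register disagrees, in both its raw and its
 * effective form, with the system-wide sanitised CTR_EL0 value.
 */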
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want all CPUs in the system to expose a consistent CTR_EL0 so
	 * that applications behave correctly across migration.
	 *
	 * If a CPU has CTR_EL0.IDC set but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, make sure either the raw CTR_EL0 or the effective CTR_EL0
	 * matches the system-wide copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

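/* Index of the last EL2 vector slot handed out; -1 means none in use. */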
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

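/*
 * Copy the workaround sequence into every 0x80-byte vector entry of the
 * chosen 2K slot, then make it visible to the instruction stream.
 */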
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

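/*
 * Install a branch-predictor hardening callback for this CPU, reusing an
 * existing EL2 vector slot if another CPU already registered the same
 * callback.
 */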
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	/*
	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
	 * we're a guest. Skip the hyp-vectors work.
	 */
	if (!hyp_vecs_start) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */

#include <uapi/linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

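/*
 * Overwrite the return-address (link) stack with benign entries via a
 * string of sixteen dummy calls, then restore the original x30.
 */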
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_hvc_arch_workaround_1;
			/* This is a guest, no need to patch KVM vectors */
			smccc_start = NULL;
			smccc_end = NULL;
			break;
		default:
			return -1;
		}
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_smc_arch_workaround_1;
			smccc_start = __smccc_workaround_1_smc_start;
			smccc_end = __smccc_workaround_1_smc_end;
			break;
		default:
			return -1;
		}
		break;

	default:
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}

DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

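/* Accepted values for the "ssbd=" kernel command-line parameter */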
static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

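/*
 * Alternative instruction callback: patch in the HVC or SMC instruction
 * matching the SMCCC conduit discovered at boot.
 */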
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARM_SMCCC_ARCH_WORKAROUND_2 handling if the SSBD state
	 * is dynamically allowed to be disabled.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

void arm64_set_ssbd_mitigation(bool state)
{
	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	if (this_cpu_has_cap(ARM64_SSBS)) {
		/* PSTATE.SSBS == 0 means speculative store bypass is disabled */
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}

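/*
 * Probe firmware for ARM_SMCCC_ARCH_WORKAROUND_2 and decide whether this
 * CPU needs the SSBD mitigation, honouring the "ssbd=" command-line policy.
 */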
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}

/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{},
};

#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	u32 midr = read_cpuid_id();
	/* Cortex-A76 r0p0 - r3p1 */
	struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

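/* Helpers for building MIDR-matched capability entries */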
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask)				\
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a specific revision of given model variant */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all versions of the given CPU model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with same work around */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)


/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

int get_spectre_v2_workaround_state(void)
{
	if (__spectrev2_safe)
		return ARM64_BP_HARDEN_NOT_REQUIRED;

	if (!__hardenbp_enab)
		return ARM64_BP_HARDEN_UNKNOWN;

	return ARM64_BP_HARDEN_WA_NEEDED;
}

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{ /* sentinel */ }
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}

#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI

static const struct midr_range arm64_repeat_tlbi_cpus[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
#endif
	{},
};

#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01]: ARM erratum 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	{},
};
#endif
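
/* Master list of CPU errata workarounds, matched against each CPU at boot */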
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, ARM erratum 1286807",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
	},
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
		.midr_range_list = arm64_ssb_cpus,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
	{
	/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1165522",
		.capability = ARM64_WORKAROUND_1165522,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
	},
#endif
	{
	}
};

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	switch (get_spectre_v2_workaround_state()) {
	case ARM64_BP_HARDEN_NOT_REQUIRED:
		return sprintf(buf, "Not affected\n");
	case ARM64_BP_HARDEN_WA_NEEDED:
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
	case ARM64_BP_HARDEN_UNKNOWN:
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		if (IS_ENABLED(CONFIG_ARM64_SSBD))
			return sprintf(buf,
			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	return sprintf(buf, "Vulnerable\n");
}