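// SPDX-License-Identifier: GPL-2.0-only
/*
 * Detection, reporting and mitigation of Spectre v1, v2, v3a and v4 on
 * arm64: the sysfs vulnerability files, the cpufeature detection hooks and
 * the PR_SPEC_STORE_BYPASS prctl() interface.
 */
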
#include <linux/arm-smccc.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/virt.h>

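/*
 * We try to ensure that the mitigation state can never change as the result
 * of onlining a late CPU.
 */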
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;

		/* The state must not change once capabilities are finalized. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}

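/*
 * Spectre v1.
 *
 * The kernel can't protect userspace from this one. The mitigation is the
 * __user pointer sanitisation applied throughout the kernel, so all we do
 * here is report it via sysfs.
 */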
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

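/*
 * Spectre v2.
 *
 * A CPU is either:
 *
 * - Unaffected, as advertised by ID_AA64PFR0_EL1.CSV2 or by being on the
 *   safe list below.
 * - Mitigated by a firmware call (ARM_SMCCC_ARCH_WORKAROUND_1), possibly
 *   combined with a CPU-specific sequence such as the Falkor link-stack
 *   sanitisation.
 * - Vulnerable.
 *
 * Detection and mitigation are performed per CPU, since big.LITTLE systems
 * can mix cores from different categories.
 */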
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);

static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ }
	};

	/* If the CPU advertises CSV2, it is not affected. */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, it may be on our list of unaffected CPUs. */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);

	/*
	 * If the kernel doesn't own EL2 (e.g. we're a guest), there are no
	 * hyp vectors to set up here.
	 */
	if (!is_hyp_mode_available())
		return;

	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov %0, x30 \n"
		     ".rept 16 \n"
		     "bl . + 4 \n"
		     ".endr \n"
		     "mov x30, %0 \n"
		     : "=&r" (tmp));
}

static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
	u32 midr = read_cpuid_id();

	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return NULL;

	return qcom_link_stack_sanitisation;
}

static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific hardening sequence if one exists, otherwise
	 * fall back to the firmware call.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}

void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}

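/*
 * Spectre v3a.
 *
 * Affected CPUs (Cortex-A57 and Cortex-A72, per the list below) have EL2 use
 * an indirect trampoline slot for the hyp vectors, so that a guest can't use
 * the contents of VBAR_EL2 to learn the hypervisor's layout.
 */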
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}

void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}

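/*
 * Spectre v4 (a.k.a. Speculative Store Bypass).
 *
 * A CPU is either:
 *
 * - Unaffected, because it is on the safe list below.
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (ARM_SMCCC_ARCH_WORKAROUND_2, also
 *   known as SSBD).
 * - Vulnerable.
 *
 * The mitigation can be enforced globally, disabled globally, or applied
 * dynamically per task via prctl(). The dynamic case is the messy one: the
 * per-task state has to be reflected in PSTATE on context switch and, where
 * firmware is involved, toggled on kernel entry/exit.
 */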
static enum mitigation_state spectre_v4_state;

/*
 * Set on CPUs where the firmware mitigation has to be applied dynamically
 * (see spectre_v4_enable_fw_mitigation()).
 */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);

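/*
 * The "ssbd=" command-line parameter selects one of the three policies
 * above: "force-on" (mitigation always enabled), "force-off" (mitigation
 * always disabled) or "kernel" (mitigation applied dynamically). These
 * helpers wrap the chosen policy, preferring "off" if mitigations have been
 * globally disabled via cpu_mitigations_off().
 */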
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

	return ret;
}

/* Do we need to toggle the mitigation state on entry to/from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}

static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* Otherwise, the hardware mitigation is PSTATE.SSBS. */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}

/*
 * The kernel toggles PSTATE.SSBS with the MSR (immediate) form (see
 * set_pstate_ssbs()); if that UNDEFs on the current CPU, emulate the write
 * here. Only kernel-mode writes are emulated; userspace takes the normal
 * undefined-instruction path.
 */
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};

static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);
	enum mitigation_state state;

	/* Nothing to do here unless this CPU implements SSBS. */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		set_pstate_ssbs(1);
		return SPECTRE_VULNERABLE;
	}

	/* Enable the mitigation for the kernel by clearing PSTATE.SSBS. */
	set_pstate_ssbs(0);
	return SPECTRE_MITIGATED;
}

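/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that we
 * fall through and check whether firmware needs to be called on this CPU.
 */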
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (spectre_v4_mitigations_off())
		return;

	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

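/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * so that the mitigation is handled by firmware, using whichever SMCCC
 * conduit was detected at boot.
 */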
void __init spectre_v4_patch_fw_mitigation_conduit(struct alt_instr *alt,
						   __le32 *origptr,
						   __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}

static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}

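/*
 * Per-task control of the Spectre-v4 mitigation via the PR_SPEC_STORE_BYPASS
 * prctl(). A task may enable or disable speculative store bypass for itself,
 * subject to the global mitigation policy selected on the command line.
 */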
/* Enable the mitigation: speculative store bypass is disabled for @task. */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

/* Disable the mitigation: speculative store bypass is allowed for @task. */
static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}

static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable the mitigation. */
		/*
		 * Force-disabled speculation can't be re-enabled from
		 * userspace.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force-disable speculation: force-enable the mitigation. */
		/*
		 * If the mitigation is forced off, speculation can't be
		 * disabled at all.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable the mitigation. */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable the mitigation. */
		/*
		 * If the mitigation state is forced on or off, the prctl
		 * interface can't be used to change it, so reject the request.
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	spectre_v4_enable_task_mitigation(task);
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled on the command line. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* The mitigation is dynamic, so check the per-task state. */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}