#ifndef _ASM_X86_SPEC_CTRL_H
#define _ASM_X86_SPEC_CTRL_H

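/*
 * Flag bits in spec_ctrl_pcp.enabled: whether MSR_IA32_SPEC_CTRL is
 * written on kernel entry and/or on kernel exit.
 */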
#define SPEC_CTRL_PCP_IBRS_ENTRY	(1<<0)
#define SPEC_CTRL_PCP_IBRS_EXIT		(1<<1)

#define SPEC_CTRL_PCP_IBRS	(SPEC_CTRL_PCP_IBRS_ENTRY|SPEC_CTRL_PCP_IBRS_EXIT)

#define IBRS_ENABLED_PCP	PER_CPU_VAR(spec_ctrl_pcp + \
					    KERNEL_IBRS_SPEC_CTRL_enabled)
#define IBRS_ENTRY_PCP		PER_CPU_VAR(spec_ctrl_pcp + \
					    KERNEL_IBRS_SPEC_CTRL_entry)
#define IBRS_EXIT_PCP		PER_CPU_VAR(spec_ctrl_pcp + \
					    KERNEL_IBRS_SPEC_CTRL_exit)
#define IBRS_HI32_PCP		PER_CPU_VAR(spec_ctrl_pcp + \
					    KERNEL_IBRS_SPEC_CTRL_hi32)

#ifdef __ASSEMBLY__

#include <asm/msr-index.h>

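/*
 * Write the kernel-entry SPEC_CTRL MSR value (low 32 bits from entry,
 * high 32 bits from hi32); clobbers %eax, %ecx and %edx.
 */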
.macro __IBRS_ENTRY
	movl	IBRS_HI32_PCP, %edx
	movl	IBRS_ENTRY_PCP, %eax
	movl	$MSR_IA32_SPEC_CTRL, %ecx
	wrmsr
.endm

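/*
 * Set the SPEC_CTRL MSR to the kernel-entry value, preserving all
 * registers.  When entry IBRS is disabled, an lfence is executed
 * instead to block speculation past this point.
 */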
.macro IBRS_ENTRY
	testl	$SPEC_CTRL_PCP_IBRS_ENTRY, IBRS_ENABLED_PCP
	jz	.Lskip_\@

	pushq	%rax
	pushq	%rcx
	pushq	%rdx
	__IBRS_ENTRY
	popq	%rdx
	popq	%rcx
	popq	%rax
	jmp	.Lend_\@

.Lskip_\@:
	lfence
.Lend_\@:
.endm

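/* Like IBRS_ENTRY, but %eax, %ecx and %edx are clobbered. */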
.macro IBRS_ENTRY_CLOBBER
	testl	$SPEC_CTRL_PCP_IBRS_ENTRY, IBRS_ENABLED_PCP
	jz	.Lskip_\@

	__IBRS_ENTRY
	jmp	.Lend_\@

.Lskip_\@:
	lfence
.Lend_\@:
.endm

#define NO_IBRS_RESTORE		(-1)	/* no restore on kernel exit */

/*
 * Save the current SPEC_CTRL MSR value, if any, and write the kernel
 * entry value.  save_reg is set to the old MSR value when a restore is
 * needed on kernel exit, and is left as NO_IBRS_RESTORE otherwise.
 */
.macro IBRS_ENTRY_SAVE_AND_CLOBBER save_reg:req
	movl	$NO_IBRS_RESTORE, \save_reg
	testl	$SPEC_CTRL_PCP_IBRS_ENTRY, IBRS_ENABLED_PCP
	jz	.Lskip_\@

	movl	$MSR_IA32_SPEC_CTRL, %ecx
	rdmsr

	/*
	 * If the MSR already holds the kernel entry value, it is rewritten
	 * anyway for its barrier-like effect, but save_reg keeps
	 * NO_IBRS_RESTORE so no restore is done on exit.  %edx still holds
	 * the high 32 bits from the rdmsr and is written back unchanged.
	 */
	movl	IBRS_ENTRY_PCP, %ecx
	cmpl	%eax, %ecx
	je	.Lwrmsr_\@

	movl	%eax, \save_reg
	movl	%ecx, %eax		/* switch to the kernel entry value */
.Lwrmsr_\@:
	movl	$MSR_IA32_SPEC_CTRL, %ecx
	wrmsr
	jmp	.Lend_\@

.Lskip_\@:
	lfence
.Lend_\@:
.endm

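/*
 * Write the kernel-exit SPEC_CTRL MSR value (low 32 bits from exit,
 * high 32 bits from hi32); clobbers %eax, %ecx and %edx.
 */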
.macro __IBRS_EXIT
	movl	IBRS_HI32_PCP, %edx
	movl	IBRS_EXIT_PCP, %eax
	movl	$MSR_IA32_SPEC_CTRL, %ecx
	wrmsr
.endm

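/* Set the SPEC_CTRL MSR to the kernel-exit value, preserving all registers. */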
.macro IBRS_EXIT
	testl	$SPEC_CTRL_PCP_IBRS_EXIT, IBRS_ENABLED_PCP
	jz	.Lskip_\@

	pushq	%rax
	pushq	%rcx
	pushq	%rdx
	__IBRS_EXIT
	popq	%rdx
	popq	%rcx
	popq	%rax

.Lskip_\@:
.endm

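/*
 * Restore the SPEC_CTRL MSR value saved by IBRS_ENTRY_SAVE_AND_CLOBBER.
 * No write is done when IBRS is disabled or when nothing was saved
 * (save_reg still holds NO_IBRS_RESTORE).  Clobbers %eax, %ecx and %edx.
 */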
.macro IBRS_EXIT_RESTORE_CLOBBER save_reg:req
	testl	$SPEC_CTRL_PCP_IBRS, IBRS_ENABLED_PCP
	jz	.Lskip_\@

	cmpl	$NO_IBRS_RESTORE, \save_reg
	je	.Lskip_\@

	movl	$MSR_IA32_SPEC_CTRL, %ecx
	movl	IBRS_HI32_PCP, %edx
	movl	\save_reg, %eax
	wrmsr

.Lskip_\@:
.endm

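/* Like IBRS_EXIT, but %eax, %ecx and %edx are clobbered. */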
.macro IBRS_EXIT_CLOBBER
	testl	$SPEC_CTRL_PCP_IBRS_EXIT, IBRS_ENABLED_PCP
	jz	.Lskip_\@

	__IBRS_EXIT

.Lskip_\@:
.endm

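/*
 * The following macros zero groups of general-purpose registers so that
 * stale, possibly user-controlled values cannot feed speculative-execution
 * gadgets.  CLEAR_EXTRA_REGS covers the callee-saved registers held in the
 * extra part of pt_regs.
 */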
.macro CLEAR_EXTRA_REGS
	xorq	%r15, %r15
	xorq	%r14, %r14
	xorq	%r13, %r13
	xorq	%r12, %r12
	xorq	%rbp, %rbp
	xorq	%rbx, %rbx
.endm

.macro CLEAR_R8_TO_R15
	xorq	%r15, %r15
	xorq	%r14, %r14
	xorq	%r13, %r13
	xorq	%r12, %r12
	xorq	%r11, %r11
	xorq	%r10, %r10
	xorq	%r9, %r9
	xorq	%r8, %r8
.endm

.macro CLEAR_R10_TO_R15
	xorq	%r15, %r15
	xorq	%r14, %r14
	xorq	%r13, %r13
	xorq	%r12, %r12
	xorq	%r11, %r11
	xorq	%r10, %r10
.endm

#else /* __ASSEMBLY__ */

#include <linux/ptrace.h>
#include <asm/microcode.h>
#include <asm/nospec-branch.h>

extern struct static_key retp_enabled_key;
extern struct static_key ibrs_present_key;
extern struct static_key ssbd_userset_key;

/* Sentinel (never a valid SPEC_CTRL MSR content) used to force a rewrite */
#define SPEC_CTRL_MSR_REFRESH	((unsigned)-1)

extern void spec_ctrl_rescan_cpuid(void);
extern void spec_ctrl_init(void);
extern void spec_ctrl_cpu_init(void);
extern void ssb_select_mitigation(void);
extern void ssb_print_mitigation(void);

bool spec_ctrl_force_enable_ibrs(void);
bool spec_ctrl_cond_enable_ibrs(bool full_retpoline);
bool spec_ctrl_enable_ibrs_always(void);
void spec_ctrl_enable_ibrs_enhanced(void);
bool spec_ctrl_force_enable_ibp_disabled(void);
bool spec_ctrl_cond_enable_ibp_disabled(void);
void spec_ctrl_enable_retpoline(void);
bool spec_ctrl_enable_retpoline_ibrs_user(void);
void spec_ctrl_set_ssbd(bool ssbd_on);

enum spectre_v2_mitigation spec_ctrl_get_mitigation(void);

bool unprotected_firmware_begin(void);
void unprotected_firmware_end(bool ibrs_on);

/*
 * Per-cpu data used by the IBRS entry/exit assembly macros above: the
 * SPEC_CTRL MSR values to write on kernel entry and exit, plus flags
 * saying whether those writes are enabled.
 */
struct kernel_ibrs_spec_ctrl {
	unsigned int	enabled;	/* SPEC_CTRL_PCP_IBRS_{ENTRY,EXIT} */
	unsigned int	exit;		/* MSR value written on kernel exit */
	union {
		struct {
			/*
			 * Must mirror entry64: on little-endian x86,
			 * entry is the low and hi32 the high 32 bits
			 * of the 64-bit kernel-entry MSR value.
			 */
			unsigned int	entry;
			unsigned int	hi32;
		};
		u64	entry64;
	};
};

DECLARE_PER_CPU_USER_MAPPED(struct kernel_ibrs_spec_ctrl, spec_ctrl_pcp);

extern void x86_amd_ssbd_enable(void);

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
/* The SPEC_CTRL bits that a guest is allowed to change */
extern u64 x86_spec_ctrl_mask;

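/*
 * Convert between the TIF_SSBD thread flag and the SSBD bit of the
 * SPEC_CTRL MSR (or the AMD LS_CFG equivalent).
 */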
static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
{
	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
{
	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
	return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
}

static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
{
	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
}

#ifdef CONFIG_SMP
extern void speculative_store_bypass_ht_init(void);
#else
static inline void speculative_store_bypass_ht_init(void) { }
#endif

extern void speculative_store_bypass_update(unsigned long tif);

static inline void speculative_store_bypass_update_current(void)
{
	speculative_store_bypass_update(current_thread_info()->flags);
}

enum {
	IBRS_DISABLED,

	/* IBRS on in the kernel, off in userland */
	IBRS_ENABLED,

	/* IBRS left on in both kernel and userland */
	IBRS_ENABLED_ALWAYS,

	/* IBRS on in userland, retpoline protecting the kernel */
	IBRS_ENABLED_USER,

	/* enhanced IBRS: set once and left on everywhere */
	IBRS_ENHANCED,

	IBRS_MAX = IBRS_ENHANCED,
};

static __always_inline int cpu_has_spec_ctrl(void)
{
	return static_key_false(&ibrs_present_key);
}

static __always_inline bool ibrs_enabled_kernel(void)
{
	/*
	 * spec_ctrl_pcp.entry is the SPEC_CTRL value written on kernel
	 * entry.  Its IBRS bit is only set when kernel IBRS is in use,
	 * so no separate cpu_has_spec_ctrl() check is needed.
	 */
	unsigned int ibrs = __this_cpu_read(spec_ctrl_pcp.entry);

	return ibrs & SPEC_CTRL_IBRS;
}

static inline bool retp_enabled(void)
{
	return static_key_false(&retp_enabled_key);
}

static inline bool retp_enabled_full(void)
{
	return retp_enabled() && retp_compiler();
}

static inline bool ibpb_enabled(void)
{
	return (boot_cpu_has(X86_FEATURE_IBPB) &&
		(ibrs_enabled_kernel() || retp_enabled()));
}

/*
 * x86_virt_spec_ctrl - handle guest/host SPEC_CTRL and virtual SPEC_CTRL
 * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
 * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
 *				(may get translated to MSR_AMD64_LS_CFG PCP)
 * @setguest:			true when entering the guest, false when
 *				restoring the host state
 *
 * Avoids writing to the MSR if the content/bits are the same.
 */
static __always_inline void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	/*
	 * The host baseline is the per-cpu kernel-entry SPEC_CTRL value.
	 */
	u64 msr, guestval, hostval = this_cpu_read(spec_ctrl_pcp.entry64);
	struct thread_info *ti = current_thread_info();
	bool write_msr;

	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values: keep the
		 * non-modifiable bits from the host value and take only
		 * the modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/*
		 * The MSR is written when host and guest values differ,
		 * and also on every return to the host while entry IBRS
		 * is enabled, because the MSR write itself acts as a
		 * barrier on some IBRS implementations.
		 */
		write_msr = (!setguest &&
			     (this_cpu_read(spec_ctrl_pcp.enabled) &
			      SPEC_CTRL_PCP_IBRS_ENTRY)) ||
			    (hostval != guestval);

		if (unlikely(write_msr)) {
			msr = setguest ? guestval : hostval;
			native_wrmsrl(MSR_IA32_SPEC_CTRL, msr);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		goto ret;

	/*
	 * If SSBD is not user settable, the host value comes from
	 * x86_spec_ctrl_base; otherwise it follows the current thread's
	 * TIF_SSBD flag.
	 */
	if (!static_key_false(&ssbd_userset_key))
		hostval = x86_spec_ctrl_base & SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculative_store_bypass_update(tif);
	}

ret:
	if (!setguest) {
		/*
		 * On return to the host, full retpoline protection makes
		 * any further speculation barrier unnecessary.
		 */
		if (retp_enabled_full())
			return;

		/* Otherwise block speculation past this point. */
		rmb();
	}
}

/**
 * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
 * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
 * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
 *				(may get translated to MSR_AMD64_LS_CFG PCP)
 *
 * Avoids writing to the MSR if the content/bits are the same
 */
static __always_inline void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
						    u64 guest_virt_spec_ctrl)
{
	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
}

/**
 * x86_spec_ctrl_restore_host - Restore host speculation control registers
 * @guest_spec_ctrl:		The guest content of MSR_SPEC_CTRL
 * @guest_virt_spec_ctrl:	The guest controlled bits of MSR_VIRT_SPEC_CTRL
 *				(may get translated to MSR_AMD64_LS_CFG PCP)
 *
 * Avoids writing to the MSR if the content/bits are the same
 */
static __always_inline void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
						       u64 guest_virt_spec_ctrl)
{
	x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
}

/*
 * Turn kernel IBRS protection on.  When IBRS is not in use, fall back to
 * a speculation barrier unless full retpoline protection is in place.
 */
static __always_inline void spec_ctrl_ibrs_on(void)
{
	/*
	 * With enhanced IBRS, the IBRS bit stays set at all times, so
	 * the MSR never needs to be rewritten here.
	 */
	if (static_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
		if (ibrs_enabled_kernel())
			return;

	} else if (ibrs_enabled_kernel()) {
		u64 spec_ctrl = this_cpu_read(spec_ctrl_pcp.entry64);

		native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
		return;
	}

	/* Full retpoline protection needs no extra barrier. */
	if (retp_enabled_full())
		return;

	/* Block speculation past this point. */
	rmb();
}

static __always_inline void spec_ctrl_ibrs_off(void)
{
	if (!static_cpu_has(X86_FEATURE_IBRS_ENHANCED) &&
	    ibrs_enabled_kernel()) {
		u64 spec_ctrl = x86_spec_ctrl_base;

		/* SSBD is controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
			spec_ctrl |= ssbd_tif_to_spec_ctrl(
					current_thread_info()->flags);

		native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
	}
}

/*
 * These functions are called before calling into firmware.  Firmware may
 * contain indirect branches that retpolines cannot protect, so when the
 * kernel is protected by retpoline alone, IBRS is turned on for the
 * duration of the firmware call.
 *
 * The returned ibrs_on value tells spec_ctrl_ibrs_off_firmware() whether
 * the MSR has to be restored afterwards, so IBRS cannot be left stuck on
 * if the mitigation mode changes while a CPU is inside firmware.
 */
static inline bool spec_ctrl_ibrs_on_firmware(void)
{
	bool ibrs_on = false;

	if (cpu_has_spec_ctrl() && retp_enabled() && !ibrs_enabled_kernel()) {
		u64 spec_ctrl = this_cpu_read(spec_ctrl_pcp.entry64) |
				SPEC_CTRL_IBRS;

		native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
		ibrs_on = true;
	} else {
		/* Block speculation past this point. */
		rmb();
	}

	return ibrs_on;
}

static inline void spec_ctrl_ibrs_off_firmware(bool ibrs_on)
{
	if (ibrs_on) {
		u64 spec_ctrl = this_cpu_read(spec_ctrl_pcp.entry64);

		/* Restore the normal kernel-entry value (IBRS bit clear). */
		native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
	} else {
		/* Block speculation past this point. */
		rmb();
	}
}

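/*
 * IBPB: flush indirect-branch-prediction state so that earlier, possibly
 * attacker-controlled branch targets cannot steer later indirect branches.
 */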
static inline void __spec_ctrl_ibpb(void)
{
	native_wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
}

static inline void spec_ctrl_ibpb(void)
{
	if (ibpb_enabled())
		__spec_ctrl_ibpb();
}

/*
 * Issue an IBPB on task switch when the incoming task is not
 * ptrace-accessible from the current context (i.e. different credentials).
 */
static inline void spec_ctrl_ibpb_if_different_creds(struct task_struct *next)
{
	if (ibpb_enabled() &&
	    (!next || __ptrace_may_access(next, PTRACE_MODE_IBPB))) {
		__spec_ctrl_ibpb();

		/* On SMEP hardware, also refill the RSB. */
		if (static_cpu_has(X86_FEATURE_SMEP))
			fill_RSB();
	}
}

extern enum ssb_mitigation ssb_mode;

#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_SPEC_CTRL_H */