// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <linux/acpi.h>
#include <linux/elf-randomize.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
#include <asm/proto.h>

#include "process.h"

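/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */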
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
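		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level.  Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 * Poison it.
		 */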
		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
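
		/*
		 * .sp1 is cpu_current_top_of_stack.  The init task never
		 * runs user code, but cpu_current_top_of_stack should still
		 * be well defined before the first context switch.
		 */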
		.sp1 = TOP_OF_INIT_STACK,

#ifdef CONFIG_X86_32
		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
#endif
	},
#ifdef CONFIG_X86_32
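	/*
	 * Note that the .io_bitmap member must be extra-big. This is because
	 * the CPU will access an additional byte beyond the end of the IO
	 * permission bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */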
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);

DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);

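/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */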
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif

	return fpu__copy(dst, src);
}

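/*
 * Free current thread data structures etc..
 */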
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
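		/*
		 * Careful, clear this in the TSS too:
		 */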
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	free_vm86(t);

	fpu__drop(fpu);
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear(&tsk->thread.fpu);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
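		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */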
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
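		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */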
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

DEFINE_PER_CPU(u64, msr_misc_features_shadow);

static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}

static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
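		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */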
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
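		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */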
		set_cpuid_faulting(false);
	}
	preempt_enable();
}

static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}

static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}

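/*
 * Called immediately after a successful exec.
 */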
void arch_setup_new_exec(void)
{
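	/* If cpuid was previously disabled for this task, re-enable it. */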
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();
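
	/*
	 * Don't inherit TIF_SSBD across exec boundary when
	 * PR_SPEC_DISABLE_NOEXEC is used.
	 */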
	if (test_thread_flag(TIF_SSBD) &&
	    task_spec_ssb_noexec(current)) {
		clear_thread_flag(TIF_SSBD);
		task_clear_spec_ssb_disable(current);
		task_clear_spec_ssb_noexec(current);
		speculation_ctrl_update(task_thread_info(current)->flags);
	}
}

static inline void switch_to_bitmap(struct thread_struct *prev,
				    struct thread_struct *next,
				    unsigned long tifp, unsigned long tifn)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);

	if (tifn & _TIF_IO_BITMAP) {
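		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */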
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
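
		/*
		 * Make sure that the TSS limit is correct for the CPU
		 * to notice the IO bitmap.
		 */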
		refresh_tss_limit();
	} else if (tifp & _TIF_IO_BITMAP) {
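		/*
		 * Clear any possible leftover bits:
		 */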
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
}

#ifdef CONFIG_SMP

struct ssb_state {
	struct ssb_state	*shared_state;
	raw_spinlock_t		lock;
	unsigned int		disable_state;
	unsigned long		local_state;
};

#define LSTATE_SSB	0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);

void speculative_store_bypass_ht_init(void)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	st->local_state = 0;
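
	/*
	 * Shared state setup happens once on the first bringup
	 * of the CPU. It's not destroyed on CPU hotunplug.
	 */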
	if (st->shared_state)
		return;

	raw_spin_lock_init(&st->lock);
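
	/*
	 * Go over HT siblings and check whether one of them has set up the
	 * shared state pointer already.
	 */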
	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu == this_cpu)
			continue;

		if (!per_cpu(ssb_state, cpu).shared_state)
			continue;
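
		/* Link it to the state of the sibling: */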
		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
		return;
	}
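
	/*
	 * First HT sibling to come up on the core.  Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */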
	st->shared_state = st;
}
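
/*
 * Logic is: The first HT sibling to enable SSBD writes the MSR for the
 * whole core; further siblings only bump the shared refcount. The last
 * sibling to disable SSBD clears the MSR again. shared_state->lock
 * serializes the refcount (disable_state) updates between siblings.
 */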
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	u64 msr = x86_amd_ls_cfg_base;

	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrl(MSR_AMD64_LS_CFG, msr);
		return;
	}

	if (tifn & _TIF_SSBD) {
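		/*
		 * Since this can race with prctl(), block reentry on the
		 * same CPU.
		 */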
		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
			return;

		msr |= x86_amd_ls_cfg_ssbd_mask;

		raw_spin_lock(&st->shared_state->lock);
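		/* First sibling enables SSBD: */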
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		st->shared_state->disable_state++;
		raw_spin_unlock(&st->shared_state->lock);
	} else {
		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
			return;

		raw_spin_lock(&st->shared_state->lock);
		st->shared_state->disable_state--;
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		raw_spin_unlock(&st->shared_state->lock);
	}
}
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
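	/*
	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
	 * so ssbd_tif_to_spec_ctrl() just works.
	 */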
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}

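/*
 * Update the MSRs managing speculation control, during context switch.
 *
 * tifp: Previous task's thread flags
 * tifn: Next task's thread flags
 */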
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
						      unsigned long tifn)
{
	unsigned long tif_diff = tifp ^ tifn;
	u64 msr = x86_spec_ctrl_base;
	bool updmsr = false;

	lockdep_assert_irqs_disabled();
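
	/*
	 * If TIF_SSBD is different, select the proper mitigation
	 * method. Note that if SSBD mitigation is disabled or permanently
	 * enabled this branch can't be taken because nothing can set
	 * TIF_SSBD.
	 */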
	if (tif_diff & _TIF_SSBD) {
		if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
			amd_set_ssb_virt_state(tifn);
		} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
			amd_set_core_ssb_state(tifn);
		} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
			   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			msr |= ssbd_tif_to_spec_ctrl(tifn);
			updmsr = true;
		}
	}
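
	/*
	 * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
	 * otherwise avoid the MSR write.
	 */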
	if (IS_ENABLED(CONFIG_SMP) &&
	    static_branch_unlikely(&switch_to_cond_stibp)) {
		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
		msr |= stibp_tif_to_spec_ctrl(tifn);
	}

	if (updmsr)
		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}

static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
		if (task_spec_ssb_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SSBD);
		else
			clear_tsk_thread_flag(tsk, TIF_SSBD);

		if (task_spec_ib_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
		else
			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
	}

	return task_thread_info(tsk)->flags;
}

void speculation_ctrl_update(unsigned long tif)
{
	unsigned long flags;
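
	/* Forced update. Make sure all relevant TIF flags are different */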
	local_irq_save(flags);
	__speculation_ctrl_update(~tif, tif);
	local_irq_restore(flags);
}

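/* Called from seccomp/prctl update */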
void speculation_ctrl_update_current(void)
{
	preempt_disable();
	speculation_ctrl_update(speculation_ctrl_update_tif(current));
	preempt_enable();
}

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev, *next;
	unsigned long tifp, tifn;

	prev = &prev_p->thread;
	next = &next_p->thread;

	tifn = READ_ONCE(task_thread_info(next_p)->flags);
	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
	switch_to_bitmap(prev, next, tifp, tifn);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
		__speculation_ctrl_update(tifp, tifn);
	} else {
		speculation_ctrl_update_tif(prev_p);
		tifn = speculation_ctrl_update_tif(next_p);
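
		/* Enforce MSR update to ensure consistent state */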
		__speculation_ctrl_update(~tifn, tifn);
	}
}

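/*
 * Idle related variables and functions
 */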
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

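/*
 * Called from the generic idle loop.
 */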
void arch_cpu_idle(void)
{
	x86_idle();
}

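/*
 * We use this if we don't have any better idle routine..
 */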
void __cpuidle default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
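	/*
	 * Remove this CPU:
	 */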
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
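
	/*
	 * Use wbinvd on processors that support SME. This provides support
	 * for performing a successful kexec when going from SME inactive
	 * to SME active (or vice-versa). The cache must be cleared so that
	 * if there are entries with the same physical address, both with and
	 * without the encryption bit, they don't race each other when flushed
	 * and potentially end up with the wrong entry being committed to
	 * memory.
	 */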
	if (boot_cpu_has(X86_FEATURE_SME))
		native_wbinvd();
	for (;;) {
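		/*
		 * Use native_halt() so that memory contents don't change
		 * (stack usage and variables) after possibly issuing the
		 * native_wbinvd() above.
		 */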
		native_halt();
	}
}

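/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 */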
static void amd_e400_idle(void)
{
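	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */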
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();
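
	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */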
	local_irq_disable();
	tick_broadcast_exit();
	local_irq_enable();
}

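/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT if it's been validated, HALT otherwise.
 */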
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}

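/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */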
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}

void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}

void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;
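
	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */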
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
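		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */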
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
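		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */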
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}

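/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */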
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
	int count = 0;

	if (p == current || p->state == TASK_RUNNING)
		return 0;

	if (!try_get_task_stack(p))
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		goto out;
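
	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start
	 *
	 * The tasks stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */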
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start;

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		goto out;

	fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
	do {
		if (fp < bottom || fp > top)
			goto out;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip)) {
			ret = ip;
			goto out;
		}
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);

out:
	put_task_stack(p);
	return ret;
}

long do_arch_prctl_common(struct task_struct *task, int option,
			  unsigned long cpuid_enabled)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(task, cpuid_enabled);
	}

	return -EINVAL;
}