#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS is kept page-aligned and
 * completely CPU-local, so each CPU gets its own copy.
 */
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level.  Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 * Poison it.
		 */
		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,

#ifdef CONFIG_X86_64
		/*
		 * .sp1 is cpu_current_top_of_stack.  The init task never
		 * runs user code, but cpu_current_top_of_stack should still
		 * be well defined before the first context switch.
		 */
		.sp1 = TOP_OF_INIT_STACK,
#endif

#ifdef CONFIG_X86_32
		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
#endif
	},
#ifdef CONFIG_X86_32
	/*
	 * Note that the .io_bitmap member must be extra-big. This is because
	 * the CPU will access an additional byte beyond the end of the IO
	 * permission bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);

DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);

/*
 * This gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif

	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	free_vm86(t);

	fpu__drop(fpu);
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear(&tsk->thread.fpu);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}
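
/*
 * get_tsc_mode()/set_tsc_mode() below back the PR_GET_TSC/PR_SET_TSC
 * prctl()s. An illustrative (not authoritative) userspace sketch:
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	// rdtsc in this task -> SIGSEGV
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE);	// rdtsc permitted again
 */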

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

DEFINE_PER_CPU(u64, msr_misc_features_shadow);

static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}

static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}

static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}

static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
	if (!static_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}
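
/*
 * Reached via arch_prctl(2); see do_arch_prctl_common() at the bottom of
 * this file. Illustrative userspace sketch (constants from <asm/prctl.h>):
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_CPUID, 0);	// CPUID now faults
 *	syscall(SYS_arch_prctl, ARCH_SET_CPUID, 1);	// CPUID works again
 *	syscall(SYS_arch_prctl, ARCH_GET_CPUID);	// returns 0 or 1
 */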

/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();
}

static inline void switch_to_bitmap(struct tss_struct *tss,
				    struct thread_struct *prev,
				    struct thread_struct *next,
				    unsigned long tifp, unsigned long tifn)
{
	if (tifn & _TIF_IO_BITMAP) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));

		/*
		 * Make sure that the TSS limit is correct for the CPU
		 * to notice the IO bitmap.
		 */
		refresh_tss_limit();
	} else if (tifp & _TIF_IO_BITMAP) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
}

#ifdef CONFIG_SMP

struct ssb_state {
	struct ssb_state	*shared_state;
	raw_spinlock_t		lock;
	unsigned int		disable_state;
	unsigned long		local_state;
};

#define LSTATE_SSB	0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);

void speculative_store_bypass_ht_init(void)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	st->local_state = 0;

	/*
	 * Shared state setup happens once on the first bringup
	 * of the CPU. It's not destroyed on CPU hotunplug.
	 */
	if (st->shared_state)
		return;

	raw_spin_lock_init(&st->lock);

	/*
	 * Go over HT siblings and check whether one of them has set up the
	 * shared state pointer already.
	 */
	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu == this_cpu)
			continue;

		if (!per_cpu(ssb_state, cpu).shared_state)
			continue;

		/* Link it to the state of the sibling: */
		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
		return;
	}

	/*
	 * First HT sibling to come up on the core.  Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */
	st->shared_state = st;
}

/*
 * Logic is:
 *
 * - When one of the two SMT siblings requests a disabled speculative
 *   store bypass, it is enforced for both siblings, since LS_CFG is a
 *   core-shared MSR on ZEN.
 *
 * - SSB is only re-enabled when the last sibling that requested the
 *   disable drops its request, tracked via shared_state->disable_state.
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	u64 msr = x86_amd_ls_cfg_base;

	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrl(MSR_AMD64_LS_CFG, msr);
		return;
	}

	if (tifn & _TIF_SSBD) {
		/*
		 * Since this can race with prctl(), block reentry on the
		 * same CPU.
		 */
		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
			return;

		msr |= x86_amd_ls_cfg_ssbd_mask;

		raw_spin_lock(&st->shared_state->lock);
		/* First sibling enables SSBD: */
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		st->shared_state->disable_state++;
		raw_spin_unlock(&st->shared_state->lock);
	} else {
		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
			return;

		raw_spin_lock(&st->shared_state->lock);
		st->shared_state->disable_state--;
		/* Last sibling to drop its request re-enables SSB: */
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		raw_spin_unlock(&st->shared_state->lock);
	}
}
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif
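
/*
 * Worked example of the refcounting above (two SMT siblings A and B
 * sharing one ssb_state):
 *
 *	A sets _TIF_SSBD   -> disable_state 0->1, MSR written (SSBD on)
 *	B sets _TIF_SSBD   -> disable_state 1->2, MSR untouched
 *	A clears _TIF_SSBD -> disable_state 2->1, MSR untouched (B needs it)
 *	B clears _TIF_SSBD -> disable_state 1->0, MSR written (SSBD off)
 */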

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
	/*
	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
	 * so ssbd_tif_to_spec_ctrl() just works.
	 */
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}

static __always_inline void intel_set_ssb_state(unsigned long tifn)
{
	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);

	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}

static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
{
	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
		amd_set_ssb_virt_state(tifn);
	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		amd_set_core_ssb_state(tifn);
	else
		intel_set_ssb_state(tifn);
}

void speculative_store_bypass_update(unsigned long tif)
{
	preempt_disable();
	__speculative_store_bypass_update(tif);
	preempt_enable();
}
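
/*
 * Besides the context switch path (__switch_to_xtra() below), TIF_SSBD can
 * change for the current task via the PR_SET_SPECULATION_CTRL prctl()/seccomp
 * paths, which use the preemption-safe wrapper above.
 */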

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long tifp, tifn;

	prev = &prev_p->thread;
	next = &next_p->thread;

	tifn = READ_ONCE(task_thread_info(next_p)->flags);
	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
	switch_to_bitmap(tss, prev, next, tifp, tifn);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		/* Propagate TIF_BLOCKSTEP into the BTF bit of DEBUGCTL: */
		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if ((tifp ^ tifn) & _TIF_SSBD)
		__speculative_store_bypass_update(tifn);
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle loop.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine..
 */
void __cpuidle default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	/*
	 * Use wbinvd on processors that support SME. This provides support
	 * for performing a successful kexec when going from SME inactive
	 * to SME active (or vice-versa). The cache must be cleared so that
	 * if there are entries with the same physical address, both with and
	 * without the encryption bit, they don't race each other when flushed
	 * and potentially end up with the wrong entry being committed to
	 * memory.
	 */
	if (boot_cpu_has(X86_FEATURE_SME))
		native_wbinvd();
	for (;;) {
		/*
		 * Use native_halt() so that memory contents don't change
		 * (stack usage and variables) after possibly issuing the
		 * native_wbinvd() above.
		 */
		native_halt();
	}
}

/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */
	local_irq_disable();
	tick_broadcast_exit();
	local_irq_enable();
}

/*
 * Prefer MWAIT over HALT for the default C1 idle state on Intel CPUs
 * that implement MONITOR/MWAIT and are not affected by the
 * X86_BUG_MONITOR erratum. cpuidle may install something better later;
 * this only selects the boot-time default.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for the default C1 state. This invokes
 * MWAIT with interrupts enabled and no flags, which is backwards compatible
 * with the original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}
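
/*
 * Wakeup handshake in mwait_idle(): MONITOR is armed on this task's thread
 * flags word, so a remote set of TIF_NEED_RESCHED in that cache line breaks
 * MWAIT; the polling flag set by current_set_polling_and_test() lets the
 * scheduler skip the reschedule IPI entirely while this CPU is in MWAIT.
 */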

void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}

void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}

void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected, K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the cpuidle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
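
/*
 * I.e. the stack start is lowered by a random 0..8191 bytes and the result
 * rounded down to a 16-byte boundary, keeping the stack alignment the ABI
 * expects.
 */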

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	if (!try_get_task_stack(p))
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		goto out;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start
	 *
	 * The tasks stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start;

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		goto out;

	fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
	do {
		if (fp < bottom || fp > top)
			goto out;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip)) {
			ret = ip;
			goto out;
		}
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);

out:
	put_task_stack(p);
	return ret;
}

long do_arch_prctl_common(struct task_struct *task, int option,
			  unsigned long cpuid_enabled)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(task, cpuid_enabled);
	}

	return -EINVAL;
}