// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <linux/acpi.h>
#include <linux/elf-randomize.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/api.h>
#include <asm/fpu/sched.h>
#include <asm/fpu/xstate.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
#include <asm/spec_ctrl.h>
#include <asm/proto.h>

#include "process.h"
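/*
 * Per-CPU TSS segments. Threads are completely 'soft' on Linux: there are
 * no per-task TSSs, only this per-CPU one. It is page aligned because it
 * is exposed to the entry code via the CPU entry area.
 */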
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level. Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 * Poison it.
		 */
		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,

		/*
		 * .sp1 is cpu_current_top_of_stack. The init task never
		 * runs user code, but cpu_current_top_of_stack should still
		 * be well defined before the first context switch.
		 */
		.sp1 = TOP_OF_INIT_STACK,

#ifdef CONFIG_X86_32
		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
		.io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
#endif
	},
#ifdef CONFIG_X86_32
	/*
	 * Note that the .io_bitmap member must be extra-big. This is because
	 * the CPU will access an additional byte beyond the end of the IO
	 * permission bitmap. The extra byte must have all bits set and must
	 * be within the limit.
	 */
	.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);

DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
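/*
 * Called during fork()/clone() to copy the architecture specific parts of
 * the task_struct into the child.
 */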
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif
	/* Drop the copied pointer to current's fpstate */
	dst->thread.fpu.fpstate = NULL;

	return 0;
}

#ifdef CONFIG_X86_64
void arch_release_task_struct(struct task_struct *tsk)
{
	if (fpu_state_size_dynamic())
		fpstate_free(&tsk->thread.fpu);
}
#endif
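/*
 * Free current thread data structures etc..
 */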
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	free_vm86(t);

	fpu__drop(fpu);
}
static int set_new_tls(struct task_struct *p, unsigned long tls)
{
	struct user_desc __user *utls = (struct user_desc __user *)tls;

	if (in_ia32_syscall())
		return do_set_thread_area(p, -1, utls, 0);
	else
		return do_set_thread_area_64(p, ARCH_SET_FS, tls);
}

static inline int copy_io_bitmap(struct task_struct *tsk)
{
	if (likely(!test_tsk_thread_flag(current, TIF_IO_BITMAP)))
		return 0;

	tsk->thread.io_bitmap_ptr = kmemdup(current->thread.io_bitmap_ptr,
					    IO_BITMAP_BYTES, GFP_KERNEL);
	if (!tsk->thread.io_bitmap_ptr) {
		tsk->thread.io_bitmap_max = 0;
		return -ENOMEM;
	}
	set_tsk_thread_flag(tsk, TIF_IO_BITMAP);
	return 0;
}

static inline void free_io_bitmap(struct task_struct *tsk)
{
	if (tsk->thread.io_bitmap_ptr) {
		kfree(tsk->thread.io_bitmap_ptr);
		tsk->thread.io_bitmap_ptr = NULL;
		tsk->thread.io_bitmap_max = 0;
	}
}
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		    unsigned long arg, struct task_struct *p, unsigned long tls)
{
	struct inactive_task_frame *frame;
	struct fork_frame *fork_frame;
	struct pt_regs *childregs;
	int ret;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	/*
	 * For a new task use the RESET flags value since there is no before.
	 * All the status flags are zero; DF and all the system flags must
	 * also be 0, specifically IF must be 0 because we context switch to
	 * the new task with interrupts disabled.
	 */
	frame->flags = X86_EFLAGS_FIXED;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

#ifdef CONFIG_X86_64
	current_save_fsgs();
	p->thread.fsindex = current->thread.fsindex;
	p->thread.fsbase = current->thread.fsbase;
	p->thread.gsindex = current->thread.gsindex;
	p->thread.gsbase = current->thread.gsbase;

	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
#else
	p->thread.sp0 = (unsigned long) (childregs + 1);
#endif

	fpu_clone(p, clone_flags);

	/* Kernel thread ? */
	if (unlikely(p->flags & PF_KTHREAD)) {
		p->task_struct_rh->pkru = pkru_get_init_value();
		memset(childregs, 0, sizeof(struct pt_regs));
		kthread_frame_init(frame, sp, arg);
		return 0;
	}

	/*
	 * Clone current's PKRU value from hardware; the child starts with
	 * the same protection keys state as its parent.
	 */
	p->task_struct_rh->pkru = read_pkru();

	frame->bx = 0;
	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

#ifdef CONFIG_X86_32
	task_user_gs(p) = get_user_gs(current_pt_regs());
#endif

	ret = copy_io_bitmap(p);
	if (ret)
		return ret;

	/* Set a new TLS for the child thread? */
	if (clone_flags & CLONE_SETTLS) {
		ret = set_new_tls(p, tls);
		if (ret)
			free_io_bitmap(p);
	}
	return ret;
}

static void pkru_flush_thread(void)
{
	/*
	 * If PKRU is enabled the default PKRU value has to be loaded into
	 * the hardware right here (similar to context switch).
	 */
	pkru_write_default();
}
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu_flush_thread();
	pkru_flush_thread();
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
DEFINE_PER_CPU(u64, msr_misc_features_shadow);

static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}

static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}

static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}

static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}
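/*
 * Called immediately after a successful exec.
 */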
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();

	/*
	 * Don't inherit TIF_SSBD across exec boundary when
	 * PR_SPEC_DISABLE_NOEXEC is used.
	 */
	if (test_thread_flag(TIF_SSBD) &&
	    task_spec_ssb_noexec(current)) {
		clear_thread_flag(TIF_SSBD);
		task_clear_spec_ssb_disable(current);
		task_clear_spec_ssb_noexec(current);
		speculation_ctrl_update(task_thread_info(current)->flags);
	}
}

static inline void switch_to_bitmap(struct thread_struct *prev,
				    struct thread_struct *next,
				    unsigned long tifp, unsigned long tifn)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);

	if (tifn & _TIF_IO_BITMAP) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
		/*
		 * Make sure that the TSS limit is correct for the CPU
		 * to notice the IO bitmap.
		 */
		refresh_tss_limit();
	} else if (tifp & _TIF_IO_BITMAP) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
}
#ifdef CONFIG_SMP

struct ssb_state {
	struct ssb_state	*shared_state;
	raw_spinlock_t		lock;
	unsigned int		disable_state;
	unsigned long		local_state;
};

#define LSTATE_SSB	0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);

void speculative_store_bypass_ht_init(void)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	st->local_state = 0;

	/*
	 * Shared state setup happens once on the first bringup
	 * of the CPU. It's not destroyed on CPU hotunplug.
	 */
	if (st->shared_state)
		return;

	raw_spin_lock_init(&st->lock);

	/*
	 * Go over HT siblings and check whether one of them has set up the
	 * shared state pointer already.
	 */
	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu == this_cpu)
			continue;

		if (!per_cpu(ssb_state, cpu).shared_state)
			continue;

		/* Link it to the state of the sibling: */
		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
		return;
	}

	/*
	 * First HT sibling to come up on the core. Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */
	st->shared_state = st;
}
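/*
 * On ZEN the LS_CFG MSR is shared between SMT siblings: the first sibling
 * that enables SSBD writes the MSR core wide, and it is only written back
 * to the non-mitigated value once the last sibling has disabled SSBD again.
 * Pre-ZEN parts simply program the MSR per thread.
 */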
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	u64 msr = x86_amd_ls_cfg_base;

	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrl(MSR_AMD64_LS_CFG, msr);
		return;
	}

	if (tifn & _TIF_SSBD) {
		/*
		 * Since this can race with prctl(), block reentry on the
		 * same CPU.
		 */
		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
			return;

		msr |= x86_amd_ls_cfg_ssbd_mask;

		raw_spin_lock(&st->shared_state->lock);
		/* First sibling enables SSBD: */
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		st->shared_state->disable_state++;
		raw_spin_unlock(&st->shared_state->lock);
	} else {
		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
			return;

		raw_spin_lock(&st->shared_state->lock);
		st->shared_state->disable_state--;
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		raw_spin_unlock(&st->shared_state->lock);
	}
}
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
	/*
	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
	 * so ssbd_tif_to_spec_ctrl() works for both.
	 */
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}
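/*
 * Update the MSRs managing speculation control during a context switch.
 *
 * tifp: previous task's thread flags
 * tifn: next task's thread flags
 */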
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
						      unsigned long tifn)
{
	unsigned long tif_diff = tifp ^ tifn;
	u64 msr = x86_spec_ctrl_base;
	bool updmsr = false;

	lockdep_assert_irqs_disabled();

	/* Handle change of TIF_SSBD depending on the mitigation method. */
	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_ssb_virt_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_core_ssb_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		updmsr |= !!(tif_diff & _TIF_SSBD);
		msr |= ssbd_tif_to_spec_ctrl(tifn);
	}

	/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
	if (IS_ENABLED(CONFIG_SMP) &&
	    static_branch_unlikely(&switch_to_cond_stibp)) {
		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
		msr |= stibp_tif_to_spec_ctrl(tifn);
	}

	if (updmsr) {
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_ENTRY))
			spec_ctrl_update(msr);
		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
	}
}

static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
		if (task_spec_ssb_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SSBD);
		else
			clear_tsk_thread_flag(tsk, TIF_SSBD);

		if (task_spec_ib_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
		else
			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
	}
	/* Return the updated threadinfo flags. */
	return task_thread_info(tsk)->flags;
}

void speculation_ctrl_update(unsigned long tif)
{
	unsigned long flags;

	/* Forced update. Make sure all relevant TIF flags are different. */
	local_irq_save(flags);
	__speculation_ctrl_update(~tif, tif);
	local_irq_restore(flags);
}
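/* Called from seccomp/prctl update */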
void speculation_ctrl_update_current(void)
{
	preempt_disable();
	speculation_ctrl_update(speculation_ctrl_update_tif(current));
	preempt_enable();
}

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev, *next;
	unsigned long tifp, tifn;

	prev = &prev_p->thread;
	next = &next_p->thread;

	tifn = READ_ONCE(task_thread_info(next_p)->flags);
	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
	switch_to_bitmap(prev, next, tifp, tifn);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
		__speculation_ctrl_update(tifp, tifn);
	} else {
		speculation_ctrl_update_tif(prev_p);
		tifn = speculation_ctrl_update_tif(next_p);

		/* Enforce MSR update to ensure consistent state */
		__speculation_ctrl_update(~tifn, tifn);
	}

	if ((tifp ^ tifn) & _TIF_SLD)
		switch_to_sld(tifn);
}
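/*
 * Idle related variables and functions
 */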
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}
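/*
 * Called from the generic idle code.
 */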
void arch_cpu_idle(void)
{
	x86_idle();
}
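/*
 * We use this if we don't have any better idle routine..
 */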
void __cpuidle default_idle(void)
{
	safe_halt();
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	/*
	 * Use wbinvd on processors that support SME. This provides support
	 * for performing a successful kexec when going from SME inactive
	 * to SME active (or vice-versa). The cache must be cleared so that
	 * if there are entries with the same physical address, both with and
	 * without the encryption bit, they don't race each other when flushed
	 * and potentially end up with the wrong entry being committed to
	 * memory.
	 */
	if (boot_cpu_has(X86_FEATURE_SME))
		native_wbinvd();
	for (;;) {
		/*
		 * Use native_halt() so that memory contents don't change
		 * (stack usage and variables) after possibly issuing the
		 * native_wbinvd() above.
		 */
		native_halt();
	}
}
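/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 */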
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */
	local_irq_disable();
	tick_broadcast_exit();
	local_irq_enable();
}
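/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */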
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}
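/*
 * MONITOR/MWAIT with no hints, used for the default C1 state. This invokes
 * MWAIT with interrupts enabled and no flags, which is backwards compatible
 * with the original MWAIT implementation.
 */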
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}

void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}

void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected, the K8_INTP_C1E_ACTIVE_MASK bits are set
	 * in MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the cpuidle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}
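/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */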
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
	int count = 0;

	if (p == current || task_is_running(p))
		return 0;

	if (!try_get_task_stack(p))
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		goto out;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start
	 *
	 * The task's stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start;

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		goto out;

	fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
	do {
		if (fp < bottom || fp > top)
			goto out;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip)) {
			ret = ip;
			goto out;
		}
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && !task_is_running(p));

out:
	put_task_stack(p);
	return ret;
}
long do_arch_prctl_common(struct task_struct *task, int option,
			  unsigned long arg2)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(task, arg2);
	case ARCH_GET_XCOMP_SUPP:
	case ARCH_GET_XCOMP_PERM:
	case ARCH_REQ_XCOMP_PERM:
		return fpu_xstate_prctl(task, option, arg2);
	}

	return -EINVAL;
}