#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux, no more
 * per-task TSS's. The TSS size is kept cacheline-aligned so they are
 * allowed to end up in the .data..percpu..shared_aligned section.
 * Since TSS's are completely CPU-local, we want them on exact cacheline
 * boundaries, no more, no less.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
	.x86_tss = {
		.sp0 = TOP_OF_INIT_STACK,
#ifdef CONFIG_X86_32
		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
#endif
	},
#ifdef CONFIG_X86_32
	/*
	 * Note that the .io_bitmap member must be extra-big. This is because
	 * the CPU will access an additional byte beyond the end of the IO
	 * permission bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
#ifdef CONFIG_X86_32
	.SYSENTER_stack_canary	= STACK_END_MAGIC,
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif
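
/*
 * Hedged usage sketch for the idle notifier API above (illustrative only,
 * not part of this file): a hypothetical module that wants to run code
 * around x86-64 idle transitions registers an atomic notifier whose
 * callback is invoked with IDLE_START or IDLE_END:
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *data)
 *	{
 *		if (action == IDLE_START)
 *			;	// the CPU is entering the idle loop
 *		else if (action == IDLE_END)
 *			;	// the CPU is leaving the idle loop
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call	= my_idle_notify,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */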

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif

	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	free_vm86(t);

	fpu__drop(fpu);
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear(&tsk->thread.fpu);
}

static void hard_disable_TSC(void)
{
	cr4_set_bits(X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	cr4_clear_bits(X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
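
/*
 * get_tsc_mode()/set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC prctl()s.
 * A userspace sketch (illustrative only, not part of this file):
 *
 *	#include <sys/prctl.h>
 *
 *	// make RDTSC fault for this task, e.g. to trap and virtualize time
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);
 *	...
 *	// restore direct RDTSC
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE, 0, 0, 0);
 */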

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next have different TSC trap settings */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}
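
/*
 * Note: __switch_to_xtra() is only reached from the __switch_to() fast path
 * when prev or next has one of the _TIF_WORK_CTXSW flags set, so the common
 * context switch pays for none of the MSR and I/O bitmap work above.
 */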

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
	this_cpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#endif

void arch_cpu_idle_enter(void)
{
	local_touch_nmi();
	enter_idle();
}

void arch_cpu_idle_exit(void)
{
	__exit_idle();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle loop.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine. safe_halt()
 * executes STI;HLT, so interrupts are enabled atomically with entering
 * the halt state and a pending wakeup cannot be lost in between.
 */
void default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	for (;;)
		halt();
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the
 * interrupt pending message MSR. If we detect C1E, then we handle it the
 * same way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/*
			 * Force broadcast so ACPI can not interfere.
			 */
			tick_broadcast_force();
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		tick_broadcast_enter();

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		tick_broadcast_exit();
		local_irq_enable();
	} else
		default_idle();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT if Intel HW has it, otherwise use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT))
		return 0;

	return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}
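
/*
 * The ordering in mwait_idle() is what makes it race-free: the task first
 * sets TIF_POLLING_NRFLAG, arms MONITOR on its thread_info flags word,
 * re-checks need_resched() and only then executes MWAIT. A remote wakeup
 * that sets TIF_NEED_RESCHED writes to the monitored cacheline, so it
 * either aborts the MWAIT or is caught by the re-check; no wakeup IPI is
 * needed while the CPU advertises polling.
 */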

void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (x86_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the cpuidle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
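
/*
 * Example command lines accepted by idle_setup() above: "idle=poll"
 * selects the polling loop, "idle=halt" forces HLT-based C1 while still
 * allowing the cpuidle driver to load, and "idle=nomwait" only keeps
 * MWAIT out of the C2/C3 states.
 */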

/*
 * Randomize the initial stack top by up to 8 kB and keep it 16-byte
 * aligned, unless the task disabled address space randomization.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

/*
 * Place the heap break at a random page within 32 MB above the current
 * brk; fall back to the unrandomized brk if no range is available.
 */
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		return 0;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start + sizeof(thread_info)
	 * thread_info
	 * ----------- start
	 *
	 * The tasks stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start + sizeof(struct thread_info);

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		return 0;

	fp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
	do {
		if (fp < bottom || fp > top)
			return 0;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip))
			return ip;
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);
	return 0;
}
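
/*
 * Consumer-side note (the code lives in fs/proc, not here):
 * /proc/<pid>/wchan resolves the address returned by get_wchan() to a
 * symbol name, showing which function a blocked task is sleeping in.
 */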