#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
	.x86_tss = {
		.sp0 = TOP_OF_INIT_STACK,
#ifdef CONFIG_X86_32
		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
#endif
	},
#ifdef CONFIG_X86_32
	/*
	 * Note that the .io_bitmap member must be extra-big. This is because
	 * the CPU will access an additional byte beyond the end of the IO
	 * permission bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
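
/*
 * The io_bitmap above is what the ioperm(2)/iopl(2) syscalls ultimately
 * program into the per-CPU TSS on context switch. Illustrative userspace
 * sketch (hypothetical port number, not part of this file):
 *
 *	#include <sys/io.h>
 *
 *	if (ioperm(0x378, 3, 1) == 0)	// needs CAP_SYS_RAWIO
 *		outb(0xff, 0x378);	// port access now permitted
 */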

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif
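
/*
 * Illustrative sketch (not part of this file): a 64-bit module could
 * subscribe to the IDLE_START/IDLE_END events above with a notifier
 * block. The callback name and message below are hypothetical.
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *unused)
 *	{
 *		if (action == IDLE_START)
 *			pr_debug("cpu %d entering idle\n", smp_processor_id());
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call	= my_idle_notify,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */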

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);

	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	fpu__drop(fpu);
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear(&tsk->thread.fpu);
}

static void hard_disable_TSC(void)
{
	cr4_set_bits(X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	cr4_clear_bits(X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
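
/*
 * get_tsc_mode()/set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC prctls.
 * Illustrative userspace sketch (not part of this file):
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	// RDTSC now raises SIGSEGV
 *	...
 *	int mode;
 *	prctl(PR_GET_TSC, &mode);		// read the current mode back
 */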

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}
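
/*
 * Note: __switch_to_xtra() is only invoked from __switch_to() (in
 * process_32.c/process_64.c) when the prev/next TIF work flags differ
 * (see the _TIF_WORK_CTXSW* masks in <asm/thread_info.h>), so the
 * common context-switch path skips it entirely.
 */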

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
	this_cpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#endif

void arch_cpu_idle_enter(void)
{
	local_touch_nmi();
	enter_idle();
}

void arch_cpu_idle_exit(void)
{
	__exit_idle();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine..
 */
void default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;)
		halt();
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the
 * interrupt pending message MSR. If we detect C1E, then we handle it the
 * same way as C3 power states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/*
			 * Force broadcast so ACPI can not interfere.
			 */
			tick_broadcast_force();
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		tick_broadcast_enter();

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		tick_broadcast_exit();
		local_irq_enable();
	} else
		default_idle();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT if available.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT))
		return 0;

	return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with
 * the original MWAIT implementation.
 */
static void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			smp_mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			smp_mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}

void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (x86_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
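
/*
 * Example kernel command lines handled by idle_setup() (illustrative):
 *	idle=poll	- spin in the idle loop, never halt (can hurt SMT siblings)
 *	idle=halt	- always use HLT for idle
 *	idle=nomwait	- idle without using MWAIT
 */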

/*
 * Randomize the initial stack pointer by up to 8 KiB, then round it down
 * to a 16-byte boundary.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

/*
 * Randomize the brk within [mm->brk, mm->brk + 32 MiB); fall back to the
 * unrandomized brk if randomize_range() fails.
 */
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}