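/*
 * intel_idle.c - native MWAIT-based cpuidle driver for Intel processors
 *
 * Author: Len Brown <len.brown@intel.com>
 */
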
#define DEBUG

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <trace/events/power.h>
#include <linux/sched.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <asm/cpu_device_id.h>
#include <asm/mwait.h>
#include <asm/msr.h>

#define INTEL_IDLE_VERSION "0.4"
#define PREFIX "intel_idle: "

static struct cpuidle_driver intel_idle_driver = {
	.name = "intel_idle",
	.owner = THIS_MODULE,
};
/* intel_idle.max_cstate=0 disables the driver */
static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1;

static unsigned int mwait_substates;

#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
/* bitmask of C-states whose LAPIC timer keeps running */
static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */

struct idle_cpu {
	struct cpuidle_state *state_table;

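	/*
	 * Which enable bits to clear in MSR_NHM_SNB_PKG_CST_CFG_CTL
	 * to disable hardware C-state auto-demotion on this model.
	 */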
	unsigned long auto_demotion_disable_flags;
};

static const struct idle_cpu *icpu;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index);
static int intel_idle_cpu_init(int cpu);

static struct cpuidle_state *cpuidle_state_table;
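/*
 * Set this flag for states where the HW flushes the TLB for us,
 * so we don't need cross-calls to keep it consistent.
 * When this flag is set, intel_idle() calls leave_mm() before MWAIT,
 * so even if the HW doesn't do the flushing, the flag is safe to use.
 */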
#define CPUIDLE_FLAG_TLB_FLUSHED	0x10000
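/*
 * States are indexed by the cstate number,
 * which is also the index into the MWAIT hint array.
 * Thus C0 is a dummy.
 */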
static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ },
	{
		.name = "C1-NHM",
		.desc = "MWAIT 0x00",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 3,
		.target_residency = 6,
		.enter = &intel_idle },
	{
		.name = "C3-NHM",
		.desc = "MWAIT 0x10",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle },
	{
		.name = "C6-NHM",
		.desc = "MWAIT 0x20",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 200,
		.target_residency = 800,
		.enter = &intel_idle },
};

static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ },
	{
		.name = "C1-SNB",
		.desc = "MWAIT 0x00",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle },
	{
		.name = "C3-SNB",
		.desc = "MWAIT 0x10",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 211,
		.enter = &intel_idle },
	{
		.name = "C6-SNB",
		.desc = "MWAIT 0x20",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 104,
		.target_residency = 345,
		.enter = &intel_idle },
	{
		.name = "C7-SNB",
		.desc = "MWAIT 0x30",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 109,
		.target_residency = 345,
		.enter = &intel_idle },
};

static struct cpuidle_state ivb_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ },
	{
		.name = "C1-IVB",
		.desc = "MWAIT 0x00",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 1,
		.target_residency = 1,
		.enter = &intel_idle },
	{
		.name = "C3-IVB",
		.desc = "MWAIT 0x10",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 59,
		.target_residency = 156,
		.enter = &intel_idle },
	{
		.name = "C6-IVB",
		.desc = "MWAIT 0x20",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 80,
		.target_residency = 300,
		.enter = &intel_idle },
	{
		.name = "C7-IVB",
		.desc = "MWAIT 0x30",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 87,
		.target_residency = 300,
		.enter = &intel_idle },
};

static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
	{ /* MWAIT C0 */ },
	{ /* MWAIT C1 */
		.name = "C1-ATM",
		.desc = "MWAIT 0x00",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 1,
		.target_residency = 4,
		.enter = &intel_idle },
	{ /* MWAIT C2 */
		.name = "C2-ATM",
		.desc = "MWAIT 0x10",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 20,
		.target_residency = 80,
		.enter = &intel_idle },
	{ /* MWAIT C3 */ },
	{ /* MWAIT C4 */
		.name = "C4-ATM",
		.desc = "MWAIT 0x30",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 100,
		.target_residency = 400,
		.enter = &intel_idle },
	{ /* MWAIT C5 */ },
	{ /* MWAIT C6 */
		.name = "C6-ATM",
		.desc = "MWAIT 0x52",
		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
		.exit_latency = 140,
		.target_residency = 560,
		.enter = &intel_idle },
};

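/*
 * get_driver_data - return the MWAIT hint for a C-state table index.
 * Hints are (cstate - 1) << 4, except the Atom C6 entry, which uses
 * the non-linear hint 0x52.
 */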
static long get_driver_data(int cstate)
{
	int driver_data;
	switch (cstate) {

	case 1:
		driver_data = 0x00;
		break;
	case 2:
		driver_data = 0x10;
		break;
	case 3:
		driver_data = 0x20;
		break;
	case 4:
		driver_data = 0x30;
		break;
	case 5:
		driver_data = 0x40;
		break;
	case 6:
		driver_data = 0x52;
		break;
	default:
		driver_data = 0x00;
	}
	return driver_data;
}
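/**
 * intel_idle - enter the C-state selected by the governor via MWAIT
 * @dev: cpuidle_device for this CPU
 * @drv: cpuidle_driver
 * @index: index into drv->states[] of the state to enter
 *
 * Must be called with interrupts disabled.
 */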
static int intel_idle(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	unsigned long ecx = 1; /* break on interrupt flag */
	struct cpuidle_state *state = &drv->states[index];
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
	unsigned int cstate;
	ktime_t kt_before, kt_after;
	s64 usec_delta;
	int cpu = smp_processor_id();

	cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;

	/*
	 * leave_mm() to avoid costly and often unnecessary wakeups
	 * for flushing the user TLBs associated with the active mm.
	 */
	if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
		leave_mm(cpu);

	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

	kt_before = ktime_get_real();

	stop_critical_timings();
	if (!need_resched()) {

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(eax, ecx);
	}

	start_critical_timings();

	kt_after = ktime_get_real();
	usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before));

	local_irq_enable();

	if (!(lapic_timer_reliable_states & (1 << (cstate))))
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);

	/* update cpuidle residency accounting */
	dev->last_residency = (int)usec_delta;

	return index;
}

static void __setup_broadcast_timer(void *arg)
{
	unsigned long reason = (unsigned long)arg;
	int cpu = smp_processor_id();

	reason = reason ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &cpu);
}

static int cpu_hotplug_notify(struct notifier_block *n,
			      unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct cpuidle_device *dev;

	switch (action & 0xf) {
	case CPU_ONLINE:

		if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
			smp_call_function_single(hotcpu, __setup_broadcast_timer,
						 (void *)true, 1);

		/*
		 * A CPU hotplugged after boot has no cpuidle device yet,
		 * so register one for it here.
		 */
		dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
		if (!dev->registered)
			intel_idle_cpu_init(hotcpu);

		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block cpu_hotplug_notifier = {
	.notifier_call = cpu_hotplug_notify,
};

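/*
 * Clear the C-state auto-demotion enable bits (the per-model
 * auto_demotion_disable_flags) in MSR_NHM_SNB_PKG_CST_CFG_CTL
 * on the calling CPU.
 */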
static void auto_demotion_disable(void *dummy)
{
	unsigned long long msr_bits;

	rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
	msr_bits &= ~(icpu->auto_demotion_disable_flags);
	wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
}

static const struct idle_cpu idle_cpu_nehalem = {
	.state_table = nehalem_cstates,
	.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
};

static const struct idle_cpu idle_cpu_atom = {
	.state_table = atom_cstates,
};

static const struct idle_cpu idle_cpu_lincroft = {
	.state_table = atom_cstates,
	.auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
};

static const struct idle_cpu idle_cpu_snb = {
	.state_table = snb_cstates,
};

static const struct idle_cpu idle_cpu_ivb = {
	.state_table = ivb_cstates,
};

#define ICPU(model, cpu) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }

static const struct x86_cpu_id intel_idle_ids[] = {
	ICPU(0x1a, idle_cpu_nehalem),
	ICPU(0x1e, idle_cpu_nehalem),
	ICPU(0x1f, idle_cpu_nehalem),
	ICPU(0x25, idle_cpu_nehalem),
	ICPU(0x2c, idle_cpu_nehalem),
	ICPU(0x2e, idle_cpu_nehalem),
	ICPU(0x1c, idle_cpu_atom),
	ICPU(0x26, idle_cpu_lincroft),
	ICPU(0x2f, idle_cpu_nehalem),
	ICPU(0x2a, idle_cpu_snb),
	ICPU(0x2d, idle_cpu_snb),
	ICPU(0x3a, idle_cpu_ivb),
	ICPU(0x3e, idle_cpu_ivb),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
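/*
 * intel_idle_probe()
 * Match the running CPU against intel_idle_ids, verify MWAIT sub-state
 * support via CPUID, and select the per-model C-state table.
 */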
static int intel_idle_probe(void)
{
	unsigned int eax, ebx, ecx;
	const struct x86_cpu_id *id;

	if (max_cstate == 0) {
		pr_debug(PREFIX "disabled\n");
		return -EPERM;
	}

	id = x86_match_cpu(intel_idle_ids);
	if (!id) {
		if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
		    boot_cpu_data.x86 == 6)
			pr_debug(PREFIX "does not run on family %d model %d\n",
				 boot_cpu_data.x86, boot_cpu_data.x86_model);
		return -ENODEV;
	}

	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return -ENODEV;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
	    !mwait_substates)
		return -ENODEV;

	pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);

	icpu = (const struct idle_cpu *)id->driver_data;
	cpuidle_state_table = icpu->state_table;

	if (boot_cpu_has(X86_FEATURE_ARAT))	/* Always Running APIC Timer */
		lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
	else
		on_each_cpu(__setup_broadcast_timer, (void *)true, 1);

	register_cpu_notifier(&cpu_hotplug_notifier);

	pr_debug(PREFIX "v" INTEL_IDLE_VERSION
		" model 0x%X\n", boot_cpu_data.x86_model);

	pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n",
		 lapic_timer_reliable_states);
	return 0;
}
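/*
 * intel_idle_cpuidle_devices_uninit()
 * Unregister and free the per-CPU cpuidle devices.
 */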
static void intel_idle_cpuidle_devices_uninit(void)
{
	int i;
	struct cpuidle_device *dev;

	for_each_online_cpu(i) {
		dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
		cpuidle_unregister_device(dev);
	}

	free_percpu(intel_idle_cpuidle_devices);
	return;
}
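/*
 * intel_idle_cpuidle_driver_init()
 * Copy the enabled C-states for this model into the global cpuidle driver.
 */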
static int intel_idle_cpuidle_driver_init(void)
{
	int cstate;
	struct cpuidle_driver *drv = &intel_idle_driver;

	drv->state_count = 1;

	for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
		int num_substates;

		if (cstate > max_cstate) {
			printk(PREFIX "max_cstate %d reached\n",
				max_cstate);
			break;
		}

		/* does the state exist in CPUID.MWAIT? */
		num_substates = (mwait_substates >> ((cstate) * 4))
					& MWAIT_SUBSTATE_MASK;
		if (num_substates == 0)
			continue;

		if (cpuidle_state_table[cstate].enter == NULL) {
			/* CPUID advertises a state the table doesn't list */
			if (*cpuidle_state_table[cstate].name == '\0')
				pr_debug(PREFIX "unaware of model 0x%x"
					" MWAIT %d please"
					" contact lenb@kernel.org",
					boot_cpu_data.x86_model, cstate);
			continue;
		}

		if ((cstate > 2) &&
			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			mark_tsc_unstable("TSC halts in idle"
					" states deeper than C2");

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[cstate];

		drv->state_count += 1;
	}

	if (icpu->auto_demotion_disable_flags)
		on_each_cpu(auto_demotion_disable, NULL, 1);

	return 0;
}
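/*
 * intel_idle_cpu_init()
 * Initialize and register the cpuidle device for one CPU,
 * attaching the MWAIT hint for each enabled state.
 * @cpu: cpu/core to initialize
 */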
static int intel_idle_cpu_init(int cpu)
{
	int cstate;
	struct cpuidle_device *dev;

	dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);

	dev->state_count = 1;

	for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
		int num_substates;

		if (cstate > max_cstate) {
			printk(PREFIX "max_cstate %d reached\n", max_cstate);
			break;
		}

		/* does the state exist in CPUID.MWAIT? */
		num_substates = (mwait_substates >> ((cstate) * 4))
					& MWAIT_SUBSTATE_MASK;
		if (num_substates == 0)
			continue;

		if (cpuidle_state_table[cstate].enter == NULL)
			continue;

		dev->states_usage[dev->state_count].driver_data =
			(void *)get_driver_data(cstate);

		dev->state_count += 1;
	}

	dev->cpu = cpu;

	if (cpuidle_register_device(dev)) {
		pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
		intel_idle_cpuidle_devices_uninit();
		return -EIO;
	}

	if (icpu->auto_demotion_disable_flags)
		smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);

	return 0;
}

static int __init intel_idle_init(void)
{
	int retval, i;

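	/* do not load intel_idle if another idle routine was selected via idle= */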
	if (boot_option_idle_override != IDLE_NO_OVERRIDE)
		return -ENODEV;

	retval = intel_idle_probe();
	if (retval)
		return retval;

	intel_idle_cpuidle_driver_init();
	retval = cpuidle_register_driver(&intel_idle_driver);
	if (retval) {
		struct cpuidle_driver *drv = cpuidle_get_driver();
		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s\n",
			drv ? drv->name : "none");
		return retval;
	}

	intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (intel_idle_cpuidle_devices == NULL) {
		cpuidle_unregister_driver(&intel_idle_driver);
		return -ENOMEM;
	}

	for_each_online_cpu(i) {
		retval = intel_idle_cpu_init(i);
		if (retval) {
			cpuidle_unregister_driver(&intel_idle_driver);
			return retval;
		}
	}

	return 0;
}

static void __exit intel_idle_exit(void)
{
	intel_idle_cpuidle_devices_uninit();
	cpuidle_unregister_driver(&intel_idle_driver);

	if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
		on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
	unregister_cpu_notifier(&cpu_hotplug_notifier);

	return;
}

module_init(intel_idle_init);
module_exit(intel_idle_exit);

module_param(max_cstate, int, 0444);

MODULE_AUTHOR("Len Brown <len.brown@intel.com>");
MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION);
MODULE_LICENSE("GPL");