/*
 * cpuidle.c - core cpuidle infrastructure
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/tick.h>
#include <linux/mmu_context.h>
#include <trace/events/power.h>

#include "cpuidle.h"

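/*
 * Per-CPU pointer to the cpuidle device in use, plus backing storage used by
 * cpuidle_register() for drivers that do not allocate devices themselves.
 */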
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices);

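/*
 * "off" is set via the "cpuidle.off" module parameter below; "initialized"
 * flips to 1 once at least one device is enabled and the idle handler has
 * been installed.
 */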
static int enabled_devices;
static int off __read_mostly;
static int initialized __read_mostly;

int cpuidle_disabled(void)
{
	return off;
}

void disable_cpuidle(void)
{
	off = 1;
}

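/* Report whether cpuidle may be used on this CPU at this time. */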
bool cpuidle_not_available(struct cpuidle_driver *drv,
			   struct cpuidle_device *dev)
{
	return off || !initialized || !drv || !dev || !dev->enabled;
}

/**
 * cpuidle_play_dead - cpu off-lining
 *
 * Returns in case of an error or no driver
 */
int cpuidle_play_dead(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i;

	if (!drv)
		return -ENODEV;

	/* Find lowest-power state that supports long-term idle */
	for (i = drv->state_count - 1; i >= 0; i--)
		if (drv->states[i].enter_dead)
			return drv->states[i].enter_dead(dev, i);

	return -ENODEV;
}

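/**
 * find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 * @max_latency_ns: Idle state exit latency limit
 * @forbidden_flags: Flags to exclude.
 * @s2idle: If "true", consider only the states with ->enter_s2idle().
 */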
static int find_deepest_state(struct cpuidle_driver *drv,
			      struct cpuidle_device *dev,
			      u64 max_latency_ns,
			      unsigned int forbidden_flags,
			      bool s2idle)
{
	u64 latency_req = 0;
	int i, ret = 0;

	for (i = 1; i < drv->state_count; i++) {
		struct cpuidle_state *s = &drv->states[i];

		if (dev->states_usage[i].disable ||
		    s->exit_latency_ns <= latency_req ||
		    s->exit_latency_ns > max_latency_ns ||
		    (s->flags & forbidden_flags) ||
		    (s2idle && !s->enter_s2idle))
			continue;

		latency_req = s->exit_latency_ns;
		ret = i;
	}
	return ret;
}

/**
 * cpuidle_use_deepest_state - Set/unset governor bypass mode.
 * @latency_limit_ns: Idle state exit latency limit (or no limit if 0).
 *
 * If @latency_limit_ns is nonzero, set the current CPU to use the deepest idle
 * state with exit latency within @latency_limit_ns, overriding the governor,
 * or stop overriding the governor otherwise.
 */
void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
	struct cpuidle_device *dev;

	preempt_disable();
	dev = cpuidle_get_device();
	if (dev)
		dev->forced_idle_latency_limit_ns = latency_limit_ns;
	preempt_enable();
}

/**
 * cpuidle_find_deepest_state - Find the deepest available idle state.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 * @latency_limit_ns: Idle state exit latency limit
 *
 * Return: the index of the deepest available idle state.
 */
int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev,
			       u64 latency_limit_ns)
{
	return find_deepest_state(drv, dev, latency_limit_ns, 0, false);
}

#ifdef CONFIG_SUSPEND
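/* Enter the given state for suspend-to-idle, with the tick frozen. */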
static void enter_s2idle_proper(struct cpuidle_driver *drv,
				struct cpuidle_device *dev, int index)
{
	ktime_t time_start, time_end;
	struct cpuidle_state *target_state = &drv->states[index];

	time_start = ns_to_ktime(local_clock());

	tick_freeze();
	/*
	 * The state used here cannot be a "coupled" one, because the "coupled"
	 * cpuidle mechanism enables interrupts and doing that with timekeeping
	 * suspended is generally unsafe.
	 */
	stop_critical_timings();
	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
		rcu_idle_enter();
	target_state->enter_s2idle(dev, drv, index);
	if (WARN_ON_ONCE(!irqs_disabled()))
		local_irq_disable();
	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
		rcu_idle_exit();
	tick_unfreeze();
	start_critical_timings();

	time_end = ns_to_ktime(local_clock());

	dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start);
	dev->states_usage[index].s2idle_usage++;
}

/**
 * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 *
 * If there are states with the ->enter_s2idle callback, find the deepest of
 * them and enter it with frozen tick.
 */
int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	int index;

	/*
	 * Find the deepest state with ->enter_s2idle present, which guarantees
	 * that interrupts won't be enabled when it exits and allows the tick to
	 * be frozen safely.
	 */
	index = find_deepest_state(drv, dev, U64_MAX, 0, true);
	if (index > 0) {
		enter_s2idle_proper(drv, dev, index);
		local_irq_enable();
	}
	return index;
}
#endif /* CONFIG_SUSPEND */

/**
 * cpuidle_enter_state - enter the state and update stats
 * @dev: cpuidle device for this cpu
 * @drv: cpuidle driver for this cpu
 * @index: index into the states table in @drv of the state to enter
 */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
			int index)
{
	int entered_state;

	struct cpuidle_state *target_state = &drv->states[index];
	bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
	ktime_t time_start, time_end;

	/*
	 * Tell the time framework to switch to a broadcast timer because our
	 * local timer will be shut down.  If a local timer is used from another
	 * CPU as a broadcast timer, this call may fail if it is not available.
	 */
	if (broadcast && tick_broadcast_enter()) {
		index = find_deepest_state(drv, dev, target_state->exit_latency_ns,
					   CPUIDLE_FLAG_TIMER_STOP, false);
		if (index < 0) {
			default_idle_call();
			return -EBUSY;
		}
		target_state = &drv->states[index];
		broadcast = false;
	}

	if (target_state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
		leave_mm(dev->cpu);

	/* Take note of the planned idle state. */
	sched_idle_set_state(target_state);

	trace_cpu_idle(index, dev->cpu);
	time_start = ns_to_ktime(local_clock());

	stop_critical_timings();
	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
		rcu_idle_enter();
	entered_state = target_state->enter(dev, drv, index);
	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
		rcu_idle_exit();
	start_critical_timings();

	sched_clock_idle_wakeup_event();
	time_end = ns_to_ktime(local_clock());
	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);

	/* The cpu is no longer idle or about to enter idle. */
	sched_idle_set_state(NULL);

	if (broadcast) {
		if (WARN_ON_ONCE(!irqs_disabled()))
			local_irq_disable();

		tick_broadcast_exit();
	}

	if (!cpuidle_state_is_coupled(drv, index))
		local_irq_enable();

	if (entered_state >= 0) {
		s64 diff, delay = drv->states[entered_state].exit_latency_ns;
		int i;

		/*
		 * Update cpuidle counters
		 * This can be moved to within driver enter routine,
		 * but that results in multiple copies of same code.
		 */
		diff = ktime_sub(time_end, time_start);

		dev->last_residency_ns = diff;
		dev->states_usage[entered_state].time_ns += diff;
		dev->states_usage[entered_state].usage++;

		if (diff < drv->states[entered_state].target_residency_ns) {
			for (i = entered_state - 1; i >= 0; i--) {
				if (dev->states_usage[i].disable)
					continue;

				/* Shallower states are enabled, so update. */
				dev->states_usage[entered_state].above++;
				break;
			}
		} else if (diff > delay) {
			for (i = entered_state + 1; i < drv->state_count; i++) {
				if (dev->states_usage[i].disable)
					continue;

				/*
				 * Update if a deeper state would have been a
				 * better match for the observed idle duration.
				 */
				if (diff - delay >= drv->states[i].target_residency_ns)
					dev->states_usage[entered_state].below++;

				break;
			}
		}
	} else {
		dev->last_residency_ns = 0;
		dev->states_usage[index].rejected++;
	}

	return entered_state;
}

/**
 * cpuidle_select - ask the cpuidle framework to choose an idle state
 *
 * @drv: the cpuidle driver
 * @dev: the cpuidle device
 * @stop_tick: indication on whether or not to stop the tick
 *
 * Returns the index of the idle state.  The return value must not be negative.
 *
 * The memory location pointed to by @stop_tick is expected to be written the
 * 'false' boolean value if the scheduler tick should not be stopped before
 * entering the returned state.
 */
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		   bool *stop_tick)
{
	return cpuidle_curr_governor->select(drv, dev, stop_tick);
}

/**
 * cpuidle_enter - enter into the specified idle state
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 * @index: the index in the idle state table
 *
 * Returns the index in the idle state, < 0 in case of error.
 * The error code depends on the backend driver
 */
int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
		  int index)
{
	int ret = 0;

	/*
	 * Store the next hrtimer, which becomes either next tick or the next
	 * timer event, whatever expires first. Additionally, to make this data
	 * useful for consumers outside cpuidle, we rely on that the governor's
	 * ->select() callback have decided, whether to stop the tick or not.
	 */
	WRITE_ONCE(dev->next_hrtimer, tick_nohz_get_next_hrtimer());

	if (cpuidle_state_is_coupled(drv, index))
		ret = cpuidle_enter_state_coupled(dev, drv, index);
	else
		ret = cpuidle_enter_state(dev, drv, index);

	WRITE_ONCE(dev->next_hrtimer, 0);
	return ret;
}

/**
 * cpuidle_reflect - tell the underlying governor what was the state
 * we were in
 *
 * @dev  : the cpuidle device
 * @index: the index in the idle state table
 */
void cpuidle_reflect(struct cpuidle_device *dev, int index)
{
	if (cpuidle_curr_governor->reflect && index >= 0)
		cpuidle_curr_governor->reflect(dev, index);
}

/*
 * Bounds for the polling interval: at least 10 usec, at most 1/16 of the
 * tick period.
 */
#define CPUIDLE_POLL_MIN 10000
#define CPUIDLE_POLL_MAX (TICK_NSEC / 16)

/**
 * cpuidle_poll_time - return amount of time to poll for,
 * governors can override dev->poll_limit_ns if necessary
 *
 * @drv:   the cpuidle driver tied with the cpu
 * @dev:   the cpuidle device
 */
u64 cpuidle_poll_time(struct cpuidle_driver *drv,
		      struct cpuidle_device *dev)
{
	int i;
	u64 limit_ns;

	BUILD_BUG_ON(CPUIDLE_POLL_MIN > CPUIDLE_POLL_MAX);

	if (dev->poll_limit_ns)
		return dev->poll_limit_ns;

	limit_ns = CPUIDLE_POLL_MAX;
	for (i = 1; i < drv->state_count; i++) {
		u64 state_limit;

		if (dev->states_usage[i].disable)
			continue;

		state_limit = drv->states[i].target_residency_ns;
		if (state_limit < CPUIDLE_POLL_MIN)
			continue;

		limit_ns = min_t(u64, state_limit, CPUIDLE_POLL_MAX);
		break;
	}

	dev->poll_limit_ns = limit_ns;

	return dev->poll_limit_ns;
}

/**
 * cpuidle_install_idle_handler - installs the cpuidle handler
 */
void cpuidle_install_idle_handler(void)
{
	if (enabled_devices) {
		/* Make sure all changes finished before we switch to new idle */
		smp_wmb();
		initialized = 1;
	}
}

/**
 * cpuidle_uninstall_idle_handler - uninstalls the cpuidle handler
 */
void cpuidle_uninstall_idle_handler(void)
{
	if (enabled_devices) {
		initialized = 0;
		wake_up_all_idle_cpus();
	}

	/*
	 * Make sure external observers (such as the scheduler)
	 * are done looking at pointed idle states.
	 */
	synchronize_rcu();
}

/**
 * cpuidle_pause_and_lock - temporarily disables CPUIDLE
 */
void cpuidle_pause_and_lock(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
}
EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);

/**
 * cpuidle_resume_and_unlock - resumes CPUIDLE operation
 */
void cpuidle_resume_and_unlock(void)
{
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}
EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);

/* Currently used in suspend/resume path to suspend cpuidle */
void cpuidle_pause(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_uninstall_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/* Currently used in suspend/resume path to resume cpuidle */
void cpuidle_resume(void)
{
	mutex_lock(&cpuidle_lock);
	cpuidle_install_idle_handler();
	mutex_unlock(&cpuidle_lock);
}

/**
 * cpuidle_enable_device - enables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
int cpuidle_enable_device(struct cpuidle_device *dev)
{
	int ret;
	struct cpuidle_driver *drv;

	if (!dev)
		return -EINVAL;

	if (dev->enabled)
		return 0;

	if (!cpuidle_curr_governor)
		return -EIO;

	drv = cpuidle_get_cpu_driver(dev);

	if (!drv)
		return -EIO;

	if (!dev->registered)
		return -EINVAL;

	ret = cpuidle_add_device_sysfs(dev);
	if (ret)
		return ret;

	if (cpuidle_curr_governor->enable) {
		ret = cpuidle_curr_governor->enable(drv, dev);
		if (ret)
			goto fail_sysfs;
	}

	smp_wmb();

	dev->enabled = 1;

	enabled_devices++;
	return 0;

fail_sysfs:
	cpuidle_remove_device_sysfs(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_enable_device);

/**
 * cpuidle_disable_device - disables idle PM for a CPU
 * @dev: the CPU
 *
 * This function must be called between cpuidle_pause_and_lock and
 * cpuidle_resume_and_unlock when used externally.
 */
void cpuidle_disable_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	if (!dev || !dev->enabled)
		return;

	if (!drv || !cpuidle_curr_governor)
		return;

	dev->enabled = 0;

	if (cpuidle_curr_governor->disable)
		cpuidle_curr_governor->disable(drv, dev);

	cpuidle_remove_device_sysfs(dev);
	enabled_devices--;
}
EXPORT_SYMBOL_GPL(cpuidle_disable_device);

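/* Drop a device from the framework's bookkeeping; cpuidle_lock must be held. */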
static void __cpuidle_unregister_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

	list_del(&dev->device_list);
	per_cpu(cpuidle_devices, dev->cpu) = NULL;
	module_put(drv->owner);

	dev->registered = 0;
}

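/* Reset per-device statistics and state before (re)registration. */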
static void __cpuidle_device_init(struct cpuidle_device *dev)
{
	memset(dev->states_usage, 0, sizeof(dev->states_usage));
	dev->last_residency_ns = 0;
	dev->next_hrtimer = 0;
}

/**
 * __cpuidle_register_device - internal register function called before register
 * and enable routines
 * @dev: the cpu
 *
 * cpuidle_lock mutex must be held before this is called
 */
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int i, ret;

	if (!try_module_get(drv->owner))
		return -EINVAL;

	for (i = 0; i < drv->state_count; i++) {
		if (drv->states[i].flags & CPUIDLE_FLAG_UNUSABLE)
			dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_DRIVER;

		if (drv->states[i].flags & CPUIDLE_FLAG_OFF)
			dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
	}

	per_cpu(cpuidle_devices, dev->cpu) = dev;
	list_add(&dev->device_list, &cpuidle_detected_devices);

	ret = cpuidle_coupled_register_device(dev);
	if (ret)
		__cpuidle_unregister_device(dev);
	else
		dev->registered = 1;

	return ret;
}

/**
 * cpuidle_register_device - registers a CPU's idle PM feature
 * @dev: the cpu
 */
int cpuidle_register_device(struct cpuidle_device *dev)
{
	int ret = -EBUSY;

	if (!dev)
		return -EINVAL;

	mutex_lock(&cpuidle_lock);

	if (dev->registered)
		goto out_unlock;

	__cpuidle_device_init(dev);

	ret = __cpuidle_register_device(dev);
	if (ret)
		goto out_unlock;

	ret = cpuidle_add_sysfs(dev);
	if (ret)
		goto out_unregister;

	ret = cpuidle_enable_device(dev);
	if (ret)
		goto out_sysfs;

	cpuidle_install_idle_handler();

out_unlock:
	mutex_unlock(&cpuidle_lock);

	return ret;

out_sysfs:
	cpuidle_remove_sysfs(dev);
out_unregister:
	__cpuidle_unregister_device(dev);
	goto out_unlock;
}
EXPORT_SYMBOL_GPL(cpuidle_register_device);

/**
 * cpuidle_unregister_device - unregisters a CPU's idle PM feature
 * @dev: the cpu
 */
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
	if (!dev || dev->registered == 0)
		return;

	cpuidle_pause_and_lock();

	cpuidle_disable_device(dev);

	cpuidle_remove_sysfs(dev);

	__cpuidle_unregister_device(dev);

	cpuidle_coupled_unregister_device(dev);

	cpuidle_resume_and_unlock();
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);

/**
 * cpuidle_unregister - unregister a driver and the devices. This function
 * can be used only if the driver has been previously registered through
 * the function cpuidle_register().
 *
 * @drv: a valid pointer to a struct cpuidle_driver
 */
void cpuidle_unregister(struct cpuidle_driver *drv)
{
	int cpu;
	struct cpuidle_device *device;

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);

/**
 * cpuidle_register - registers the driver and the cpu devices with the
 * coupled_cpus passed as parameter. This function is used for all common
 * case and takes care of the corner cases where coupled cpus can be specified.
 *
 * @drv         : a valid pointer to a struct cpuidle_driver
 * @coupled_cpus: a cpumask for the coupled states
 *
 * Returns 0 on success, < 0 otherwise
 */
int cpuidle_register(struct cpuidle_driver *drv,
		     const struct cpumask *const coupled_cpus)
{
	int ret, cpu;
	struct cpuidle_device *device;

	ret = cpuidle_register_driver(drv);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	for_each_cpu(cpu, drv->cpumask) {
		device = &per_cpu(cpuidle_dev, cpu);
		device->cpu = cpu;

#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		/*
		 * On multiplatform for ARM, the coupled idle states could be
		 * enabled in the kernel even if the cpuidle driver does not
		 * use it. Note, coupled_cpus is a struct copy.
		 */
		if (coupled_cpus)
			device->coupled_cpus = *coupled_cpus;
#endif
		ret = cpuidle_register_device(device);
		if (!ret)
			continue;

		pr_err("Failed to register cpuidle device for cpu%d\n", cpu);

		cpuidle_unregister(drv);
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);

/**
 * cpuidle_init - core initializer
 */
static int __init cpuidle_init(void)
{
	if (cpuidle_disabled())
		return -ENODEV;

	return cpuidle_add_interface(cpu_subsys.dev_root);
}

module_param(off, int, 0444);
module_param_string(governor, param_governor, CPUIDLE_NAME_LEN, 0444);
core_initcall(cpuidle_init);