// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c )
 */
#include "sched.h"

#include <trace/events/power.h>

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;
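
/*
 * cpu_idle_poll_ctrl - force/unforce polling in the idle loop
 * @enable: true to increment the force-poll count, false to decrement it
 *
 * While the count is non-zero, do_idle() uses cpu_idle_poll() instead of
 * entering a cpuidle state.
 */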
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif
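
/*
 * Polling idle loop: spin with interrupts enabled, re-checking for a
 * reschedule request, while polling is forced or a tick broadcast event
 * is pending for this CPU.
 */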
static noinline int __cpuidle cpu_idle_poll(void)
{
	trace_cpu_idle(0, smp_processor_id());
	stop_critical_timings();
	rcu_idle_enter();
	local_irq_enable();

	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();

	rcu_idle_exit();
	start_critical_timings();
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	raw_local_irq_enable();
}

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {

		trace_cpu_idle(1, smp_processor_id());
		stop_critical_timings();

		/*
		 * arch_cpu_idle() is supposed to enable IRQs, however
		 * we can't do that because of RCU and tracing.
		 *
		 * Trace IRQs enable here, then switch off RCU, and have
		 * arch_cpu_idle() use raw_local_irq_enable(). Note that
		 * rcu_idle_enter() relies on lockdep IRQ state, so switch that
		 * last -- this is very similar to the entry code.
		 */
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(_THIS_IP_);
		rcu_idle_enter();
		lockdep_hardirqs_on(_THIS_IP_);

		arch_cpu_idle();

		/*
		 * IRQs are enabled again at this point, but RCU needs them
		 * disabled in order to turn itself back on. Note that
		 * disabling IRQs here makes the raw IRQ state diverge from
		 * the traced/lockdep state set up above, which is why the
		 * raw and lockdep operations are sequenced explicitly.
		 */
		raw_local_irq_disable();
		lockdep_hardirqs_off(_THIS_IP_);
		rcu_idle_exit();
		lockdep_hardirqs_on(_THIS_IP_);
		raw_local_irq_enable();

		start_critical_timings();
		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	}
}
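
/*
 * Enter suspend-to-idle via cpuidle, unless a reschedule is already pending.
 */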
static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	if (current_clr_polling_and_test())
		return -EBUSY;

	return cpuidle_enter_s2idle(drv, dev);
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be scheduled, it is pointless to go to idle, just
	 * update no idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency_ns = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On architectures that support TIF_POLLING_NRFLAG, is called with polling
 * set, and it returns with polling set.  If it ever stops polling, it must
 * clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * If the cpuidle framework is not available, stop the tick and fall
	 * back to the default architecture idle routine.
	 */
	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available.  Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */

	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
		u64 max_latency_ns;

		if (idle_should_enter_s2idle()) {

			entered_state = call_cpuidle_s2idle(drv, dev);
			if (entered_state > 0)
				goto exit_idle;

			max_latency_ns = U64_MAX;
		} else {
			max_latency_ns = dev->forced_idle_latency_limit_ns;
		}

		tick_nohz_idle_stop_tick();

		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * Check if we need to update blocked load.
	 */
	nohz_run_idle_balance(cpu);

	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */

	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		rmb();

		local_irq_disable();

		if (cpu_is_offline(cpu)) {
			tick_nohz_idle_stop_tick();
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		arch_cpu_idle_enter();
		rcu_nocb_flush_deferred_wakeup();

		/*
		 * In poll mode we re-enable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	/*
	 * RCU relies on this call to be done outside of an RCU read-side
	 * critical section.
	 */
	flush_smp_call_function_from_idle();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}
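
/* Return true if @pc lies within the __cpuidle text section. */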
bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}
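
/*
 * Helpers for idle injection: a pinned hrtimer marks the end of the forced
 * idle period and kicks the CPU back out of the idle loop.
 */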
struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}
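
/*
 * play_idle_precise - force the current CPU to idle for @duration_ns, using
 * the deepest idle state whose exit latency does not exceed @latency_ns.
 * Must be called from a per-CPU FIFO kthread, as asserted by the
 * WARN_ON_ONCE() checks below.
 */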
void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the forced
	 * preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ns);
	WARN_ON_ONCE(current->mm);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(latency_ns);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
		      HRTIMER_MODE_REL_PINNED_HARD);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(0);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle_precise);
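
/* Entry point into the idle loop once a CPU has been brought up; never returns. */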
void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p);
}

static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return WARN_ON_ONCE(1);
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
	queue_core_balance(rq);
}

#ifdef CONFIG_SMP
static struct task_struct *pick_task_idle(struct rq *rq)
{
	return rq->idle;
}
#endif

struct task_struct *pick_next_task_idle(struct rq *rq)
{
	struct task_struct *next = rq->idle;

	set_next_task_idle(rq, next, true);

	return next;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_rq_unlock_irq(rq);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_rq_lock_irq(rq);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
DEFINE_SCHED_CLASS(idle) = {

	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task		= set_next_task_idle,

#ifdef CONFIG_SMP
	.balance		= balance_idle,
	.pick_task		= pick_task_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};