// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c )
 */
#include "sched.h"

#include <trace/events/power.h>

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: idle state pointer to be recorded.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}
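
/*
 * Usage sketch (illustrative, not part of this file): a caller that must
 * bound wakeup latency can temporarily force idle CPUs to spin in
 * cpu_idle_poll() instead of entering a low-power state; each CPU picks
 * the change up on its next pass through the idle loop. The requests
 * nest, so every enable needs a matching disable:
 *
 *	cpu_idle_poll_ctrl(true);	// idle CPUs now poll
 *	// ... latency-critical window ...
 *	cpu_idle_poll_ctrl(false);	// back to normal cpuidle
 */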

#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif
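
/*
 * Usage note (illustrative): with CONFIG_GENERIC_IDLE_POLL_SETUP=y, booting
 * with "nohlt" on the kernel command line starts the system with polling
 * idle forced on, and "hlt" switches it back off:
 *
 *	linux ... nohlt		// poll instead of halting in idle
 */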

static noinline int __cpuidle cpu_idle_poll(void)
{
	trace_cpu_idle(0, smp_processor_id());
	stop_critical_timings();
	rcu_idle_enter();
	local_irq_enable();

	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();

	rcu_idle_exit();
	start_critical_timings();
	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());

	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	raw_local_irq_enable();
}
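
/*
 * A minimal sketch of an architecture override (assumption: an arch with a
 * "wait for interrupt" instruction, spelled wfi() here as a hypothetical
 * placeholder). The contract is that arch_cpu_idle() returns with IRQs
 * enabled, using the raw variant so no tracing happens while RCU is not
 * watching:
 *
 *	void arch_cpu_idle(void)
 *	{
 *		wfi();			// hypothetical: wakes on a pending IRQ
 *		raw_local_irq_enable();	// contract: return with IRQs enabled
 *	}
 */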

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {

		trace_cpu_idle(1, smp_processor_id());
		stop_critical_timings();

		/*
		 * arch_cpu_idle() is supposed to enable IRQs, however
		 * we can't do that because of RCU and tracing.
		 *
		 * Trace IRQs enable here, then switch off RCU, and have
		 * arch_cpu_idle() use raw_local_irq_enable(). Note that
		 * rcu_idle_enter() relies on lockdep IRQ state, so switch that
		 * last -- this is very similar to the entry code.
		 */
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(_THIS_IP_);
		rcu_idle_enter();
		lockdep_hardirqs_on(_THIS_IP_);

		arch_cpu_idle();

		/*
		 * OK, so IRQs are enabled here, but RCU needs them disabled to
		 * turn itself back on. The catch is that disabling IRQs will
		 * cause tracing, which needs RCU, so jump through hoops to
		 * make it work.
		 */
		raw_local_irq_disable();
		lockdep_hardirqs_off(_THIS_IP_);
		rcu_idle_exit();
		lockdep_hardirqs_on(_THIS_IP_);
		raw_local_irq_enable();

		start_critical_timings();
		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
	}
}

static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	if (current_clr_polling_and_test())
		return -EBUSY;

	return cpuidle_enter_s2idle(drv, dev);
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be scheduled; it is pointless to go to idle,
	 * just record no idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency_ns = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On architectures that support TIF_POLLING_NRFLAG, is called with polling
 * set, and it returns with polling set.  If it enters the idle state, the
 * polling state is cleared and it returns with polling cleared.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If it is the
	 * case, exit the function after re-enabling the local IRQ.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * If the cpuidle framework cannot be used (no registered driver or
	 * device), stop the tick and fall back to the default arch idle
	 * routine.
	 */
	if (cpuidle_not_available(drv, dev)) {
		tick_nohz_idle_stop_tick();

		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("s2idle") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking in while the
	 * idle state is selected.
	 */
	if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
		u64 max_latency_ns;

		if (idle_should_enter_s2idle()) {

			entered_state = call_cpuidle_s2idle(drv, dev);
			if (entered_state > 0)
				goto exit_idle;

			max_latency_ns = U64_MAX;
		} else {
			max_latency_ns = dev->forced_idle_latency_limit_ns;
		}

		tick_nohz_idle_stop_tick();

		next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
		call_cpuidle(drv, dev, next_state);
	} else {
		bool stop_tick = true;

		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev, &stop_tick);

		if (stop_tick || tick_nohz_tick_stopped())
			tick_nohz_idle_stop_tick();
		else
			tick_nohz_idle_retain_tick();

		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
	int cpu = smp_processor_id();

	/*
	 * Check if we need to update blocked load before entering idle.
	 */
	nohz_run_idle_balance(cpu);

	/*
	 * If the arch has a polling bit, we maintain an invariant:
	 *
	 * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
	 * rq->idle). This means that, if rq->idle has the polling bit set,
	 * then setting need_resched is guaranteed to cause the CPU to
	 * reschedule.
	 */
	__current_set_polling();
	tick_nohz_idle_enter();

	while (!need_resched()) {
		rmb();

		local_irq_disable();

		if (cpu_is_offline(cpu)) {
			tick_nohz_idle_stop_tick();
			cpuhp_report_idle_dead();
			arch_cpu_idle_dead();
		}

		arch_cpu_idle_enter();
		rcu_nocb_flush_deferred_wakeup();

		/*
		 * In poll mode we re-enable interrupts and spin. Also if we
		 * detected in the wakeup from idle path that the tick
		 * broadcast device expired for us, we don't want to go deep
		 * idle as we know that the IPI is going to arrive right away.
		 */
		if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
			tick_nohz_idle_restart_tick();
			cpu_idle_poll();
		} else {
			cpuidle_idle_call();
		}
		arch_cpu_idle_exit();
	}

	/*
	 * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
	 * be set, propagate it into PREEMPT_NEED_RESCHED.
	 *
	 * This is required because for polling idle loops we will not have had
	 * an IPI to fold the state for us.
	 */
	preempt_set_need_resched();
	tick_nohz_idle_exit();
	__current_clr_polling();

	/*
	 * We promise to call sched_ttwu_pending() and reschedule if
	 * need_resched() is set while polling is set. That means that clearing
	 * polling needs to be visible before doing these things.
	 */
	smp_mb__after_atomic();

	/*
	 * RCU relies on this call to be done outside of an RCU read-side
	 * critical section.
	 */
	flush_smp_call_function_from_idle();
	schedule_idle();

	if (unlikely(klp_patch_pending(current)))
		klp_update_patch_state(current);
}

bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}
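
/*
 * Illustrative use (assumption: the caller has a sampled program counter,
 * e.g. from an NMI backtrace handler) -- suppress noisy output for CPUs
 * that were merely sitting in the idle loop:
 *
 *	if (cpu_in_idle(instruction_pointer(regs))) {
 *		pr_warn("NMI backtrace for cpu %d skipped: idling\n", cpu);
 *		return;
 *	}
 */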

struct idle_timer {
	struct hrtimer timer;
	int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
	struct idle_timer *it = container_of(timer, struct idle_timer, timer);

	WRITE_ONCE(it->done, 1);
	set_tsk_need_resched(current);

	return HRTIMER_NORESTART;
}

void play_idle_precise(u64 duration_ns, u64 latency_ns)
{
	struct idle_timer it;

	/*
	 * Only FIFO tasks can disable the tick since they don't need the
	 * forced preemption.
	 */
	WARN_ON_ONCE(current->policy != SCHED_FIFO);
	WARN_ON_ONCE(current->nr_cpus_allowed != 1);
	WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
	WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
	WARN_ON_ONCE(!duration_ns);
	WARN_ON_ONCE(current->mm);

	rcu_sleep_check();
	preempt_disable();
	current->flags |= PF_IDLE;
	cpuidle_use_deepest_state(latency_ns);

	it.done = 0;
	hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	it.timer.function = idle_inject_timer_fn;
	hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
		      HRTIMER_MODE_REL_PINNED_HARD);

	while (!READ_ONCE(it.done))
		do_idle();

	cpuidle_use_deepest_state(0);
	current->flags &= ~PF_IDLE;

	preempt_fold_need_resched();
	preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle_precise);
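
/*
 * Caller sketch (illustrative; modelled on how an idle-injection thread
 * might use this API). It assumes the creator already pinned the kthread
 * to one CPU with kthread_bind() and made it SCHED_FIFO with
 * sched_set_fifo(), which satisfies the WARN_ON_ONCE() checks above:
 *
 *	static int idle_inject_fn(void *unused)
 *	{
 *		while (!kthread_should_stop()) {
 *			// force 10 ms of idle, no exit-latency constraint
 *			play_idle_precise(10 * NSEC_PER_MSEC, U64_MAX);
 *			schedule_timeout_interruptible(HZ / 10);
 *		}
 *		return 0;
 *	}
 */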

void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	while (1)
		do_idle();
}
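
/*
 * For orientation (callers live elsewhere in the tree): the boot CPU enters
 * here from rest_init() with CPUHP_ONLINE, and each secondary CPU enters
 * from its architecture's bringup path, e.g.:
 *
 *	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);	// never returns
 */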

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}

static int
balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	return WARN_ON_ONCE(1);
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
{
	update_idle_core(rq);
	schedstat_inc(rq->sched_goidle);
}

#ifdef CONFIG_SMP
static struct task_struct *pick_task_idle(struct rq *rq)
{
	return rq->idle;
}
#endif

struct task_struct *pick_next_task_idle(struct rq *rq)
{
	struct task_struct *next = rq->idle;

	set_next_task_idle(rq, next, true);

	return next;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	raw_spin_rq_unlock_irq(rq);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_rq_lock_irq(rq);
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * runs along the remote tick, so tick-dependent state may not be up to
 * date for this CPU; the idle class has nothing to do on the tick anyway.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
DEFINE_SCHED_CLASS(idle) = {

	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,
	.set_next_task		= set_next_task_idle,

#ifdef CONFIG_SMP
	.balance		= balance_idle,
	.pick_task		= pick_task_idle,
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.task_tick		= task_tick_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};
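
/*
 * Design note (an assumption about the surrounding build machinery):
 * DEFINE_SCHED_CLASS() places this struct in a dedicated, ordered linker
 * section so the core scheduler can iterate the classes by priority with
 * plain pointer arithmetic. The idle class is the lowest-priority class,
 * which makes pick_next_task_idle() the scheduler's choice of last resort.
 */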