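/*
 * This file contains the base functions to manage periodic tick
 * related events.
 */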
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <trace/events/power.h>

#include <asm/irq_regs.h>

#include "tick-internal.h"
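
/*
 * Tick devices
 */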
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
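/*
 * Tick next event: keeps track of the tick time
 */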
ktime_t tick_next_period;
ktime_t tick_period;
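
/*
 * tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
 * which is responsible for calling do_timer(), i.e. the timekeeping stuff.
 * This variable has two functions:
 *
 * 1) Prevent a thundering herd issue of a gazillion of CPUs trying to grab
 *    the timekeeping lock all at once. Only the CPU which is assigned to do
 *    the update is handling it.
 *
 * 2) Hand off the duty in the NOHZ idle case by setting the value to
 *    TICK_DO_TIMER_NONE, i.e. a non existing CPU. So the next cpu which
 *    looks at it will take over and keep the time keeping alive. The
 *    handover procedure also covers cpu hotplug.
 */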
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
#ifdef CONFIG_NO_HZ_FULL
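/*
 * tick_do_timer_boot_cpu indicates the boot CPU temporarily owns
 * tick_do_timer_cpu and it should be taken over by an eligible secondary
 * when one comes online.
 */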
static int tick_do_timer_boot_cpu __read_mostly = -1;
#endif
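
/*
 * Debugging: see timer_list.c
 */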
struct tick_device *tick_get_device(int cpu)
{
	return &per_cpu(tick_cpu_device, cpu);
}
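
/**
 * tick_is_oneshot_available - check for a oneshot capable event device
 */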
int tick_is_oneshot_available(void)
{
	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;
	return tick_broadcast_oneshot_available();
}
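
/*
 * Periodic tick
 */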
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		write_seqlock(&jiffies_lock);
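
		/* Keep track of the next tick event */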
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&jiffies_lock);
		update_wall_time();
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}
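
/*
 * Event handler for periodic ticks
 */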
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();
	ktime_t next = dev->next_event;

	tick_periodic(cpu);

#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
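	/*
	 * The cpu might have transitioned to HIGHRES or NOHZ mode via
	 * update_process_times() -> run_local_timers() ->
	 * hrtimer_run_queues().
	 */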
	if (dev->event_handler != tick_handle_periodic)
		return;
#endif

	if (!clockevent_state_oneshot(dev))
		return;
	for (;;) {
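		/*
		 * Setup the next period for devices, which do not have
		 * periodic mode:
		 */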
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
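		/*
		 * Have to be careful here. If we're in oneshot mode,
		 * before we call tick_periodic() in a loop, we need
		 * to be sure we're using a real hardware clocksource.
		 * Otherwise we could get trapped in an infinite loop,
		 * as tick_periodic() increments jiffies, which then
		 * will increment time, possibly causing the loop to
		 * trigger again and again.
		 */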
		if (timekeeping_valid_for_hres())
			tick_periodic(cpu);
	}
}
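
/*
 * Setup the device for a periodic tick
 */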
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);
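
	/* Broadcast setup ? */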
	if (!tick_device_is_functional(dev))
		return;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !tick_broadcast_oneshot_active()) {
		clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		do {
			seq = read_seqbegin(&jiffies_lock);
			next = tick_next_period;
		} while (read_seqretry(&jiffies_lock, seq));

		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);

		for (;;) {
			if (!clockevents_program_event(dev, next, false))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}

#ifdef CONFIG_NO_HZ_FULL
static void giveup_do_timer(void *info)
{
	int cpu = *(unsigned int *)info;

	WARN_ON(tick_do_timer_cpu != smp_processor_id());

	tick_do_timer_cpu = cpu;
}

static void tick_take_do_timer_from_boot(void)
{
	int cpu = smp_processor_id();
	int from = tick_do_timer_boot_cpu;

	if (from >= 0 && from != cpu)
		smp_call_function_single(from, giveup_do_timer, &cpu, 1);
}
#endif
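
/*
 * Setup the tick device
 */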
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      const struct cpumask *cpumask)
{
	void (*handler)(struct clock_event_device *) = NULL;
	ktime_t next_event = 0;
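
	/*
	 * First device setup ?
	 */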
	if (!td->evtdev) {
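		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */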
		if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
			tick_do_timer_cpu = cpu;

			tick_next_period = ktime_get();
			tick_period = NSEC_PER_SEC / HZ;
#ifdef CONFIG_NO_HZ_FULL
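			/*
			 * The boot CPU may be nohz_full, in which case
			 * set tick_do_timer_boot_cpu so the first
			 * housekeeping secondary that comes up will
			 * take do_timer from us.
			 */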
			if (tick_nohz_full_cpu(cpu))
				tick_do_timer_boot_cpu = cpu;

		} else if (tick_do_timer_boot_cpu != -1 &&
			   !tick_nohz_full_cpu(cpu)) {
			tick_take_do_timer_from_boot();
			tick_do_timer_boot_cpu = -1;
			WARN_ON(tick_do_timer_cpu != cpu);
#endif
		}
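
		/* Startup in periodic mode */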
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
		td->evtdev->event_handler = clockevents_handle_noop;
	}

	td->evtdev = newdev;
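
	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */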
	if (!cpumask_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);
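
	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way. This function also returns !=0 when we keep the
	 * current active broadcast state for this CPU.
	 */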
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}

void tick_install_replacement(struct clock_event_device *newdev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	int cpu = smp_processor_id();

	clockevents_exchange_device(td->evtdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
}

static bool tick_check_percpu(struct clock_event_device *curdev,
			      struct clock_event_device *newdev, int cpu)
{
	if (!cpumask_test_cpu(cpu, newdev->cpumask))
		return false;
	if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
		return true;
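	/* Check if irq affinity can be set */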
	if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
		return false;
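	/* Prefer an existing cpu local device */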
	if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
		return false;
	return true;
}

static bool tick_check_preferred(struct clock_event_device *curdev,
				 struct clock_event_device *newdev)
{
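	/* Prefer oneshot capable device */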
	if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
		if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
			return false;
		if (tick_oneshot_mode_active())
			return false;
	}
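
	/*
	 * Use the higher rated one, but prefer a CPU local device with a
	 * lower rating than a non-CPU local device
	 */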
	return !curdev ||
		newdev->rating > curdev->rating ||
		!cpumask_equal(curdev->cpumask, newdev->cpumask);
}
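
/*
 * Check whether the new device is a better fit than curdev. curdev
 * can be NULL !
 */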
bool tick_check_replacement(struct clock_event_device *curdev,
			    struct clock_event_device *newdev)
{
	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
		return false;

	return tick_check_preferred(curdev, newdev);
}
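
/*
 * Check, if the new registered device should be used. Called with
 * clockevents_lock held and interrupts disabled.
 */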
void tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu;

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;
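
	/* cpu local device ? */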
	if (!tick_check_percpu(curdev, newdev, cpu))
		goto out_bc;
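
	/* Preference decision */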
	if (!tick_check_preferred(curdev, newdev))
		goto out_bc;

	if (!try_module_get(newdev->owner))
		return;
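
	/*
	 * Replace the eventually existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */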
	if (tick_is_broadcast_device(curdev)) {
		clockevents_shutdown(curdev);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();
	return;

out_bc:
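	/*
	 * Can the new device be used as a broadcast device ?
	 */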
	tick_install_broadcast_device(newdev);
}
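
/**
 * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
 * @state:	The target state (enter/exit)
 *
 * The system enters/leaves a state, where affected devices might stop.
 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
 *
 * Called with interrupts disabled, so clockevents_lock is not
 * required here because the local clock event device cannot go away
 * under us.
 */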
int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	return __tick_broadcast_oneshot_control(state);
}
EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);

#ifdef CONFIG_HOTPLUG_CPU
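/*
 * Transfer the do_timer job away from a dying cpu.
 *
 * Called with interrupts disabled. No locking required. If
 * tick_do_timer_cpu is owned by this cpu, nothing can change it.
 */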
void tick_handover_do_timer(void)
{
	if (tick_do_timer_cpu == smp_processor_id()) {
		int cpu = cpumask_first(cpu_online_mask);

		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
			TICK_DO_TIMER_NONE;
	}
}
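
/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a live CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */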
void tick_shutdown(unsigned int cpu)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
	struct clock_event_device *dev = td->evtdev;

	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
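		/*
		 * Prevent that the clock events layer tries to call
		 * the set mode function!
		 */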
		clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
		clockevents_exchange_device(dev, NULL);
		dev->event_handler = clockevents_handle_noop;
		td->evtdev = NULL;
	}
}
#endif
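
/**
 * tick_suspend_local - Suspend the local tick device
 *
 * Called from the local cpu for freeze with interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */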
void tick_suspend_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	clockevents_shutdown(td->evtdev);
}
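
/**
 * tick_resume_local - Resume the local tick device
 *
 * Called from the local CPU for unfreeze or XEN resume magic.
 *
 * No locks required. Nothing can change the per cpu device.
 */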
void tick_resume_local(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool broadcast = tick_resume_check_broadcast();

	clockevents_tick_resume(td->evtdev);
	if (!broadcast) {
		if (td->mode == TICKDEV_MODE_PERIODIC)
			tick_setup_periodic(td->evtdev, 0);
		else
			tick_resume_oneshot();
	}
}
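
/**
 * tick_suspend - Suspend the tick and the broadcast device
 *
 * Called from syscore_suspend() via timekeeping_suspend with only one
 * CPU online and interrupts disabled or from tick_unfreeze() under
 * tick_freeze_lock.
 *
 * No locks required. Nothing can change the per cpu device.
 */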
void tick_suspend(void)
{
	tick_suspend_local();
	tick_suspend_broadcast();
}
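
/**
 * tick_resume - Resume the tick and the broadcast device
 *
 * Called from syscore_resume() via timekeeping_resume with only one
 * CPU online and interrupts disabled.
 *
 * No locks required. Nothing can change the per cpu device.
 */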
void tick_resume(void)
{
	tick_resume_broadcast();
	tick_resume_local();
}

#ifdef CONFIG_SUSPEND
static DEFINE_RAW_SPINLOCK(tick_freeze_lock);
static unsigned int tick_freeze_depth;
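
/**
 * tick_freeze - Suspend the local tick and (possibly) timekeeping.
 *
 * Check if this is the last online CPU executing the function and if so,
 * suspend timekeeping. Otherwise suspend the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_unfreeze().
 * Interrupts must not be enabled before the subsequent %tick_resume().
 */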
void tick_freeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	tick_freeze_depth++;
	if (tick_freeze_depth == num_online_cpus()) {
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), true);
		system_state = SYSTEM_SUSPEND;
		timekeeping_suspend();
	} else {
		tick_suspend_local();
	}

	raw_spin_unlock(&tick_freeze_lock);
}
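
/**
 * tick_unfreeze - Resume the local tick and (possibly) timekeeping.
 *
 * Check if this is the first CPU executing the function and if so, resume
 * timekeeping. Otherwise resume the local tick.
 *
 * Call with interrupts disabled. Must be balanced with %tick_freeze().
 * Interrupts must not be enabled after the preceding %tick_suspend().
 */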
void tick_unfreeze(void)
{
	raw_spin_lock(&tick_freeze_lock);

	if (tick_freeze_depth == num_online_cpus()) {
		timekeeping_resume();
		system_state = SYSTEM_RUNNING;
		trace_suspend_resume(TPS("timekeeping_freeze"),
				     smp_processor_id(), false);
	} else {
		tick_resume_local();
	}

	tick_freeze_depth--;

	raw_spin_unlock(&tick_freeze_lock);
}
#endif
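
/**
 * tick_init - initialize the tick control
 */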
void __init tick_init(void)
{
	tick_broadcast_init();
	tick_nohz_init();
}