1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/cpu.h>
15#include <linux/err.h>
16#include <linux/hrtimer.h>
17#include <linux/interrupt.h>
18#include <linux/percpu.h>
19#include <linux/profile.h>
20#include <linux/sched.h>
21#include <linux/smp.h>
22
23#include "tick-internal.h"
24
25
26
27
28
29
/* The clock event device used to wake CPUs whose local timer has stopped. */
static struct tick_device tick_broadcast_device;
/* CPUs whose local tick device is handled by the broadcast device. */
static cpumask_var_t tick_broadcast_mask;
/* Scratch mask; only valid while tick_broadcast_lock is held. */
static cpumask_var_t tmpmask;
/* Serializes the broadcast device and all broadcast cpumasks. */
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
/* Set by CLOCK_EVT_NOTIFY_BROADCAST_FORCE; blocks subsequent OFF requests. */
static int tick_broadcast_force;
35
#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
/* Without oneshot support there is no oneshot mask to clear. */
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif
41
42
43
44
45struct tick_device *tick_get_broadcast_device(void)
46{
47 return &tick_broadcast_device;
48}
49
50struct cpumask *tick_get_broadcast_mask(void)
51{
52 return tick_broadcast_mask;
53}
54
55
56
57
/*
 * Start the broadcast device in periodic mode, if one is installed.
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (!bc)
		return;
	tick_setup_periodic(bc, 1);
}
63
64
65
66
67int tick_check_broadcast_device(struct clock_event_device *dev)
68{
69 struct clock_event_device *cur = tick_broadcast_device.evtdev;
70
71 if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
72 (tick_broadcast_device.evtdev &&
73 tick_broadcast_device.evtdev->rating >= dev->rating) ||
74 (dev->features & CLOCK_EVT_FEAT_C3STOP))
75 return 0;
76
77 clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
78 if (cur)
79 cur->event_handler = clockevents_handle_noop;
80 tick_broadcast_device.evtdev = dev;
81 if (!cpumask_empty(tick_broadcast_mask))
82 tick_broadcast_start_periodic(dev);
83
84
85
86
87
88
89
90
91 if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
92 tick_clock_notify();
93 return 1;
94}
95
96
97
98
99int tick_is_broadcast_device(struct clock_event_device *dev)
100{
101 return (dev && tick_broadcast_device.evtdev == dev);
102}
103
/*
 * Fallback broadcast function, installed when no real broadcast
 * mechanism exists: warn once that broadcast ticks cannot be
 * delivered (the 'mask' argument is intentionally unused).
 */
static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}
108
/*
 * Install a broadcast function on a tick device that depends on
 * broadcast wakeups.
 */
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	/*
	 * Deliberate second check: 'tick_broadcast' itself may be NULL
	 * when the architecture provides no broadcast mechanism, in
	 * which case fall back to the warning stub.
	 */
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}
119
120
121
122
123
/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device. Returns 1 when the
 * device is handled via broadcast, 0 otherwise.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			/*
			 * NOTE(review): this 'cpu' shadows the function
			 * parameter; both should name the same CPU here,
			 * but confirm before simplifying.
			 */
			int cpu = smp_processor_id();
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		} else {
			/* Power-state affected: needs a broadcast function. */
			tick_device_setup_broadcast_func(dev);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
160
161#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
162int tick_receive_broadcast(void)
163{
164 struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
165 struct clock_event_device *evt = td->evtdev;
166
167 if (!evt)
168 return -ENODEV;
169
170 if (!evt->event_handler)
171 return -EINVAL;
172
173 evt->event_handler(evt);
174 return 0;
175}
176#endif
177
178
179
180
/*
 * Broadcast the event to the cpus, which are set in the mask. The
 * mask is modified: the current CPU is removed after being handled
 * locally.
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask: handle its event
	 * directly instead of sending an IPI to ourselves.
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the
		 * devices have different broadcast functions. For now,
		 * just use the broadcast function of the first device
		 * in the mask for all remaining CPUs.
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}
206
207
208
209
210
/*
 * Periodic broadcast:
 * - invoke the broadcast handlers on all online CPUs which are
 *   marked in tick_broadcast_mask.
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	/* Only online CPUs get a broadcast tick. */
	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);

	raw_spin_unlock(&tick_broadcast_lock);
}
220
221
222
223
/*
 * Event handler for periodic broadcast ticks.
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices which do not have periodic
	 * mode. We read dev->next_event first and add to it when the
	 * event already expired. clockevents_program_event() sets
	 * dev->next_event only when the event is really programmed to
	 * the device, so a failed program means the event was already
	 * past and the broadcast must be delivered again.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}
251
252
253
254
255
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop. Adds/removes the current CPU to/from
 * the broadcast mask and starts/stops the broadcast device as needed.
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	/* Non-functional devices are already handled via broadcast. */
	if (!tick_device_is_functional(dev))
		goto out;

	/* Remember whether the broadcast device was idle before the change. */
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			/*
			 * In periodic mode the broadcast device takes
			 * over the tick, so stop the local device.
			 */
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		/* A forced broadcast cannot be switched off. */
		if (!tick_broadcast_force &&
		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		/* Last user gone: stop the broadcast device. */
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		/* First user: start the broadcast device in the right mode. */
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
314
315
316
317
318
319void tick_broadcast_on_off(unsigned long reason, int *oncpu)
320{
321 if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
322 printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
323 "offline CPU #%d\n", *oncpu);
324 else
325 tick_do_broadcast_on_off(&reason);
326}
327
328
329
330
331void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
332{
333 if (!broadcast)
334 dev->event_handler = tick_handle_periodic;
335 else
336 dev->event_handler = tick_handle_periodic_broadcast;
337}
338
339
340
341
/*
 * Remove a CPU from broadcasting; called when a CPU goes away.
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);

	/* Stop the broadcast device when the last user departs. */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
360
361void tick_suspend_broadcast(void)
362{
363 struct clock_event_device *bc;
364 unsigned long flags;
365
366 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
367
368 bc = tick_broadcast_device.evtdev;
369 if (bc)
370 clockevents_shutdown(bc);
371
372 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
373}
374
/*
 * Resume the broadcast device after a system suspend. Returns
 * non-zero when the current CPU depends on broadcast ticks.
 */
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			/* Restart periodic broadcast if CPUs still need it. */
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}
405
406
407#ifdef CONFIG_TICK_ONESHOT
408
/* CPUs which are in deep idle and depend on the oneshot broadcast. */
static cpumask_var_t tick_broadcast_oneshot_mask;
/* CPUs whose expired event will be delivered by the broadcast IPI. */
static cpumask_var_t tick_broadcast_pending_mask;
/* CPUs whose local event already expired on idle exit; IPI pending. */
static cpumask_var_t tick_broadcast_force_mask;
412
413
414
415
416struct cpumask *tick_get_broadcast_oneshot_mask(void)
417{
418 return tick_broadcast_oneshot_mask;
419}
420
421
422
423
424
425
426
427
428int tick_check_broadcast_expired(void)
429{
430 return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
431}
432
433
434
435
436static void tick_broadcast_set_affinity(struct clock_event_device *bc,
437 const struct cpumask *cpumask)
438{
439 if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
440 return;
441
442 if (cpumask_equal(bc->cpumask, cpumask))
443 return;
444
445 bc->cpumask = cpumask;
446 irq_set_affinity(bc->irq, bc->cpumask);
447}
448
/*
 * Program the broadcast device for the next event on behalf of 'cpu'
 * and steer its interrupt to that CPU. Returns the result of
 * clockevents_program_event() (non-zero on failure).
 */
static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	/* The device must be in oneshot mode before programming. */
	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}
462
/*
 * Resume the oneshot broadcast device: just restore oneshot mode.
 * Always returns 0 (no immediate broadcast dependency reported).
 */
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}
468
469
470
471
472
/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device if the CPU is marked in the oneshot broadcast mask.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}
481
482
483
484
/*
 * Handle oneshot mode broadcasting: wake all CPUs whose local event
 * expired and rearm the broadcast device for the earliest remaining
 * event.
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			/* Track the earliest not-yet-expired event. */
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogramming:
	 *
	 * - The broadcast event did not expire any CPU local events.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 *   in the event mask.
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If the event already
		 * expired while programming, repeat the whole scan.
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}
549
550
551
552
553
/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop. Switches the calling CPU into/out of
 * the oneshot broadcast scheme on idle enter/exit.
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	ktime_t now;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemption disabled from the depth of
	 * the idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	/* Devices which keep ticking in deep idle need no broadcast. */
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * If there is no next event on the local
			 * device, nothing needs to be reprogrammed.
			 */
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;

			/*
			 * We are not in the pending mask, so we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something
			 * else.
			 *
			 * If the cpu local expiry time is already
			 * reached, reprogramming the local timer
			 * would install an already expired event;
			 * instead mark this CPU in the force mask so
			 * the broadcast handler delivers the wakeup
			 * via IPI. This avoids a ping-pong where the
			 * forced reprogramming pushes the event into
			 * the future and the CPU returns to idle and
			 * rearms the broadcast timer before the local
			 * timer was able to fire.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}

			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
667
668
669
670
671
672
/*
 * Reset the one shot broadcast for a cpu.
 *
 * Called with tick_broadcast_lock held.
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
}
677
678static void tick_broadcast_init_next_event(struct cpumask *mask,
679 ktime_t expires)
680{
681 struct tick_device *td;
682 int cpu;
683
684 for_each_cpu(cpu, mask) {
685 td = &per_cpu(tick_cpu_device, cpu);
686 if (td->evtdev)
687 td->evtdev->next_event = expires;
688 }
689}
690
691
692
693
/**
 * tick_broadcast_setup_oneshot - setup the broadcast device in oneshot mode
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			/* Those CPUs are next due at the next period tick. */
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first cpu, which boots the second
		 * cpu, from the broadcast mask. Clear it manually.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}
733
734
735
736
/*
 * Select oneshot operating mode for the broadcast device.
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
751
752
753
754
755
756void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
757{
758 unsigned long flags;
759 unsigned int cpu = *cpup;
760
761 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
762
763
764
765
766
767 cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
768
769 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
770}
771
772
773
774
/*
 * Check whether the broadcast device is in oneshot mode.
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}
779
780
781
782
783bool tick_broadcast_oneshot_available(void)
784{
785 struct clock_event_device *bc = tick_broadcast_device.evtdev;
786
787 return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
788}
789
790#endif
791
/*
 * Allocate the broadcast cpumasks at boot. GFP_NOWAIT because this
 * runs early, before the allocator may sleep.
 */
void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}
802