1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/cpu.h>
15#include <linux/err.h>
16#include <linux/hrtimer.h>
17#include <linux/interrupt.h>
18#include <linux/percpu.h>
19#include <linux/profile.h>
20#include <linux/sched.h>
21#include <linux/smp.h>
22#include <linux/module.h>
23
24#include "tick-internal.h"
25
26
27
28
29
30
31static struct tick_device tick_broadcast_device;
32static cpumask_var_t tick_broadcast_mask __cpumask_var_read_mostly;
33static cpumask_var_t tick_broadcast_on __cpumask_var_read_mostly;
34static cpumask_var_t tmpmask __cpumask_var_read_mostly;
35static int tick_broadcast_forced;
36
37static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
38
39#ifdef CONFIG_TICK_ONESHOT
40static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
41static void tick_broadcast_clear_oneshot(int cpu);
42static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
43#else
44static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
45static inline void tick_broadcast_clear_oneshot(int cpu) { }
46static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
47#endif
48
49
50
51
52struct tick_device *tick_get_broadcast_device(void)
53{
54 return &tick_broadcast_device;
55}
56
57struct cpumask *tick_get_broadcast_mask(void)
58{
59 return tick_broadcast_mask;
60}
61
62
63
64
/*
 * Start the broadcast device in periodic mode, if one is registered.
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (!bc)
		return;

	tick_setup_periodic(bc, 1);
}
70
71
72
73
74static bool tick_check_broadcast_device(struct clock_event_device *curdev,
75 struct clock_event_device *newdev)
76{
77 if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
78 (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
79 (newdev->features & CLOCK_EVT_FEAT_C3STOP))
80 return false;
81
82 if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
83 !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
84 return false;
85
86 return !curdev || newdev->rating > curdev->rating;
87}
88
89
90
91
/*
 * Conditionally install/replace the broadcast device.
 *
 * The outgoing device's handler is pointed at a noop so a late
 * interrupt from it cannot run stale broadcast logic.
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	/* Pin the owning module for as long as we use the device. */
	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	/* Re-arm periodic broadcast if CPUs are already relying on it. */
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * If the new device is oneshot capable, notify the tick core so
	 * it can re-evaluate a switch to oneshot/highres mode that may
	 * have been blocked by the lack of a suitable broadcast device.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}
119
120
121
122
123int tick_is_broadcast_device(struct clock_event_device *dev)
124{
125 return (dev && tick_broadcast_device.evtdev == dev);
126}
127
128int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
129{
130 int ret = -ENODEV;
131
132 if (tick_is_broadcast_device(dev)) {
133 raw_spin_lock(&tick_broadcast_lock);
134 ret = __clockevents_update_freq(dev, freq);
135 raw_spin_unlock(&tick_broadcast_lock);
136 }
137 return ret;
138}
139
140
/*
 * Last-resort broadcast callback: there is nothing we can do except
 * complain once. CPUs depending on broadcast may miss wakeups.
 */
static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}
145
/*
 * Install a broadcast transport function on @dev if it has none yet.
 */
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	/*
	 * NOTE(review): the assignment above fills in any NULL
	 * ->broadcast, so this fallback looks unreachable as written.
	 * Presumably it covers configurations where tick_broadcast is
	 * not a usable transport — confirm against tick-internal.h;
	 * upstream keeps these as two #ifdef'ed alternatives.
	 */
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			dev->name);
		dev->broadcast = err_broadcast;
	}
}
156
157
158
159
160
/*
 * Check if the device is a placeholder which needs to be handled by
 * the broadcast device. Returns 1 when @cpu must be ticked via
 * broadcast (caller leaves the local device shut down), 0 otherwise.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals that the device needs to be
	 * operated on the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the device
		 * is not power-state affected; otherwise make sure it
		 * has a broadcast function for deep-idle wakeups.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast-on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * In oneshot mode the oneshot-mask bit can be
			 * cleared unconditionally: the CPU is running
			 * and therefore not in the idle state which
			 * stops a power-state affected device. Let
			 * the caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * In periodic mode, check whether the
			 * broadcast device can be switched off now
			 * that this cpu may have left the mask.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per-cpu device
			 * in shutdown state: the periodic interrupt
			 * is then delivered by the broadcast device —
			 * but only if that device exists and is not
			 * hrtimer based.
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
				ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
241
242#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
243int tick_receive_broadcast(void)
244{
245 struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
246 struct clock_event_device *evt = td->evtdev;
247
248 if (!evt)
249 return -ENODEV;
250
251 if (!evt->event_handler)
252 return -EINVAL;
253
254 evt->event_handler(evt);
255 return 0;
256}
257#endif
258
259
260
261
/*
 * Broadcast the event to the cpus which are set in @mask (the mask is
 * mangled in the process). Returns true when the current cpu has to
 * run its tick handler locally.
 */
static bool tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;
	bool local = false;

	/*
	 * Check if the current cpu is in the mask.
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		struct clock_event_device *bc = tick_broadcast_device.evtdev;

		cpumask_clear_cpu(cpu, mask);
		/*
		 * Only run the local handler if the broadcast device
		 * is not hrtimer based; otherwise we would recurse:
		 *
		 *   local timer interrupt
		 *     local handler
		 *       expire hrtimers
		 *         broadcast handler
		 *           local handler ...
		 */
		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * Use the broadcast function of the first device in
		 * the mask for all remote cpus. NOTE(review): this
		 * presumably assumes all devices share the same
		 * transport — confirm if per-device transports differ.
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
	return local;
}
302
303
304
305
306
307static bool tick_do_periodic_broadcast(void)
308{
309 cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
310 return tick_do_broadcast(tmpmask);
311}
312
313
314
315
/*
 * Event handler for periodic broadcast ticks.
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);

	/* Handle spurious interrupts gracefully. */
	if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
		raw_spin_unlock(&tick_broadcast_lock);
		return;
	}

	bc_local = tick_do_periodic_broadcast();

	/* A oneshot-state broadcast device must be re-armed each period. */
	if (clockevent_state_oneshot(dev)) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		clockevents_program_event(dev, next, true);
	}
	raw_spin_unlock(&tick_broadcast_lock);

	/*
	 * Run the local cpu handler only after dropping
	 * tick_broadcast_lock: the handler may itself take paths that
	 * acquire the lock (e.g. a mode switch), which would deadlock.
	 */
	if (bc_local)
		td->evtdev->event_handler(td->evtdev);
}
346
347
348
349
350
351
352
353
/**
 * tick_broadcast_control - Enable/disable or force broadcast mode
 * @mode:	The selected broadcast mode
 *
 * Called by the cpuidle/power code when the local device is affected
 * by the target power state. Note: TICK_BROADCAST_FORCE is sticky and
 * cannot be undone by TICK_BROADCAST_OFF.
 */
void tick_broadcast_control(enum tick_broadcast_mode mode)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, bc_stopped;
	unsigned long flags;

	/* Protects also the local clockevent device. */
	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	/*
	 * Is the device not affected by the power state?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	cpu = smp_processor_id();
	bc = tick_broadcast_device.evtdev;
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (mode) {
	case TICK_BROADCAST_FORCE:
		tick_broadcast_forced = 1;
		/* fall through - FORCE implies ON */
	case TICK_BROADCAST_ON:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			/*
			 * Only shut down the cpu local device if:
			 *
			 * - the broadcast device exists
			 * - the broadcast device is not hrtimer based
			 * - the broadcast device is in periodic mode
			 *   (avoids a hiccup during the switch to
			 *   oneshot mode)
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
			    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		break;

	case TICK_BROADCAST_OFF:
		/* FORCE is sticky: OFF is ignored once forced. */
		if (tick_broadcast_forced)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (!tick_device_is_functional(dev))
			break;
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	/* Start or stop the broadcast device as the mask population changed. */
	if (bc) {
		if (cpumask_empty(tick_broadcast_mask)) {
			if (!bc_stopped)
				clockevents_shutdown(bc);
		} else if (bc_stopped) {
			if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				tick_broadcast_start_periodic(bc);
			else
				tick_broadcast_setup_oneshot(bc);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
427EXPORT_SYMBOL_GPL(tick_broadcast_control);
428
429
430
431
432void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
433{
434 if (!broadcast)
435 dev->event_handler = tick_handle_periodic;
436 else
437 dev->event_handler = tick_handle_periodic_broadcast;
438}
439
440#ifdef CONFIG_HOTPLUG_CPU
441
442
443
444void tick_shutdown_broadcast(unsigned int cpu)
445{
446 struct clock_event_device *bc;
447 unsigned long flags;
448
449 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
450
451 bc = tick_broadcast_device.evtdev;
452 cpumask_clear_cpu(cpu, tick_broadcast_mask);
453 cpumask_clear_cpu(cpu, tick_broadcast_on);
454
455 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
456 if (bc && cpumask_empty(tick_broadcast_mask))
457 clockevents_shutdown(bc);
458 }
459
460 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
461}
462#endif
463
464void tick_suspend_broadcast(void)
465{
466 struct clock_event_device *bc;
467 unsigned long flags;
468
469 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
470
471 bc = tick_broadcast_device.evtdev;
472 if (bc)
473 clockevents_shutdown(bc);
474
475 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
476}
477
478
479
480
481
482
483
484
485
486bool tick_resume_check_broadcast(void)
487{
488 if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT)
489 return false;
490 else
491 return cpumask_test_cpu(smp_processor_id(), tick_broadcast_mask);
492}
493
494void tick_resume_broadcast(void)
495{
496 struct clock_event_device *bc;
497 unsigned long flags;
498
499 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
500
501 bc = tick_broadcast_device.evtdev;
502
503 if (bc) {
504 clockevents_tick_resume(bc);
505
506 switch (tick_broadcast_device.mode) {
507 case TICKDEV_MODE_PERIODIC:
508 if (!cpumask_empty(tick_broadcast_mask))
509 tick_broadcast_start_periodic(bc);
510 break;
511 case TICKDEV_MODE_ONESHOT:
512 if (!cpumask_empty(tick_broadcast_mask))
513 tick_resume_broadcast_oneshot(bc);
514 break;
515 }
516 }
517 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
518}
519
520#ifdef CONFIG_TICK_ONESHOT
521
522static cpumask_var_t tick_broadcast_oneshot_mask __cpumask_var_read_mostly;
523static cpumask_var_t tick_broadcast_pending_mask __cpumask_var_read_mostly;
524static cpumask_var_t tick_broadcast_force_mask __cpumask_var_read_mostly;
525
526
527
528
529struct cpumask *tick_get_broadcast_oneshot_mask(void)
530{
531 return tick_broadcast_oneshot_mask;
532}
533
534
535
536
537
538
539
540
541int tick_check_broadcast_expired(void)
542{
543 return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
544}
545
546
547
548
549static void tick_broadcast_set_affinity(struct clock_event_device *bc,
550 const struct cpumask *cpumask)
551{
552 if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
553 return;
554
555 if (cpumask_equal(bc->cpumask, cpumask))
556 return;
557
558 bc->cpumask = cpumask;
559 irq_set_affinity(bc->irq, bc->cpumask);
560}
561
562static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
563 ktime_t expires)
564{
565 if (!clockevent_state_oneshot(bc))
566 clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
567
568 clockevents_program_event(bc, expires, 1);
569 tick_broadcast_set_affinity(bc, cpumask_of(cpu));
570}
571
572static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
573{
574 clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
575}
576
577
578
579
580
581void tick_check_oneshot_broadcast_this_cpu(void)
582{
583 if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
584 struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
585
586
587
588
589
590
591 if (td->mode == TICKDEV_MODE_ONESHOT) {
592 clockevents_switch_state(td->evtdev,
593 CLOCK_EVT_STATE_ONESHOT);
594 }
595 }
596}
597
598
599
600
/*
 * Handle oneshot mode broadcasting: wake all cpus whose local event
 * has expired and re-arm the broadcast device for the earliest
 * remaining event.
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);
	dev->next_event = KTIME_MAX;
	next_event = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events. */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		/*
		 * Required for !SMP: for_each_cpu() reports CPU0 as
		 * set unconditionally on UP kernels.
		 */
		if (!IS_ENABLED(CONFIG_SMP) &&
		    cpumask_empty(tick_broadcast_oneshot_mask))
			break;

		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event <= now) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask so
			 * it can avoid reprogramming its local timer
			 * on broadcast exit.
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event < next_event) {
			next_event = td->evtdev->next_event;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask: its event is
	 * delivered immediately in tick_do_broadcast().
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests. */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check: catch an attempt to broadcast to offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wake the cpus which have an expired event.
	 */
	bc_local = tick_do_broadcast(tmpmask);

	/*
	 * Reprogram the broadcast device when any future event
	 * remains — either because this expiry did not cover every
	 * cpu-local event, or because sleeping cpus still have
	 * pending events outside the wakeup mask.
	 */
	if (next_event != KTIME_MAX)
		tick_broadcast_set_event(dev, next_cpu, next_event);

	raw_spin_unlock(&tick_broadcast_lock);

	/* Local delivery happens after the lock is dropped. */
	if (bc_local) {
		td = this_cpu_ptr(&tick_cpu_device);
		td->evtdev->event_handler(td->evtdev);
	}
}
680
681static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
682{
683 if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
684 return 0;
685 if (bc->next_event == KTIME_MAX)
686 return 0;
687 return bc->bound_on == cpu ? -EBUSY : 0;
688}
689
/*
 * Shut down the cpu local tick device on broadcast entry.
 *
 * For a hrtimer-based broadcast device we must keep the local device
 * running when this cpu owns the broadcast timer, or when the local
 * event expires before the broadcast event (the broadcast hrtimer
 * runs off a local device).
 */
static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event < bc->next_event)
			return;
	}
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}
706
/*
 * Transition this CPU into (TICK_BROADCAST_ENTER) or out of
 * (TICK_BROADCAST_EXIT) broadcast-covered deep idle.
 *
 * Returns 0 on success and -EBUSY when the CPU must not enter deep
 * idle (no usable broadcast device, or this CPU owns the hrtimer
 * based broadcast, or its wakeup is already imminent).
 */
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc, *dev;
	int cpu, ret = 0;
	ktime_t now;

	/*
	 * If there is no broadcast device, tell the caller not to go
	 * into deep idle.
	 */
	if (!tick_broadcast_device.evtdev)
		return -EBUSY;

	dev = this_cpu_ptr(&tick_cpu_device)->evtdev;

	raw_spin_lock(&tick_broadcast_lock);
	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();

	if (state == TICK_BROADCAST_ENTER) {
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we do not add
		 * it to the broadcast mask. No EXIT path is needed
		 * because the local timer is not shut down.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			goto out;

		/*
		 * If the broadcast device is in periodic mode there
		 * is nothing to arm.
		 */
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
			/* A hrtimer based broadcast cannot cover deep idle. */
			if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
				ret = -EBUSY;
			goto out;
		}

		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));

			/* Conditionally shut down the local timer. */
			broadcast_shutdown_local(bc, dev);

			/*
			 * Only reprogram the broadcast device when we
			 * are not in the force mask and the cpu local
			 * event is earlier than the broadcast event.
			 * A cpu in the force mask is about to be
			 * woken by the IPI anyway, so report busy and
			 * keep it out of deep idle.
			 */
			if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
				ret = -EBUSY;
			} else if (dev->next_event < bc->next_event) {
				tick_broadcast_set_event(bc, cpu, dev->next_event);
				/*
				 * For hrtimer based broadcasts the
				 * programming may have migrated the
				 * broadcast timer to this cpu; if so,
				 * leave the mask again and report busy.
				 */
				ret = broadcast_needs_cpu(bc, cpu);
				if (ret) {
					cpumask_clear_cpu(cpu,
						tick_broadcast_oneshot_mask);
				}
			}
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
			/*
			 * If the broadcast-handling cpu marked us in
			 * the pending mask and fired the IPI, the
			 * expired event will be handled via that IPI;
			 * no need to reprogram an already expired
			 * event here.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event == KTIME_MAX)
				goto out;

			/*
			 * The pending bit is not set, so we were
			 * either the broadcast-handling cpu or woken
			 * by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * reprogramming an already-reached local
			 * expiry would force the event into the
			 * future (min_delta) and could start a
			 * ping-pong: we return to idle and rearm the
			 * broadcast timer before the local timer
			 * fires.
			 *
			 * If the local next_event has expired, the
			 * broadcast next_event has expired as well
			 * and the broadcast handler is about to run;
			 * mark ourselves in the force mask so that
			 * handler invokes the local handler instead
			 * of reprogramming here.
			 *
			 * We cannot call the handler directly: we
			 * might be in a NOHZ phase without the
			 * irq_enter() nohz fixups applied.
			 */
			now = ktime_get();
			if (dev->next_event <= now) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * Woken by something else: reprogram the cpu
			 * local timer device for the pending event.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock(&tick_broadcast_lock);
	return ret;
}
852
853
854
855
856
857
858static void tick_broadcast_clear_oneshot(int cpu)
859{
860 cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
861 cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
862}
863
864static void tick_broadcast_init_next_event(struct cpumask *mask,
865 ktime_t expires)
866{
867 struct tick_device *td;
868 int cpu;
869
870 for_each_cpu(cpu, mask) {
871 td = &per_cpu(tick_cpu_device, cpu);
872 if (td->evtdev)
873 td->evtdev->next_event = expires;
874 }
875}
876
877
878
879
/**
 * tick_broadcast_setup_oneshot - setup the broadcast device for oneshot
 */
static void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	if (!bc)
		return;

	/* Set it up only once! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = clockevent_state_periodic(bc);

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * Other CPUs might still be waiting for a periodic
		 * broadcast. Set their oneshot-mask bits and program
		 * the broadcast device to fire for them.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period);
		} else
			bc->next_event = KTIME_MAX;
	} else {
		/*
		 * The first cpu switching to oneshot mode set the bit
		 * for all other cpus in the broadcast mask — including
		 * this one, which would prevent it from getting its
		 * tick reprogrammed. Clear it.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}
922
923
924
925
926void tick_broadcast_switch_to_oneshot(void)
927{
928 struct clock_event_device *bc;
929 unsigned long flags;
930
931 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
932
933 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
934 bc = tick_broadcast_device.evtdev;
935 if (bc)
936 tick_broadcast_setup_oneshot(bc);
937
938 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
939}
940
941#ifdef CONFIG_HOTPLUG_CPU
/*
 * Called on the surviving CPU when @deadcpu goes offline: if the dead
 * cpu owned the hrtimer based broadcast, reprogram the event here so
 * the broadcast duty migrates to this CPU.
 */
void hotplug_cpu__broadcast_tick_pull(int deadcpu)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	bc = tick_broadcast_device.evtdev;

	if (bc && broadcast_needs_cpu(bc, deadcpu)) {
		/* This moves the broadcast assignment to this CPU. */
		clockevents_program_event(bc, bc->next_event, 1);
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
956
957
958
959
/*
 * Remove a dead CPU from the oneshot broadcast bookkeeping.
 */
void tick_shutdown_broadcast_oneshot(unsigned int cpu)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear all broadcast masks for the dead cpu, but do NOT stop
	 * the broadcast device — other cpus may still depend on it.
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
976#endif
977
978
979
980
981int tick_broadcast_oneshot_active(void)
982{
983 return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
984}
985
986
987
988
989bool tick_broadcast_oneshot_available(void)
990{
991 struct clock_event_device *bc = tick_broadcast_device.evtdev;
992
993 return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
994}
995
996#else
997int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
998{
999 struct clock_event_device *bc = tick_broadcast_device.evtdev;
1000
1001 if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
1002 return -EBUSY;
1003
1004 return 0;
1005}
1006#endif
1007
1008void __init tick_broadcast_init(void)
1009{
1010 zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
1011 zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
1012 zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
1013#ifdef CONFIG_TICK_ONESHOT
1014 zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
1015 zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
1016 zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
1017#endif
1018}
1019