1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/cpu.h>
15#include <linux/err.h>
16#include <linux/hrtimer.h>
17#include <linux/interrupt.h>
18#include <linux/percpu.h>
19#include <linux/profile.h>
20#include <linux/sched.h>
21#include <linux/smp.h>
22#include <linux/module.h>
23
24#include "tick-internal.h"
25
26
27
28
29
30
/*
 * State shared between the periodic and the oneshot broadcast code.
 * Protected by tick_broadcast_lock.
 */
static struct tick_device tick_broadcast_device;
/* CPUs whose per-cpu tick device is serviced by the broadcast device. */
static cpumask_var_t tick_broadcast_mask;
/* CPUs for which broadcasting was explicitly switched on. */
static cpumask_var_t tick_broadcast_on;
/* Scratch cpumask for the broadcast code paths. */
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
/* Set when broadcasting was forced on; blocks BROADCAST_OFF requests. */
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
/* Stub when oneshot support is not configured. */
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif
43
44
45
46
47struct tick_device *tick_get_broadcast_device(void)
48{
49 return &tick_broadcast_device;
50}
51
52struct cpumask *tick_get_broadcast_mask(void)
53{
54 return tick_broadcast_mask;
55}
56
57
58
59
/*
 * Start the device in periodic mode, if it is present.
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (!bc)
		return;

	tick_setup_periodic(bc, 1);
}
65
66
67
68
69static bool tick_check_broadcast_device(struct clock_event_device *curdev,
70 struct clock_event_device *newdev)
71{
72 if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
73 (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
74 (newdev->features & CLOCK_EVT_FEAT_C3STOP))
75 return false;
76
77 if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
78 !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
79 return false;
80
81 return !curdev || newdev->rating > curdev->rating;
82}
83
84
85
86
/*
 * Conditionally install/replace broadcast device
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if (!tick_check_broadcast_device(cur, dev))
		return;

	/* Pin the owning module so the device cannot vanish under us. */
	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(cur, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	/* If CPUs already depend on broadcast, start it right away. */
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system would stay stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}
114
115
116
117
118int tick_is_broadcast_device(struct clock_event_device *dev)
119{
120 return (dev && tick_broadcast_device.evtdev == dev);
121}
122
123int tick_broadcast_update_freq(struct clock_event_device *dev, u32 freq)
124{
125 int ret = -ENODEV;
126
127 if (tick_is_broadcast_device(dev)) {
128 raw_spin_lock(&tick_broadcast_lock);
129 ret = __clockevents_update_freq(dev, freq);
130 raw_spin_unlock(&tick_broadcast_lock);
131 }
132 return ret;
133}
134
135
/*
 * Fallback broadcast handler, installed when a device depends on
 * broadcast but no real broadcast function is available. Complains
 * once; the CPUs relying on broadcast will not be woken.
 */
static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}
140
/*
 * Install the broadcast callback for @dev, falling back to
 * err_broadcast() when none is available.
 */
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	/*
	 * NOTE(review): the second check is not redundant — tick_broadcast
	 * itself is presumably NULL on configurations without broadcast
	 * support (defined in tick-internal.h); confirm against that header.
	 */
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}
151
152
153
154
155
/*
 * Check, if the device is disfunctional and a placeholder, which
 * needs to be handled by the broadcast device. Returns non-zero when
 * the caller must leave the per-cpu device in shutdown state.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated on the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
		ret = 1;
	} else {
		/*
		 * Clear the broadcast bit for this cpu if the
		 * device is not power state affected, otherwise make
		 * sure the broadcast function is set up.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);
		else
			tick_device_setup_broadcast_func(dev);

		/*
		 * Clear the broadcast bit if the CPU is not in
		 * periodic broadcast on state.
		 */
		if (!cpumask_test_cpu(cpu, tick_broadcast_on))
			cpumask_clear_cpu(cpu, tick_broadcast_mask);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_ONESHOT:
			/*
			 * If the system is in oneshot mode we can
			 * unconditionally clear the oneshot mask bit,
			 * because the CPU is running and therefore
			 * not in an idle state which causes the power
			 * state affected device to stop. Let the
			 * caller initialize the device.
			 */
			tick_broadcast_clear_oneshot(cpu);
			ret = 0;
			break;

		case TICKDEV_MODE_PERIODIC:
			/*
			 * If the system is in periodic mode, check
			 * whether the broadcast device can be
			 * switched off now.
			 */
			if (cpumask_empty(tick_broadcast_mask) && bc)
				clockevents_shutdown(bc);
			/*
			 * If we kept the cpu in the broadcast mask,
			 * tell the caller to leave the per cpu device
			 * in shutdown state. The periodic interrupt
			 * is delivered by the broadcast device.
			 */
			ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
			break;
		default:
			/* Nothing to do */
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
235
236#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
237int tick_receive_broadcast(void)
238{
239 struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
240 struct clock_event_device *evt = td->evtdev;
241
242 if (!evt)
243 return -ENODEV;
244
245 if (!evt->event_handler)
246 return -EINVAL;
247
248 evt->event_handler(evt);
249 return 0;
250}
251#endif
252
253
254
255
/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask: deliver its event
	 * directly, no IPI needed.
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the
		 * devices have different broadcast functions. For now,
		 * just use the broadcast function of the first device
		 * in the remaining mask.
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}
281
282
283
284
285
286static void tick_do_periodic_broadcast(void)
287{
288 cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
289 tick_do_broadcast(tmpmask);
290}
291
292
293
294
/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	raw_spin_lock(&tick_broadcast_lock);

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		goto unlock;

	/*
	 * Setup the next period for devices which do not have periodic
	 * mode. We read dev->next_event first and add to it when the
	 * event already expired. clockevents_program_event() sets
	 * dev->next_event only when the event is really programmed to
	 * the device, so keep re-broadcasting until a future expiry is
	 * accepted.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			goto unlock;
		tick_do_periodic_broadcast();
	}
unlock:
	raw_spin_unlock(&tick_broadcast_lock);
}
326
327
328
329
330
/*
 * Powerstate information: the system enters/leaves a state where
 * affected devices might stop.
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate ?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	/* Remember whether the broadcast device was idle before the change. */
	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			/*
			 * In periodic mode the cpu local device is now
			 * replaced by the broadcast tick; shut it down.
			 */
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		/* A forced broadcast cannot be switched off again. */
		if (tick_broadcast_force)
			break;
		cpumask_clear_cpu(cpu, tick_broadcast_on);
		if (!tick_device_is_functional(dev))
			break;
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			/* Restart the cpu local periodic tick. */
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	/* Start or stop the broadcast device as its user count changed. */
	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
394
395
396
397
398
399void tick_broadcast_on_off(unsigned long reason, int *oncpu)
400{
401 if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
402 printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
403 "offline CPU #%d\n", *oncpu);
404 else
405 tick_do_broadcast_on_off(&reason);
406}
407
408
409
410
411void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
412{
413 if (!broadcast)
414 dev->event_handler = tick_handle_periodic;
415 else
416 dev->event_handler = tick_handle_periodic_broadcast;
417}
418
419
420
421
422void tick_shutdown_broadcast(unsigned int *cpup)
423{
424 struct clock_event_device *bc;
425 unsigned long flags;
426 unsigned int cpu = *cpup;
427
428 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
429
430 bc = tick_broadcast_device.evtdev;
431 cpumask_clear_cpu(cpu, tick_broadcast_mask);
432 cpumask_clear_cpu(cpu, tick_broadcast_on);
433
434 if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
435 if (bc && cpumask_empty(tick_broadcast_mask))
436 clockevents_shutdown(bc);
437 }
438
439 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
440}
441
442void tick_suspend_broadcast(void)
443{
444 struct clock_event_device *bc;
445 unsigned long flags;
446
447 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
448
449 bc = tick_broadcast_device.evtdev;
450 if (bc)
451 clockevents_shutdown(bc);
452
453 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
454}
455
/*
 * Resume the broadcast device after system resume. Returns non-zero
 * when the tick of the calling cpu is driven by the broadcast device.
 */
int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			/* Restart only if CPUs still depend on broadcast. */
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			/* Is this cpu's tick delivered by broadcast ? */
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}
486
487
488#ifdef CONFIG_TICK_ONESHOT
489
/* CPUs currently relying on the broadcast device in oneshot mode. */
static cpumask_var_t tick_broadcast_oneshot_mask;
/* CPUs whose expired event will be handled via the broadcast IPI. */
static cpumask_var_t tick_broadcast_pending_mask;
/* CPUs which must be woken by IPI: their local event already expired. */
static cpumask_var_t tick_broadcast_force_mask;
493
494
495
496
497struct cpumask *tick_get_broadcast_oneshot_mask(void)
498{
499 return tick_broadcast_oneshot_mask;
500}
501
502
503
504
505
506
507
508
509int tick_check_broadcast_expired(void)
510{
511 return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
512}
513
514
515
516
517static void tick_broadcast_set_affinity(struct clock_event_device *bc,
518 const struct cpumask *cpumask)
519{
520 if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
521 return;
522
523 if (cpumask_equal(bc->cpumask, cpumask))
524 return;
525
526 bc->cpumask = cpumask;
527 irq_set_affinity(bc->irq, bc->cpumask);
528}
529
530static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
531 ktime_t expires, int force)
532{
533 int ret;
534
535 if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
536 clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
537
538 ret = clockevents_program_event(bc, expires, force);
539 if (!ret)
540 tick_broadcast_set_affinity(bc, cpumask_of(cpu));
541 return ret;
542}
543
/*
 * Resume path for the oneshot broadcast device: switch it back to
 * oneshot mode. Returns 0 (caller's broadcast flag stays unset).
 */
int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}
549
550
551
552
553
554void tick_check_oneshot_broadcast_this_cpu(void)
555{
556 if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
557 struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
558
559
560
561
562
563
564 if (td->mode == TICKDEV_MODE_ONESHOT) {
565 clockevents_set_mode(td->evtdev,
566 CLOCK_EVT_MODE_ONESHOT);
567 }
568 }
569}
570
571
572
573
/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			/* Track the earliest not-yet-expired event. */
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/*
	 * Remove the current cpu from the pending mask. The event is
	 * delivered immediately in tick_do_broadcast() !
	 */
	cpumask_clear_cpu(smp_processor_id(), tick_broadcast_pending_mask);

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Sanity check. Catch the case where we try to broadcast to
	 * offline cpus.
	 */
	if (WARN_ON_ONCE(!cpumask_subset(tmpmask, cpu_online_mask)))
		cpumask_and(tmpmask, tmpmask, cpu_online_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Rearm the broadcast device for the earliest pending event of
	 * the sleeping cpus, if any. If programming fails because that
	 * event expired meanwhile, rescan from the top.
	 */
	if (next_event.tv64 != KTIME_MAX) {
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}
645
646static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
647{
648 if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
649 return 0;
650 if (bc->next_event.tv64 == KTIME_MAX)
651 return 0;
652 return bc->bound_on == cpu ? -EBUSY : 0;
653}
654
/*
 * Shut down the cpu local tick device when entering broadcast mode.
 */
static void broadcast_shutdown_local(struct clock_event_device *bc,
				     struct clock_event_device *dev)
{
	/*
	 * For hrtimer based broadcast we cannot shutdown the cpu local
	 * device if this cpu runs the broadcast timer itself, or if our
	 * own event is the first one to expire.
	 */
	if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
		if (broadcast_needs_cpu(bc, smp_processor_id()))
			return;
		if (dev->next_event.tv64 < bc->next_event.tv64)
			return;
	}
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
}
671
672static void broadcast_move_bc(int deadcpu)
673{
674 struct clock_event_device *bc = tick_broadcast_device.evtdev;
675
676 if (!bc || !broadcast_needs_cpu(bc, deadcpu))
677 return;
678
679 clockevents_program_event(bc, bc->next_event, 1);
680}
681
682
683
684
685
686
/*
 * Powerstate information: the system enters/leaves a state where
 * affected devices might stop.
 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
 */
int tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	ktime_t now;
	int cpu, ret = 0;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return 0;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
			broadcast_shutdown_local(bc, dev);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we remove the
		 * CPU from the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * Bail out if there is no next event.
			 */
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;

			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event. That
			 * can lead to a ping-pong between idle entry
			 * (which rearms the broadcast timer) and the
			 * forced reprogramming here, which always
			 * pushes the event into the future by at
			 * least min_delta.
			 *
			 * Instead of reprogramming an already expired
			 * local event, mark this cpu in the force
			 * mask: the broadcast handler will then raise
			 * the IPI which delivers the event.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
811
812
813
814
815
816
/*
 * Reset the one shot broadcast state for a cpu. Called with
 * tick_broadcast_lock held.
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}
822
823static void tick_broadcast_init_next_event(struct cpumask *mask,
824 ktime_t expires)
825{
826 struct tick_device *td;
827 int cpu;
828
829 for_each_cpu(cpu, mask) {
830 td = &per_cpu(tick_cpu_device, cpu);
831 if (td->evtdev)
832 td->evtdev->next_event = expires;
833 }
834}
835
836
837
838
/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once ! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * to program the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}
878
879
880
881
882void tick_broadcast_switch_to_oneshot(void)
883{
884 struct clock_event_device *bc;
885 unsigned long flags;
886
887 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
888
889 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
890 bc = tick_broadcast_device.evtdev;
891 if (bc)
892 tick_broadcast_setup_oneshot(bc);
893
894 raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
895}
896
897
898
899
900
/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast masks for the dead cpu, but do not stop
	 * the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);

	/* Reprogram the broadcast device if it was bound to the dead cpu. */
	broadcast_move_bc(cpu);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
920
921
922
923
924int tick_broadcast_oneshot_active(void)
925{
926 return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
927}
928
929
930
931
932bool tick_broadcast_oneshot_available(void)
933{
934 struct clock_event_device *bc = tick_broadcast_device.evtdev;
935
936 return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
937}
938
939#endif
940
/*
 * Allocate the cpumasks used by the broadcast code. Runs early during
 * boot (__init), hence the GFP_NOWAIT allocations.
 */
void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_on, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}
952