// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/irq_work.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
#include <linux/sched/clock.h>
#include <linux/nmi.h>
#include <linux/sched/debug.h>

#include "smpboot.h"
#include "sched/smp.h"

#define CSD_TYPE(_csd)	((_csd)->flags & CSD_FLAG_TYPE_MASK)

struct call_function_data {
	call_single_data_t	__percpu *csd;
	cpumask_var_t		cpumask;
	cpumask_var_t		cpumask_ipi;
};

static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);

static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);

static void flush_smp_call_function_queue(bool warn_cpu_offline);

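/*
 * CPU hotplug callbacks: allocate and free the per-CPU cpumasks and csd
 * storage used by smp_call_function_many(), and flush any callbacks still
 * pending when a CPU goes down.
 */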
int smpcfd_prepare_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				     cpu_to_node(cpu)))
		return -ENOMEM;
	if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
				     cpu_to_node(cpu))) {
		free_cpumask_var(cfd->cpumask);
		return -ENOMEM;
	}
	cfd->csd = alloc_percpu(call_single_data_t);
	if (!cfd->csd) {
		free_cpumask_var(cfd->cpumask);
		free_cpumask_var(cfd->cpumask_ipi);
		return -ENOMEM;
	}

	return 0;
}

int smpcfd_dead_cpu(unsigned int cpu)
{
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	free_cpumask_var(cfd->cpumask);
	free_cpumask_var(cfd->cpumask_ipi);
	free_percpu(cfd->csd);
	return 0;
}

int smpcfd_dying_cpu(unsigned int cpu)
{
	/*
	 * The IPIs for the smp-call-function callbacks queued by other
	 * CPUs might arrive late, either due to hardware latencies or
	 * because this CPU disabled interrupts (inside stop-machine)
	 * before the IPIs were sent. So flush out any pending callbacks
	 * explicitly (without waiting for the IPIs to arrive), to
	 * ensure that the outgoing CPU doesn't go offline with work
	 * still pending.
	 */
	flush_smp_call_function_queue(false);
	irq_work_run();
	return 0;
}

void __init call_function_init(void)
{
	int i;

	for_each_possible_cpu(i)
		init_llist_head(&per_cpu(call_single_queue, i));

	smpcfd_prepare_cpu(smp_processor_id());
}

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG

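/*
 * Per-CPU record of the CSD currently being handled on this CPU, plus its
 * func/info, used below to diagnose CSD locks that appear to be stuck.
 */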
static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
static DEFINE_PER_CPU(void *, cur_csd_info);

#define CSD_LOCK_TIMEOUT	(5ULL * NSEC_PER_SEC)
static atomic_t csd_bug_count = ATOMIC_INIT(0);

/* Record current CSD work for this CPU, NULL to erase. */
static void csd_lock_record(call_single_data_t *csd)
{
	if (!csd) {
		smp_mb(); /* NULL cur_csd after unlock. */
		__this_cpu_write(cur_csd, NULL);
		return;
	}
	__this_cpu_write(cur_csd_func, csd->func);
	__this_cpu_write(cur_csd_info, csd->info);
	smp_wmb(); /* func and info before csd. */
	__this_cpu_write(cur_csd, csd);
	smp_mb(); /* Update cur_csd before function call. */
		  /* Or before unlock, as the case may be. */
}

static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
{
	unsigned int csd_type;

	csd_type = CSD_TYPE(csd);
	if (csd_type == CSD_TYPE_ASYNC || csd_type == CSD_TYPE_SYNC)
		return csd->dst; /* Other CSD_TYPE_ values might not have ->dst. */
	return -1;
}

/*
 * Complain if too much time spent waiting.  Note that only
 * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
 * so waiting on other types gets much less information.
 */
static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
{
	int cpu = -1;
	int cpux;
	bool firsttime;
	u64 ts2, ts_delta;
	call_single_data_t *cpu_cur_csd;
	unsigned int flags = READ_ONCE(csd->flags);

	if (!(flags & CSD_FLAG_LOCK)) {
		if (!unlikely(*bug_id))
			return true;
		cpu = csd_lock_wait_getcpu(csd);
		pr_alert("csd: CSD lock (#%d) got unstuck on CPU#%02d, CPU#%02d released the lock.\n",
			 *bug_id, raw_smp_processor_id(), cpu);
		return true;
	}

	ts2 = sched_clock();
	ts_delta = ts2 - *ts1;
	if (likely(ts_delta <= CSD_LOCK_TIMEOUT))
		return false;

	firsttime = !*bug_id;
	if (firsttime)
		*bug_id = atomic_inc_return(&csd_bug_count);
	cpu = csd_lock_wait_getcpu(csd);
	if (WARN_ONCE(cpu < 0 || cpu >= nr_cpu_ids, "%s: cpu = %d\n", __func__, cpu))
		cpux = 0;
	else
		cpux = cpu;
	cpu_cur_csd = smp_load_acquire(&per_cpu(cur_csd, cpux));
	pr_alert("csd: %s non-responsive CSD lock (#%d) on CPU#%02d, waiting %llu ns for CPU#%02d %pS(%ps).\n",
		 firsttime ? "Detected" : "Continued", *bug_id, raw_smp_processor_id(), ts2 - ts0,
		 cpu, csd->func, csd->info);
	if (cpu_cur_csd && csd != cpu_cur_csd) {
		pr_alert("\tcsd: CSD lock (#%d) handling prior %pS(%ps) request.\n",
			 *bug_id, READ_ONCE(per_cpu(cur_csd_func, cpux)),
			 READ_ONCE(per_cpu(cur_csd_info, cpux)));
	} else {
		pr_alert("\tcsd: CSD lock (#%d) %s.\n",
			 *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
	}
	if (cpu >= 0) {
		if (!trigger_single_cpu_backtrace(cpu))
			dump_cpu_task(cpu);
		if (!cpu_cur_csd) {
			pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);
			arch_send_call_function_single_ipi(cpu);
		}
	}
	dump_stack();
	*ts1 = ts2;

	return false;
}

/*
 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it is even more interesting,
 * as we have to ensure no other cpu is still observing our csd.
 */
static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	int bug_id = 0;
	u64 ts0, ts1;

	ts1 = ts0 = sched_clock();
	for (;;) {
		if (csd_lock_wait_toolong(csd, ts0, &ts1, &bug_id))
			break;
		cpu_relax();
	}
	smp_acquire__after_ctrl_dep();
}

#else
static void csd_lock_record(call_single_data_t *csd)
{
}

static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}
#endif

static __always_inline void csd_lock(call_single_data_t *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;

	/*
	 * Prevent the CPU from reordering the above assignment to
	 * ->flags with any subsequent assignments to other fields of
	 * the specified call_single_data_t structure:
	 */
	smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

	/*
	 * Ensure we're all done before releasing the data:
	 */
	smp_store_release(&csd->flags, 0);
}

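/* Per-CPU csd used by smp_call_function_single() for !wait (asynchronous) calls. */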
static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);

void __smp_call_single_queue(int cpu, struct llist_node *node)
{
	/*
	 * The list addition should be visible to the target CPU's IPI
	 * handler before the IPI is sent, by normal cache coherency
	 * ordering between the llist_add() here and the handler pulling
	 * entries off the list.
	 *
	 * If an architecture allows the IPI to overtake that ordering,
	 * the arch code must add the required synchronisation; generic
	 * code does not know about such ordering quirks.
	 */
	if (llist_add(node, &per_cpu(call_single_queue, cpu)))
		send_call_function_single_ipi(cpu);
}

/*
 * Insert a previously allocated call_single_data_t element
 * for execution on the given CPU. The csd must already have
 * ->func, ->info, and ->flags set.
 */
static int generic_exec_single(int cpu, call_single_data_t *csd)
{
	if (cpu == smp_processor_id()) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;
		unsigned long flags;

		/*
		 * We can unlock early even for the synchronous on-stack
		 * case, since we're doing this from the same CPU, with
		 * interrupts disabled around the function call itself.
		 */
		csd_lock_record(csd);
		csd_unlock(csd);
		local_irq_save(flags);
		func(info);
		csd_lock_record(NULL);
		local_irq_restore(flags);
		return 0;
	}

	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		csd_unlock(csd);
		return -ENXIO;
	}

	__smp_call_single_queue(cpu, &csd->llist);

	return 0;
}

/*
 * Invoked by arch to handle an IPI for call function single.
 * Must be called with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	flush_smp_call_function_queue(true);
}

/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go
 * offline, to ensure that all pending IPI callbacks are run before it goes
 * completely offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	call_single_data_t *csd, *csd_next;
	struct llist_node *entry, *prev;
	struct llist_head *head;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist) {
			switch (CSD_TYPE(csd)) {
			case CSD_TYPE_ASYNC:
			case CSD_TYPE_SYNC:
			case CSD_TYPE_IRQ_WORK:
				pr_warn("IPI callback %pS sent to offline CPU\n",
					csd->func);
				break;

			case CSD_TYPE_TTWU:
				pr_warn("IPI task-wakeup sent to offline CPU\n");
				break;

			default:
				pr_warn("IPI callback, unknown type %d, sent to offline CPU\n",
					CSD_TYPE(csd));
				break;
			}
		}
	}

	/*
	 * First; run all SYNC callbacks, people are waiting for us.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		/* Do we wait until *after* the callback? */
		if (CSD_TYPE(csd) == CSD_TYPE_SYNC) {
			smp_call_func_t func = csd->func;
			void *info = csd->info;

			if (prev) {
				prev->next = &csd_next->llist;
			} else {
				entry = &csd_next->llist;
			}

			csd_lock_record(csd);
			func(info);
			csd_unlock(csd);
			csd_lock_record(NULL);
		} else {
			prev = &csd->llist;
		}
	}

	if (!entry)
		return;

	/*
	 * Second; run all !SYNC callbacks.
	 */
	prev = NULL;
	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		int type = CSD_TYPE(csd);

		if (type != CSD_TYPE_TTWU) {
			if (prev) {
				prev->next = &csd_next->llist;
			} else {
				entry = &csd_next->llist;
			}

			if (type == CSD_TYPE_ASYNC) {
				smp_call_func_t func = csd->func;
				void *info = csd->info;

				csd_lock_record(csd);
				csd_unlock(csd);
				func(info);
				csd_lock_record(NULL);
			} else if (type == CSD_TYPE_IRQ_WORK) {
				irq_work_single(csd);
			}

		} else {
			prev = &csd->llist;
		}
	}

	/*
	 * Third; only CSD_TYPE_TTWU is left, issue those.
	 */
	if (entry)
		sched_ttwu_pending(entry);
}

void flush_smp_call_function_from_idle(void)
{
	unsigned long flags;

	if (llist_empty(this_cpu_ptr(&call_single_queue)))
		return;

	local_irq_save(flags);
	flush_smp_call_function_queue(true);
	local_irq_restore(flags);
}

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the other CPU.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
			     int wait)
{
	call_single_data_t *csd;
	call_single_data_t csd_stack = {
		.flags = CSD_FLAG_LOCK | CSD_TYPE_SYNC,
	};
	int this_cpu;
	int err;

	/*
	 * Prevent preemption and reschedule on another CPU, as well as
	 * CPU removal.
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send an smp call function interrupt to this cpu and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add()
	 * and arch_send_call_function_ipi*(); when !@wait we can deadlock
	 * due to csd_lock(), because the interrupt context uses the same
	 * csd storage.
	 */
	WARN_ON_ONCE(!in_task());

	csd = &csd_stack;
	if (!wait) {
		csd = this_cpu_ptr(&csd_data);
		csd_lock(csd);
	}

	csd->func = func;
	csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
	csd->src = smp_processor_id();
	csd->dst = cpu;
#endif

	err = generic_exec_single(cpu, csd);

	if (wait)
		csd_lock_wait(csd);

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);

/**
 * smp_call_function_single_async(): Run an asynchronous function on a
 *				     specific CPU.
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and set-up data structure.
 *
 * Like smp_call_function_single(), but the call is asynchronous and
 * can thus be done from contexts with disabled interrupts.
 *
 * The caller passes its own pre-allocated data structure
 * (ie: embedded in an object) and is responsible for synchronizing it
 * such that the IPIs performed on the @csd are strictly serialized.
 *
 * If the @csd is still in flight from a previous call to
 * smp_call_function_single_async(), this function returns -EBUSY
 * immediately.
 *
 * NOTE: there is currently no debugging facility to validate the
 * correctness of this serialization.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	int err = 0;

	preempt_disable();

	if (csd->flags & CSD_FLAG_LOCK) {
		err = -EBUSY;
		goto out;
	}

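	/* Mark the csd as in flight before handing it to the queue/IPI path. */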
	csd->flags = CSD_FLAG_LOCK;
	smp_wmb();

	err = generic_exec_single(cpu, csd);

out:
	preempt_enable();

	return err;
}
EXPORT_SYMBOL_GPL(smp_call_function_single_async);

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 *
 * Selection preference:
 *	1) current cpu if in @mask
 *	2) any cpu of current node if in @mask
 *	3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait)
{
	unsigned int cpu;
	const struct cpumask *nodemask;
	int ret;

	/* Try for same CPU (cheapest) */
	cpu = get_cpu();
	if (cpumask_test_cpu(cpu, mask))
		goto call;

	/* Try for same node. */
	nodemask = cpumask_of_node(cpu_to_node(cpu));
	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
		if (cpu_online(cpu))
			goto call;
	}

	/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
	cpu = cpumask_any_and(mask, cpu_online_mask);
call:
	ret = smp_call_function_single(cpu, func, info, wait);
	put_cpu();
	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);

static void smp_call_function_many_cond(const struct cpumask *mask,
					smp_call_func_t func, void *info,
					bool wait, smp_cond_func_t cond_func)
{
	struct call_function_data *cfd;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow cpus that are not yet online though, as no one else can
	 * send an smp call function interrupt to this cpu and as such
	 * deadlocks can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress && !early_boot_irqs_disabled);

	/*
	 * When @wait we can deadlock when we interrupt between llist_add()
	 * and arch_send_call_function_ipi*(); when !@wait we can deadlock
	 * due to csd_lock(), because the interrupt context uses the same
	 * csd storage.
	 */
	WARN_ON_ONCE(!in_task());

	/* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		if (!cond_func || cond_func(cpu, info))
			smp_call_function_single(cpu, func, info, wait);
		return;
	}

	cfd = this_cpu_ptr(&cfd_data);

	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
	__cpumask_clear_cpu(this_cpu, cfd->cpumask);

	/* Some callers race with other cpus changing the passed mask */
	if (unlikely(!cpumask_weight(cfd->cpumask)))
		return;

	cpumask_clear(cfd->cpumask_ipi);
	for_each_cpu(cpu, cfd->cpumask) {
		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);

		if (cond_func && !cond_func(cpu, info))
			continue;

		csd_lock(csd);
		if (wait)
			csd->flags |= CSD_TYPE_SYNC;
		csd->func = func;
		csd->info = info;
#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
		csd->src = smp_processor_id();
		csd->dst = cpu;
#endif
		if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
			__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
	}

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

	if (wait) {
		for_each_cpu(cpu, cfd->cpumask) {
			call_single_data_t *csd;

			csd = per_cpu_ptr(cfd->csd, cpu);
			csd_lock_wait(csd);
		}
	}
}

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until the function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait)
{
	smp_call_function_many_cond(mask, func, info, wait, NULL);
}
EXPORT_SYMBOL(smp_call_function_many);

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until the function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
void smp_call_function(smp_call_func_t func, void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
}
EXPORT_SYMBOL(smp_call_function);

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);

/*
 * Setup routines for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */
void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
	setup_max_cpus = 0;
	arch_disable_smp_support();

	return 0;
}

early_param("nosmp", nosmp);

/* "nr_cpus" is a hard limit on nr_cpu_ids. */
static int __init nrcpus(char *str)
{
	int nr_cpus;

	if (get_option(&str, &nr_cpus) && nr_cpus > 0 && nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;

	return 0;
}

early_param("nr_cpus", nrcpus);

/* "maxcpus" is a soft limit on how many CPUs are brought up at boot. */
static int __init maxcpus(char *str)
{
	get_option(&str, &setup_max_cpus);
	if (setup_max_cpus == 0)
		arch_disable_smp_support();

	return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
unsigned int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
	int num_nodes, num_cpus;

	idle_threads_init();
	cpuhp_threads_init();

	pr_info("Bringing up secondary CPUs ...\n");

	bringup_nonboot_cpus(setup_max_cpus);

	num_nodes = num_online_nodes();
	num_cpus = num_online_cpus();
	pr_info("Brought up %d node%s, %d CPU%s\n",
		num_nodes, (num_nodes > 1 ? "s" : ""),
		num_cpus, (num_cpus > 1 ? "s" : ""));

	/* Any cleanup work */
	smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	preempt_disable();
	smp_call_function(func, info, wait);
	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu);

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until the function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait)
{
	int cpu = get_cpu();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);

/*
 * on_each_cpu_cond_mask(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:	A callback function that is passed a cpu id and
 *		the info parameter. The function is called
 *		with preemption disabled. The function should
 *		return a boolean value indicating whether to IPI
 *		the specified CPU.
 * @func:	The function to run on all applicable CPUs.
 *		This must be fast and non-blocking.
 * @info:	An arbitrary pointer to pass to both functions.
 * @wait:	If true, wait (atomically) until the function has
 *		completed on other CPUs.
 * @mask:	The set of cpus to run on (only runs on online subset).
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	int cpu = get_cpu();

	smp_call_function_many_cond(mask, func, info, wait, cond_func);
	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
		unsigned long flags;

		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
EXPORT_SYMBOL(on_each_cpu_cond);

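/* Dummy callback: the IPI round-trip itself is the point in kick_all_cpus_sync(). */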
static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of the pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via the pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
	/* Make sure the change is visible before we kick the cpus */
	smp_mb();
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);

/**
 * wake_up_all_idle_cpus - break all cpus out of idle
 *
 * Tries to break out of idle every online cpu except the current one,
 * including cpus that are idle-polling; cpus that are not idle are
 * left alone.
 */
void wake_up_all_idle_cpus(void)
{
	int cpu;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;

		wake_up_if_idle(cpu);
	}
	preempt_enable();
}
EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);

/**
 * struct smp_call_on_cpu_struct - Call a function on a specific CPU
 * @work: callback to be called.
 * @done: completion to signal.
 * @func: function to call.
 * @data: function's data argument.
 * @ret: return value from @func.
 * @cpu: target CPU (-1 for any CPU).
 *
 * Used to call a function on a specific cpu and wait for it to return.
 * Optionally make sure the call is done on a specified physical cpu via
 * vcpu pinning in order to support virtualized environments.
 */
struct smp_call_on_cpu_struct {
	struct work_struct	work;
	struct completion	done;
	int			(*func)(void *);
	void			*data;
	int			ret;
	int			cpu;
};

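/* Workqueue callback: optionally pin to the physical CPU, run @func, signal completion. */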
static void smp_call_on_cpu_callback(struct work_struct *work)
{
	struct smp_call_on_cpu_struct *sscs;

	sscs = container_of(work, struct smp_call_on_cpu_struct, work);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(sscs->cpu);
	sscs->ret = sscs->func(sscs->data);
	if (sscs->cpu >= 0)
		hypervisor_pin_vcpu(-1);

	complete(&sscs->done);
}

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	struct smp_call_on_cpu_struct sscs = {
		.done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
		.func = func,
		.data = par,
		.cpu  = phys ? cpu : -1,
	};

	INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);

	if (cpu >= nr_cpu_ids || !cpu_online(cpu))
		return -ENXIO;

	queue_work_on(cpu, system_wq, &sscs.work);
	wait_for_completion(&sscs.done);

	return sscs.ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);