/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"
38
/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @fail:	State at which a failure is injected (CPUHP_INVALID means none)
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @booted_once: CPU has been brought up at least once since boot
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
53struct cpuhp_cpu_state {
54 enum cpuhp_state state;
55 enum cpuhp_state target;
56 enum cpuhp_state fail;
57#ifdef CONFIG_SMP
58 struct task_struct *thread;
59 bool should_run;
60 bool rollback;
61 bool single;
62 bool bringup;
63 bool booted_once;
64 struct hlist_node *node;
65 struct hlist_node *last;
66 enum cpuhp_state cb_state;
67 int result;
68 struct completion done_up;
69 struct completion done_down;
70#endif
71};
72
73static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
74 .fail = CPUHP_INVALID,
75};
76
77#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
78static struct lockdep_map cpuhp_state_up_map =
79 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
80static struct lockdep_map cpuhp_state_down_map =
81 STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
82
83
84static inline void cpuhp_lock_acquire(bool bringup)
85{
86 lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
87}
88
89static inline void cpuhp_lock_release(bool bringup)
90{
91 lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
92}
93#else
94
95static inline void cpuhp_lock_acquire(bool bringup) { }
96static inline void cpuhp_lock_release(bool bringup) { }
97
98#endif
99
/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 * @multi_instance:	State has multiple instances which get added afterwards
 */
107struct cpuhp_step {
108 const char *name;
109 union {
110 int (*single)(unsigned int cpu);
111 int (*multi)(unsigned int cpu,
112 struct hlist_node *node);
113 } startup;
114 union {
115 int (*single)(unsigned int cpu);
116 int (*multi)(unsigned int cpu,
117 struct hlist_node *node);
118 } teardown;
119 struct hlist_head list;
120 bool cant_stop;
121 bool multi_instance;
122};
123
124static DEFINE_MUTEX(cpuhp_state_mutex);
125static struct cpuhp_step cpuhp_hp_states[];
126
127static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
128{
129 return cpuhp_hp_states + state;
130}
131
/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
142static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
143 bool bringup, struct hlist_node *node,
144 struct hlist_node **lastp)
145{
146 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
147 struct cpuhp_step *step = cpuhp_get_step(state);
148 int (*cbm)(unsigned int cpu, struct hlist_node *node);
149 int (*cb)(unsigned int cpu);
150 int ret, cnt;
151
152 if (st->fail == state) {
153 st->fail = CPUHP_INVALID;
154
155 if (!(bringup ? step->startup.single : step->teardown.single))
156 return 0;
157
158 return -EAGAIN;
159 }
160
161 if (!step->multi_instance) {
162 WARN_ON_ONCE(lastp && *lastp);
163 cb = bringup ? step->startup.single : step->teardown.single;
164 if (!cb)
165 return 0;
166 trace_cpuhp_enter(cpu, st->target, state, cb);
167 ret = cb(cpu);
168 trace_cpuhp_exit(cpu, st->state, state, ret);
169 return ret;
170 }
171 cbm = bringup ? step->startup.multi : step->teardown.multi;
172 if (!cbm)
173 return 0;
174
175
176 if (node) {
177 WARN_ON_ONCE(lastp && *lastp);
178 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
179 ret = cbm(cpu, node);
180 trace_cpuhp_exit(cpu, st->state, state, ret);
181 return ret;
182 }
183
184
185 cnt = 0;
186 hlist_for_each(node, &step->list) {
187 if (lastp && node == *lastp)
188 break;
189
190 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
191 ret = cbm(cpu, node);
192 trace_cpuhp_exit(cpu, st->state, state, ret);
193 if (ret) {
194 if (!lastp)
195 goto err;
196
197 *lastp = node;
198 return ret;
199 }
200 cnt++;
201 }
202 if (lastp)
203 *lastp = NULL;
204 return 0;
205err:
206
207 cbm = !bringup ? step->startup.multi : step->teardown.multi;
208 if (!cbm)
209 return ret;
210
211 hlist_for_each(node, &step->list) {
212 if (!cnt--)
213 break;
214
215 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
216 ret = cbm(cpu, node);
217 trace_cpuhp_exit(cpu, st->state, state, ret);
218
219
220
221 WARN_ON_ONCE(ret);
222 }
223 return ret;
224}
225
226#ifdef CONFIG_SMP
227static bool cpuhp_is_ap_state(enum cpuhp_state state)
228{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
233 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
234}
235
236static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
237{
238 struct completion *done = bringup ? &st->done_up : &st->done_down;
239 wait_for_completion(done);
240}
241
242static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
243{
244 struct completion *done = bringup ? &st->done_up : &st->done_down;
245 complete(done);
246}
247
/*
 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
 */
251static bool cpuhp_is_atomic_state(enum cpuhp_state state)
252{
253 return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
254}
255
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
257static DEFINE_MUTEX(cpu_add_remove_lock);
258bool cpuhp_tasks_frozen;
259EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
260
/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
265void cpu_maps_update_begin(void)
266{
267 mutex_lock(&cpu_add_remove_lock);
268}
269
270void cpu_maps_update_done(void)
271{
272 mutex_unlock(&cpu_add_remove_lock);
273}
274
/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
279static int cpu_hotplug_disabled;
280
281#ifdef CONFIG_HOTPLUG_CPU
282
283DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
284
285void cpus_read_lock(void)
286{
287 percpu_down_read(&cpu_hotplug_lock);
288}
289EXPORT_SYMBOL_GPL(cpus_read_lock);
290
291int cpus_read_trylock(void)
292{
293 return percpu_down_read_trylock(&cpu_hotplug_lock);
294}
295EXPORT_SYMBOL_GPL(cpus_read_trylock);
296
297void cpus_read_unlock(void)
298{
299 percpu_up_read(&cpu_hotplug_lock);
300}
301EXPORT_SYMBOL_GPL(cpus_read_unlock);
302
303void cpus_write_lock(void)
304{
305 percpu_down_write(&cpu_hotplug_lock);
306}
307
308void cpus_write_unlock(void)
309{
310 percpu_up_write(&cpu_hotplug_lock);
311}
312
313void lockdep_assert_cpus_held(void)
314{
315 percpu_rwsem_assert_held(&cpu_hotplug_lock);
316}
317
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
325void cpu_hotplug_disable(void)
326{
327 cpu_maps_update_begin();
328 cpu_hotplug_disabled++;
329 cpu_maps_update_done();
330}
331EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
332
333static void __cpu_hotplug_enable(void)
334{
335 if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
336 return;
337 cpu_hotplug_disabled--;
338}
339
340void cpu_hotplug_enable(void)
341{
342 cpu_maps_update_begin();
343 __cpu_hotplug_enable();
344 cpu_maps_update_done();
345}
346EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
347#endif
348
349#ifdef CONFIG_HOTPLUG_SMT
350enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
351EXPORT_SYMBOL_GPL(cpu_smt_control);
352
353static bool cpu_smt_available __read_mostly;
354
355void __init cpu_smt_disable(bool force)
356{
357 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
358 cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
359 return;
360
361 if (force) {
362 pr_info("SMT: Force disabled\n");
363 cpu_smt_control = CPU_SMT_FORCE_DISABLED;
364 } else {
365 cpu_smt_control = CPU_SMT_DISABLED;
366 }
367}
368
/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code before non boot CPUs
 * are brought up.
 */
374void __init cpu_smt_check_topology_early(void)
375{
376 if (!topology_smt_supported())
377 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
378}
379
/*
 * If no SMT sibling was detected while bringing up the secondary CPUs
 * (cpu_smt_available stayed false), SMT is either not supported or was
 * disabled by the BIOS/firmware. Record that so the sysfs control file
 * reports "notsupported". Called from architecture code after the non boot
 * CPUs have been brought up.
 */
387void __init cpu_smt_check_topology(void)
388{
389 if (!cpu_smt_available)
390 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
391}
392
393static int __init smt_cmdline_disable(char *str)
394{
395 cpu_smt_disable(str && !strcmp(str, "force"));
396 return 0;
397}
398early_param("nosmt", smt_cmdline_disable);
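
/*
 * Usage note (added for clarity, not in the original sources): SMT control
 * can be restricted from the kernel command line:
 *
 *	nosmt		- keep only one thread per core online
 *	nosmt=force	- same, and refuse later re-enabling via sysfs
 *
 * "force" maps to CPU_SMT_FORCE_DISABLED, which store_smt_control() below
 * refuses to undo.
 */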
399
400static inline bool cpu_smt_allowed(unsigned int cpu)
401{
402 if (topology_is_primary_thread(cpu))
403 return true;
404
405
406
407
408
409
410 if (per_cpu(cpuhp_state, cpu).booted_once)
411 cpu_smt_available = true;
412
413 if (cpu_smt_control == CPU_SMT_ENABLED)
414 return true;
415
	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
	 * core will shutdown the machine.
	 */
422 return !per_cpu(cpuhp_state, cpu).booted_once;
423}
424#else
425static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
426#endif
427
428static inline enum cpuhp_state
429cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
430{
431 enum cpuhp_state prev_state = st->state;
432
433 st->rollback = false;
434 st->last = NULL;
435
436 st->target = target;
437 st->single = false;
438 st->bringup = st->state < target;
439
440 return prev_state;
441}
442
443static inline void
444cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
445{
446 st->rollback = true;
	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
452 if (!st->last) {
453 if (st->bringup)
454 st->state--;
455 else
456 st->state++;
457 }
458
459 st->target = prev_state;
460 st->bringup = !st->bringup;
461}
462
463
464static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
465{
466 if (!st->single && st->state == st->target)
467 return;
468
469 st->result = 0;
470
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() in cpuhp_thread_fun().
	 */
474 smp_mb();
475 st->should_run = true;
476 wake_up_process(st->thread);
477 wait_for_ap_thread(st, st->bringup);
478}
479
480static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
481{
482 enum cpuhp_state prev_state;
483 int ret;
484
485 prev_state = cpuhp_set_state(st, target);
486 __cpuhp_kick_ap(st);
487 if ((ret = st->result)) {
488 cpuhp_reset_state(st, prev_state);
489 __cpuhp_kick_ap(st);
490 }
491
492 return ret;
493}
494
495static int bringup_wait_for_ap(unsigned int cpu)
496{
497 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
498
499
500 wait_for_ap_thread(st, true);
501 if (WARN_ON_ONCE((!cpu_online(cpu))))
502 return -ECANCELED;
503
504
505 stop_machine_unpark(cpu);
506 kthread_unpark(st->thread);
507
	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
515 if (!cpu_smt_allowed(cpu))
516 return -ECANCELED;
517
518 if (st->target <= CPUHP_AP_ONLINE_IDLE)
519 return 0;
520
521 return cpuhp_kick_ap(st, st->target);
522}
523
524static int bringup_cpu(unsigned int cpu)
525{
526 struct task_struct *idle = idle_thread_get(cpu);
527 int ret;
528
	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
534 irq_lock_sparse();
535
536
537 ret = __cpu_up(cpu, idle);
538 irq_unlock_sparse();
539 if (ret)
540 return ret;
541 return bringup_wait_for_ap(cpu);
542}
543
/*
 * Hotplug state machine related functions
 */
548static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
549{
550 for (st->state--; st->state > st->target; st->state--)
551 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
552}
553
554static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
555 enum cpuhp_state target)
556{
557 enum cpuhp_state prev_state = st->state;
558 int ret = 0;
559
560 while (st->state < target) {
561 st->state++;
562 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
563 if (ret) {
564 st->target = prev_state;
565 undo_cpu_up(cpu, st);
566 break;
567 }
568 }
569 return ret;
570}
571
/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
575static void cpuhp_create(unsigned int cpu)
576{
577 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
578
579 init_completion(&st->done_up);
580 init_completion(&st->done_down);
581}
582
583static int cpuhp_should_run(unsigned int cpu)
584{
585 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
586
587 return st->should_run;
588}
589
/*
 * Execute the teardown/startup callbacks for the CPU. Separate function so
 * the state can be rolled back in case of an error.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
604static void cpuhp_thread_fun(unsigned int cpu)
605{
606 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
607 bool bringup = st->bringup;
608 enum cpuhp_state state;
609
610 if (WARN_ON_ONCE(!st->should_run))
611 return;
612
	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
617 smp_mb();
618
619 cpuhp_lock_acquire(bringup);
620
621 if (st->single) {
622 state = st->cb_state;
623 st->should_run = false;
624 } else {
625 if (bringup) {
626 st->state++;
627 state = st->state;
628 st->should_run = (st->state < st->target);
629 WARN_ON_ONCE(st->state > st->target);
630 } else {
631 state = st->state;
632 st->state--;
633 st->should_run = (st->state > st->target);
634 WARN_ON_ONCE(st->state < st->target);
635 }
636 }
637
638 WARN_ON_ONCE(!cpuhp_is_ap_state(state));
639
640 if (cpuhp_is_atomic_state(state)) {
641 local_irq_disable();
642 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
643 local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
648 WARN_ON_ONCE(st->result);
649 } else {
650 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
651 }
652
653 if (st->result) {
654
655
656
657
658
659 WARN_ON_ONCE(st->rollback);
660 st->should_run = false;
661 }
662
663 cpuhp_lock_release(bringup);
664
665 if (!st->should_run)
666 complete_ap_thread(st, bringup);
667}
668
669
670static int
671cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
672 struct hlist_node *node)
673{
674 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
675 int ret;
676
677 if (!cpu_online(cpu))
678 return 0;
679
680 cpuhp_lock_acquire(false);
681 cpuhp_lock_release(false);
682
683 cpuhp_lock_acquire(true);
684 cpuhp_lock_release(true);
685
	/*
	 * If there is no AP thread yet (early boot), invoke the callback
	 * directly from here.
	 */
690 if (!st->thread)
691 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
692
693 st->rollback = false;
694 st->last = NULL;
695
696 st->node = node;
697 st->bringup = bringup;
698 st->cb_state = state;
699 st->single = true;
700
701 __cpuhp_kick_ap(st);
702
703
704
705
706 if ((ret = st->result) && st->last) {
707 st->rollback = true;
708 st->bringup = !bringup;
709
710 __cpuhp_kick_ap(st);
711 }
712
713
714
715
716
717 st->node = st->last = NULL;
718 return ret;
719}
720
721static int cpuhp_kick_ap_work(unsigned int cpu)
722{
723 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
724 enum cpuhp_state prev_state = st->state;
725 int ret;
726
727 cpuhp_lock_acquire(false);
728 cpuhp_lock_release(false);
729
730 cpuhp_lock_acquire(true);
731 cpuhp_lock_release(true);
732
733 trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
734 ret = cpuhp_kick_ap(st, st->target);
735 trace_cpuhp_exit(cpu, st->state, prev_state, ret);
736
737 return ret;
738}
739
740static struct smp_hotplug_thread cpuhp_threads = {
741 .store = &cpuhp_state.thread,
742 .create = &cpuhp_create,
743 .thread_should_run = cpuhp_should_run,
744 .thread_fn = cpuhp_thread_fun,
745 .thread_comm = "cpuhp/%u",
746 .selfparking = true,
747};
748
749void __init cpuhp_threads_init(void)
750{
751 BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
752 kthread_unpark(this_cpu_read(cpuhp_state.thread));
753}
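
/*
 * Note (added for clarity): smpboot_register_percpu_thread() creates one
 * "cpuhp/%u" per-CPU thread. Because .selfparking is true the threads start
 * parked; the boot CPU's thread is unparked right above, while secondary
 * CPUs get theirs unparked from bringup_wait_for_ap() when they come online.
 */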
754
755#ifdef CONFIG_HOTPLUG_CPU
756
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
768void clear_tasks_mm_cpumask(int cpu)
769{
770 struct task_struct *p;
771
	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. Thus, we may use rcu_read_lock() here, instead of
	 * grabbing full-fledged tasklist_lock.
	 */
779 WARN_ON(cpu_online(cpu));
780 rcu_read_lock();
781 for_each_process(p) {
782 struct task_struct *t;
783
784
785
786
787
788 t = find_lock_task_mm(p);
789 if (!t)
790 continue;
791 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
792 task_unlock(t);
793 }
794 rcu_read_unlock();
795}
796
797
798static int take_cpu_down(void *_param)
799{
800 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
801 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
802 int err, cpu = smp_processor_id();
803 int ret;
804
805
806 err = __cpu_disable();
807 if (err < 0)
808 return err;
809
	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
814 WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
815 st->state--;
816
817 for (; st->state > target; st->state--) {
818 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
819
820
821
822 WARN_ON_ONCE(ret);
823 }
824
825
826 tick_handover_do_timer();
827
828 stop_machine_park(cpu);
829 return 0;
830}
831
832static int takedown_cpu(unsigned int cpu)
833{
834 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
835 int err;
836
837
838 kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
839
840
841
842
843
844 irq_lock_sparse();
845
846
847
848
849 err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
850 if (err) {
851
852 irq_unlock_sparse();
853
854 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
855 return err;
856 }
857 BUG_ON(cpu_online(cpu));
858
	/*
	 * The teardown callbacks invoked under stop_machine above have moved
	 * all runnable tasks away; only the idle task is left on the dying
	 * CPU. Wait for it to reach the CPUHP_AP_IDLE_DEAD state.
	 */
866 wait_for_ap_thread(st, false);
867 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
868
869
870 irq_unlock_sparse();
871
872 hotplug_cpu__broadcast_tick_pull(cpu);
873
874 __cpu_die(cpu);
875
876 tick_cleanup_dead_cpu(cpu);
877 rcutree_migrate_callbacks(cpu);
878 return 0;
879}
880
881static void cpuhp_complete_idle_dead(void *arg)
882{
883 struct cpuhp_cpu_state *st = arg;
884
885 complete_ap_thread(st, false);
886}
887
888void cpuhp_report_idle_dead(void)
889{
890 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
891
892 BUG_ON(st->state != CPUHP_AP_OFFLINE);
893 rcu_report_dead(smp_processor_id());
894 st->state = CPUHP_AP_IDLE_DEAD;
895
	/*
	 * We cannot call complete() after rcu_report_dead() on the dying CPU,
	 * so delegate the completion to an online CPU.
	 */
899 smp_call_function_single(cpumask_first(cpu_online_mask),
900 cpuhp_complete_idle_dead, st, 0);
901}
902
903static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
904{
905 for (st->state++; st->state < st->target; st->state++)
906 cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
907}
908
909static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
910 enum cpuhp_state target)
911{
912 enum cpuhp_state prev_state = st->state;
913 int ret = 0;
914
915 for (; st->state > target; st->state--) {
916 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
917 if (ret) {
918 st->target = prev_state;
919 if (st->state < prev_state)
920 undo_cpu_down(cpu, st);
921 break;
922 }
923 }
924 return ret;
925}
926
927
928static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
929 enum cpuhp_state target)
930{
931 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
932 int prev_state, ret = 0;
933
934 if (num_online_cpus() == 1)
935 return -EBUSY;
936
937 if (!cpu_present(cpu))
938 return -EINVAL;
939
940 cpus_write_lock();
941
942 cpuhp_tasks_frozen = tasks_frozen;
943
944 prev_state = cpuhp_set_state(st, target);
945
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
949 if (st->state > CPUHP_TEARDOWN_CPU) {
950 st->target = max((int)target, CPUHP_TEARDOWN_CPU);
951 ret = cpuhp_kick_ap_work(cpu);
952
953
954
955
956 if (ret)
957 goto out;
958
959
960
961
962
963 if (st->state > CPUHP_TEARDOWN_CPU)
964 goto out;
965
966 st->target = target;
967 }
968
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
972 ret = cpuhp_down_callbacks(cpu, st, target);
973 if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
974 cpuhp_reset_state(st, prev_state);
975 __cpuhp_kick_ap(st);
976 }
977
978out:
979 cpus_write_unlock();
980
981
982
983
984 lockup_detector_cleanup();
985 return ret;
986}
987
988static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
989{
990 if (cpu_hotplug_disabled)
991 return -EBUSY;
992 return _cpu_down(cpu, 0, target);
993}
994
995static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
996{
997 int err;
998
999 cpu_maps_update_begin();
1000 err = cpu_down_maps_locked(cpu, target);
1001 cpu_maps_update_done();
1002 return err;
1003}
1004
1005int cpu_down(unsigned int cpu)
1006{
1007 return do_cpu_down(cpu, CPUHP_OFFLINE);
1008}
1009EXPORT_SYMBOL(cpu_down);
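
/*
 * Usage sketch (assumption, not part of this file): cpu_down() is normally
 * reached from the per-CPU sysfs "online" attribute, e.g.
 *
 *	echo 0 > /sys/devices/system/cpu/cpu3/online
 *
 * which goes through device_offline() and the cpu_subsys offline hook and
 * ends up here with target == CPUHP_OFFLINE.
 */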
1010
1011#else
1012#define takedown_cpu NULL
1013#endif
1014
/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
1022void notify_cpu_starting(unsigned int cpu)
1023{
1024 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1025 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1026 int ret;
1027
1028 rcu_cpu_starting(cpu);
1029 st->booted_once = true;
1030 while (st->state < target) {
1031 st->state++;
1032 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
1033
1034
1035
1036 WARN_ON_ONCE(ret);
1037 }
1038}
1039
/*
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
 */
1045void cpuhp_online_idle(enum cpuhp_state state)
1046{
1047 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1048
1049
1050 if (state != CPUHP_AP_ONLINE_IDLE)
1051 return;
1052
1053 st->state = CPUHP_AP_ONLINE_IDLE;
1054 complete_ap_thread(st, true);
1055}
1056
1057
1058static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1059{
1060 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1061 struct task_struct *idle;
1062 int ret = 0;
1063
1064 cpus_write_lock();
1065
1066 if (!cpu_present(cpu)) {
1067 ret = -EINVAL;
1068 goto out;
1069 }
1070
	/*
	 * The caller of do_cpu_up() might have raced with another
	 * caller. Ignore it for now.
	 */
1075 if (st->state >= target)
1076 goto out;
1077
1078 if (st->state == CPUHP_OFFLINE) {
1079
1080 idle = idle_thread_get(cpu);
1081 if (IS_ERR(idle)) {
1082 ret = PTR_ERR(idle);
1083 goto out;
1084 }
1085 }
1086
1087 cpuhp_tasks_frozen = tasks_frozen;
1088
1089 cpuhp_set_state(st, target);
1090
1091
1092
1093
1094 if (st->state > CPUHP_BRINGUP_CPU) {
1095 ret = cpuhp_kick_ap_work(cpu);
1096
1097
1098
1099
1100 if (ret)
1101 goto out;
1102 }
1103
	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
1109 target = min((int)target, CPUHP_BRINGUP_CPU);
1110 ret = cpuhp_up_callbacks(cpu, st, target);
1111out:
1112 cpus_write_unlock();
1113 return ret;
1114}
1115
1116static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
1117{
1118 int err = 0;
1119
1120 if (!cpu_possible(cpu)) {
1121 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1122 cpu);
1123#if defined(CONFIG_IA64)
1124 pr_err("please check additional_cpus= boot parameter\n");
1125#endif
1126 return -EINVAL;
1127 }
1128
1129 err = try_online_node(cpu_to_node(cpu));
1130 if (err)
1131 return err;
1132
1133 cpu_maps_update_begin();
1134
1135 if (cpu_hotplug_disabled) {
1136 err = -EBUSY;
1137 goto out;
1138 }
1139 if (!cpu_smt_allowed(cpu)) {
1140 err = -EPERM;
1141 goto out;
1142 }
1143
1144 err = _cpu_up(cpu, 0, target);
1145out:
1146 cpu_maps_update_done();
1147 return err;
1148}
1149
1150int cpu_up(unsigned int cpu)
1151{
1152 return do_cpu_up(cpu, CPUHP_ONLINE);
1153}
1154EXPORT_SYMBOL_GPL(cpu_up);
1155
1156#ifdef CONFIG_PM_SLEEP_SMP
1157static cpumask_var_t frozen_cpus;
1158
1159int freeze_secondary_cpus(int primary)
1160{
1161 int cpu, error = 0;
1162
1163 cpu_maps_update_begin();
1164 if (!cpu_online(primary))
1165 primary = cpumask_first(cpu_online_mask);
1166
1167
1168
1169
1170 cpumask_clear(frozen_cpus);
1171
1172 pr_info("Disabling non-boot CPUs ...\n");
1173 for_each_online_cpu(cpu) {
1174 if (cpu == primary)
1175 continue;
1176 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1177 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1178 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1179 if (!error)
1180 cpumask_set_cpu(cpu, frozen_cpus);
1181 else {
1182 pr_err("Error taking CPU%d down: %d\n", cpu, error);
1183 break;
1184 }
1185 }
1186
1187 if (!error)
1188 BUG_ON(num_online_cpus() > 1);
1189 else
1190 pr_err("Non-boot CPUs are not disabled\n");
1191
	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure, as the callers of this function are
	 * supposed to call enable_nonboot_cpus() on the failure path.
	 */
1197 cpu_hotplug_disabled++;
1198
1199 cpu_maps_update_done();
1200 return error;
1201}
1202
1203void __weak arch_enable_nonboot_cpus_begin(void)
1204{
1205}
1206
1207void __weak arch_enable_nonboot_cpus_end(void)
1208{
1209}
1210
1211void enable_nonboot_cpus(void)
1212{
1213 int cpu, error;
1214
1215
1216 cpu_maps_update_begin();
1217 __cpu_hotplug_enable();
1218 if (cpumask_empty(frozen_cpus))
1219 goto out;
1220
1221 pr_info("Enabling non-boot CPUs ...\n");
1222
1223 arch_enable_nonboot_cpus_begin();
1224
1225 for_each_cpu(cpu, frozen_cpus) {
1226 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1227 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1228 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1229 if (!error) {
1230 pr_info("CPU%d is up\n", cpu);
1231 continue;
1232 }
1233 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1234 }
1235
1236 arch_enable_nonboot_cpus_end();
1237
1238 cpumask_clear(frozen_cpus);
1239out:
1240 cpu_maps_update_done();
1241}
1242
1243static int __init alloc_frozen_cpus(void)
1244{
1245 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1246 return -ENOMEM;
1247 return 0;
1248}
1249core_initcall(alloc_frozen_cpus);
1250
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
1262static int
1263cpu_hotplug_pm_callback(struct notifier_block *nb,
1264 unsigned long action, void *ptr)
1265{
1266 switch (action) {
1267
1268 case PM_SUSPEND_PREPARE:
1269 case PM_HIBERNATION_PREPARE:
1270 cpu_hotplug_disable();
1271 break;
1272
1273 case PM_POST_SUSPEND:
1274 case PM_POST_HIBERNATION:
1275 cpu_hotplug_enable();
1276 break;
1277
1278 default:
1279 return NOTIFY_DONE;
1280 }
1281
1282 return NOTIFY_OK;
1283}
1284
1285
1286static int __init cpu_hotplug_pm_sync_init(void)
1287{
1288
1289
1290
1291
1292
1293 pm_notifier(cpu_hotplug_pm_callback, 0);
1294 return 0;
1295}
1296core_initcall(cpu_hotplug_pm_sync_init);
1297
1298#endif
1299
1300int __boot_cpu_id;
1301
1302#endif
1303
/* CPU hotplug state machine step table, indexed by enum cpuhp_state */
1305static struct cpuhp_step cpuhp_hp_states[] = {
1306 [CPUHP_OFFLINE] = {
1307 .name = "offline",
1308 .startup.single = NULL,
1309 .teardown.single = NULL,
1310 },
1311#ifdef CONFIG_SMP
1312 [CPUHP_CREATE_THREADS]= {
1313 .name = "threads:prepare",
1314 .startup.single = smpboot_create_threads,
1315 .teardown.single = NULL,
1316 .cant_stop = true,
1317 },
1318 [CPUHP_PERF_PREPARE] = {
1319 .name = "perf:prepare",
1320 .startup.single = perf_event_init_cpu,
1321 .teardown.single = perf_event_exit_cpu,
1322 },
1323 [CPUHP_WORKQUEUE_PREP] = {
1324 .name = "workqueue:prepare",
1325 .startup.single = workqueue_prepare_cpu,
1326 .teardown.single = NULL,
1327 },
1328 [CPUHP_HRTIMERS_PREPARE] = {
1329 .name = "hrtimers:prepare",
1330 .startup.single = hrtimers_prepare_cpu,
1331 .teardown.single = hrtimers_dead_cpu,
1332 },
1333 [CPUHP_SMPCFD_PREPARE] = {
1334 .name = "smpcfd:prepare",
1335 .startup.single = smpcfd_prepare_cpu,
1336 .teardown.single = smpcfd_dead_cpu,
1337 },
1338 [CPUHP_RELAY_PREPARE] = {
1339 .name = "relay:prepare",
1340 .startup.single = relay_prepare_cpu,
1341 .teardown.single = NULL,
1342 },
1343 [CPUHP_SLAB_PREPARE] = {
1344 .name = "slab:prepare",
1345 .startup.single = slab_prepare_cpu,
1346 .teardown.single = slab_dead_cpu,
1347 },
1348 [CPUHP_RCUTREE_PREP] = {
1349 .name = "RCU/tree:prepare",
1350 .startup.single = rcutree_prepare_cpu,
1351 .teardown.single = rcutree_dead_cpu,
1352 },
1353
1354
1355
1356
1357
1358 [CPUHP_TIMERS_PREPARE] = {
1359 .name = "timers:prepare",
1360 .startup.single = timers_prepare_cpu,
1361 .teardown.single = timers_dead_cpu,
1362 },
1363
1364 [CPUHP_BRINGUP_CPU] = {
1365 .name = "cpu:bringup",
1366 .startup.single = bringup_cpu,
1367 .teardown.single = NULL,
1368 .cant_stop = true,
1369 },
1370
1371 [CPUHP_AP_IDLE_DEAD] = {
1372 .name = "idle:dead",
1373 },
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
1378 [CPUHP_AP_OFFLINE] = {
1379 .name = "ap:offline",
1380 .cant_stop = true,
1381 },
1382
1383 [CPUHP_AP_SCHED_STARTING] = {
1384 .name = "sched:starting",
1385 .startup.single = sched_cpu_starting,
1386 .teardown.single = sched_cpu_dying,
1387 },
1388 [CPUHP_AP_RCUTREE_DYING] = {
1389 .name = "RCU/tree:dying",
1390 .startup.single = NULL,
1391 .teardown.single = rcutree_dying_cpu,
1392 },
1393 [CPUHP_AP_SMPCFD_DYING] = {
1394 .name = "smpcfd:dying",
1395 .startup.single = NULL,
1396 .teardown.single = smpcfd_dying_cpu,
1397 },
1398
1399
1400 [CPUHP_AP_ONLINE] = {
1401 .name = "ap:online",
1402 },
	/*
	 * Handled on the control processor until the plugged processor
	 * manages this itself.
	 */
1407 [CPUHP_TEARDOWN_CPU] = {
1408 .name = "cpu:teardown",
1409 .startup.single = NULL,
1410 .teardown.single = takedown_cpu,
1411 .cant_stop = true,
1412 },
1413
1414 [CPUHP_AP_SMPBOOT_THREADS] = {
1415 .name = "smpboot/threads:online",
1416 .startup.single = smpboot_unpark_threads,
1417 .teardown.single = smpboot_park_threads,
1418 },
1419 [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
1420 .name = "irq/affinity:online",
1421 .startup.single = irq_affinity_online_cpu,
1422 .teardown.single = NULL,
1423 },
1424 [CPUHP_AP_PERF_ONLINE] = {
1425 .name = "perf:online",
1426 .startup.single = perf_event_init_cpu,
1427 .teardown.single = perf_event_exit_cpu,
1428 },
1429 [CPUHP_AP_WATCHDOG_ONLINE] = {
1430 .name = "lockup_detector:online",
1431 .startup.single = lockup_detector_online_cpu,
1432 .teardown.single = lockup_detector_offline_cpu,
1433 },
1434 [CPUHP_AP_WORKQUEUE_ONLINE] = {
1435 .name = "workqueue:online",
1436 .startup.single = workqueue_online_cpu,
1437 .teardown.single = workqueue_offline_cpu,
1438 },
1439 [CPUHP_AP_RCUTREE_ONLINE] = {
1440 .name = "RCU/tree:online",
1441 .startup.single = rcutree_online_cpu,
1442 .teardown.single = rcutree_offline_cpu,
1443 },
1444#endif
	/*
	 * The dynamically registered state space is here
	 */

1449#ifdef CONFIG_SMP
1450
1451 [CPUHP_AP_ACTIVE] = {
1452 .name = "sched:active",
1453 .startup.single = sched_cpu_activate,
1454 .teardown.single = sched_cpu_deactivate,
1455 },
1456#endif
1457
1458
1459 [CPUHP_ONLINE] = {
1460 .name = "online",
1461 .startup.single = NULL,
1462 .teardown.single = NULL,
1463 },
1464};
1465
1466
1467static int cpuhp_cb_check(enum cpuhp_state state)
1468{
1469 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1470 return -EINVAL;
1471 return 0;
1472}
1473
/*
 * Return a free slot from one of the dynamic state ranges. The ranges are
 * protected by cpuhp_state_mutex and an empty slot is identified by having
 * no name assigned.
 */
1479static int cpuhp_reserve_state(enum cpuhp_state state)
1480{
1481 enum cpuhp_state i, end;
1482 struct cpuhp_step *step;
1483
1484 switch (state) {
1485 case CPUHP_AP_ONLINE_DYN:
1486 step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
1487 end = CPUHP_AP_ONLINE_DYN_END;
1488 break;
1489 case CPUHP_BP_PREPARE_DYN:
1490 step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
1491 end = CPUHP_BP_PREPARE_DYN_END;
1492 break;
1493 default:
1494 return -EINVAL;
1495 }
1496
1497 for (i = state; i <= end; i++, step++) {
1498 if (!step->name)
1499 return i;
1500 }
1501 WARN(1, "No more dynamic states available for CPU hotplug\n");
1502 return -ENOSPC;
1503}
1504
1505static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1506 int (*startup)(unsigned int cpu),
1507 int (*teardown)(unsigned int cpu),
1508 bool multi_instance)
1509{
1510
1511 struct cpuhp_step *sp;
1512 int ret = 0;
1513
	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
1523 if (name && (state == CPUHP_AP_ONLINE_DYN ||
1524 state == CPUHP_BP_PREPARE_DYN)) {
1525 ret = cpuhp_reserve_state(state);
1526 if (ret < 0)
1527 return ret;
1528 state = ret;
1529 }
1530 sp = cpuhp_get_step(state);
1531 if (name && sp->name)
1532 return -EBUSY;
1533
1534 sp->startup.single = startup;
1535 sp->teardown.single = teardown;
1536 sp->name = name;
1537 sp->multi_instance = multi_instance;
1538 INIT_HLIST_HEAD(&sp->list);
1539 return ret;
1540}
1541
1542static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1543{
1544 return cpuhp_get_step(state)->teardown.single;
1545}
1546
/*
 * Call the startup/teardown function for a step either on the AP or on
 * the BP.
 */
1551static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1552 struct hlist_node *node)
1553{
1554 struct cpuhp_step *sp = cpuhp_get_step(state);
1555 int ret;
1556
1557
1558
1559
1560
1561 if ((bringup && !sp->startup.single) ||
1562 (!bringup && !sp->teardown.single))
1563 return 0;
1564
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
1568#ifdef CONFIG_SMP
1569 if (cpuhp_is_ap_state(state))
1570 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1571 else
1572 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1573#else
1574 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1575#endif
1576 BUG_ON(ret && !bringup);
1577 return ret;
1578}
1579
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
1585static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1586 struct hlist_node *node)
1587{
1588 int cpu;
1589
1590
1591 for_each_present_cpu(cpu) {
1592 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1593 int cpustate = st->state;
1594
1595 if (cpu >= failedcpu)
1596 break;
1597
1598
1599 if (cpustate >= state)
1600 cpuhp_issue_call(cpu, state, false, node);
1601 }
1602}
1603
1604int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1605 struct hlist_node *node,
1606 bool invoke)
1607{
1608 struct cpuhp_step *sp;
1609 int cpu;
1610 int ret;
1611
1612 lockdep_assert_cpus_held();
1613
1614 sp = cpuhp_get_step(state);
1615 if (sp->multi_instance == false)
1616 return -EINVAL;
1617
1618 mutex_lock(&cpuhp_state_mutex);
1619
1620 if (!invoke || !sp->startup.multi)
1621 goto add_node;
1622
1623
1624
1625
1626
1627 for_each_present_cpu(cpu) {
1628 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1629 int cpustate = st->state;
1630
1631 if (cpustate < state)
1632 continue;
1633
1634 ret = cpuhp_issue_call(cpu, state, true, node);
1635 if (ret) {
1636 if (sp->teardown.multi)
1637 cpuhp_rollback_install(cpu, state, node);
1638 goto unlock;
1639 }
1640 }
1641add_node:
1642 ret = 0;
1643 hlist_add_head(node, &sp->list);
1644unlock:
1645 mutex_unlock(&cpuhp_state_mutex);
1646 return ret;
1647}
1648
1649int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1650 bool invoke)
1651{
1652 int ret;
1653
1654 cpus_read_lock();
1655 ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
1656 cpus_read_unlock();
1657 return ret;
1658}
1659EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
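
/*
 * Illustrative example (assumption, not part of this file): a subsystem that
 * registered a multi-instance state usually goes through the cpuhotplug.h
 * wrappers, roughly:
 *
 *	state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "drv/foo:online",
 *					foo_cpu_online, foo_cpu_offline);
 *	...
 *	cpuhp_state_add_instance(state, &foo->node);
 *
 * Each instance's hlist_node is queued on cpuhp_step::list and the startup/
 * teardown callbacks are then invoked once per instance.
 */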
1660
/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for a hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the step
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
1678int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
1679 const char *name, bool invoke,
1680 int (*startup)(unsigned int cpu),
1681 int (*teardown)(unsigned int cpu),
1682 bool multi_instance)
1683{
1684 int cpu, ret = 0;
1685 bool dynstate;
1686
1687 lockdep_assert_cpus_held();
1688
1689 if (cpuhp_cb_check(state) || !name)
1690 return -EINVAL;
1691
1692 mutex_lock(&cpuhp_state_mutex);
1693
1694 ret = cpuhp_store_callbacks(state, name, startup, teardown,
1695 multi_instance);
1696
1697 dynstate = state == CPUHP_AP_ONLINE_DYN;
1698 if (ret > 0 && dynstate) {
1699 state = ret;
1700 ret = 0;
1701 }
1702
1703 if (ret || !invoke || !startup)
1704 goto out;
1705
	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
1710 for_each_present_cpu(cpu) {
1711 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1712 int cpustate = st->state;
1713
1714 if (cpustate < state)
1715 continue;
1716
1717 ret = cpuhp_issue_call(cpu, state, true, NULL);
1718 if (ret) {
1719 if (teardown)
1720 cpuhp_rollback_install(cpu, state, NULL);
1721 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1722 goto out;
1723 }
1724 }
1725out:
1726 mutex_unlock(&cpuhp_state_mutex);
1727
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
1731 if (!ret && dynstate)
1732 return state;
1733 return ret;
1734}
1735EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
1736
1737int __cpuhp_setup_state(enum cpuhp_state state,
1738 const char *name, bool invoke,
1739 int (*startup)(unsigned int cpu),
1740 int (*teardown)(unsigned int cpu),
1741 bool multi_instance)
1742{
1743 int ret;
1744
1745 cpus_read_lock();
1746 ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
1747 teardown, multi_instance);
1748 cpus_read_unlock();
1749 return ret;
1750}
1751EXPORT_SYMBOL(__cpuhp_setup_state);
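
/*
 * Illustrative example (assumption, not part of this file): most users call
 * the cpuhotplug.h wrapper instead of this function directly, e.g.
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
 *				foo_online_cpu, foo_prepare_down_cpu);
 *	if (ret < 0)
 *		return ret;
 *	foo_hp_state = ret;	(dynamically allocated state number)
 *
 * With invoke == true the startup callback is run immediately on every CPU
 * that has already reached the requested state.
 */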
1752
1753int __cpuhp_state_remove_instance(enum cpuhp_state state,
1754 struct hlist_node *node, bool invoke)
1755{
1756 struct cpuhp_step *sp = cpuhp_get_step(state);
1757 int cpu;
1758
1759 BUG_ON(cpuhp_cb_check(state));
1760
1761 if (!sp->multi_instance)
1762 return -EINVAL;
1763
1764 cpus_read_lock();
1765 mutex_lock(&cpuhp_state_mutex);
1766
1767 if (!invoke || !cpuhp_get_teardown_cb(state))
1768 goto remove;
1769
1770
1771
1772
1773
1774 for_each_present_cpu(cpu) {
1775 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1776 int cpustate = st->state;
1777
1778 if (cpustate >= state)
1779 cpuhp_issue_call(cpu, state, false, node);
1780 }
1781
1782remove:
1783 hlist_del(node);
1784 mutex_unlock(&cpuhp_state_mutex);
1785 cpus_read_unlock();
1786
1787 return 0;
1788}
1789EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
1790
/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
1801void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
1802{
1803 struct cpuhp_step *sp = cpuhp_get_step(state);
1804 int cpu;
1805
1806 BUG_ON(cpuhp_cb_check(state));
1807
1808 lockdep_assert_cpus_held();
1809
1810 mutex_lock(&cpuhp_state_mutex);
1811 if (sp->multi_instance) {
1812 WARN(!hlist_empty(&sp->list),
1813 "Error: Removing state %d which has instances left.\n",
1814 state);
1815 goto remove;
1816 }
1817
1818 if (!invoke || !cpuhp_get_teardown_cb(state))
1819 goto remove;
1820
1821
1822
1823
1824
1825
1826 for_each_present_cpu(cpu) {
1827 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1828 int cpustate = st->state;
1829
1830 if (cpustate >= state)
1831 cpuhp_issue_call(cpu, state, false, NULL);
1832 }
1833remove:
1834 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1835 mutex_unlock(&cpuhp_state_mutex);
1836}
1837EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
1838
1839void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1840{
1841 cpus_read_lock();
1842 __cpuhp_remove_state_cpuslocked(state, invoke);
1843 cpus_read_unlock();
1844}
1845EXPORT_SYMBOL(__cpuhp_remove_state);
1846
1847#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1848static ssize_t show_cpuhp_state(struct device *dev,
1849 struct device_attribute *attr, char *buf)
1850{
1851 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1852
1853 return sprintf(buf, "%d\n", st->state);
1854}
1855static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1856
1857static ssize_t write_cpuhp_target(struct device *dev,
1858 struct device_attribute *attr,
1859 const char *buf, size_t count)
1860{
1861 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1862 struct cpuhp_step *sp;
1863 int target, ret;
1864
1865 ret = kstrtoint(buf, 10, &target);
1866 if (ret)
1867 return ret;
1868
1869#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1870 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1871 return -EINVAL;
1872#else
1873 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1874 return -EINVAL;
1875#endif
1876
1877 ret = lock_device_hotplug_sysfs();
1878 if (ret)
1879 return ret;
1880
1881 mutex_lock(&cpuhp_state_mutex);
1882 sp = cpuhp_get_step(target);
1883 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1884 mutex_unlock(&cpuhp_state_mutex);
1885 if (ret)
1886 goto out;
1887
1888 if (st->state < target)
1889 ret = do_cpu_up(dev->id, target);
1890 else
1891 ret = do_cpu_down(dev->id, target);
1892out:
1893 unlock_device_hotplug();
1894 return ret ? ret : count;
1895}
1896
1897static ssize_t show_cpuhp_target(struct device *dev,
1898 struct device_attribute *attr, char *buf)
1899{
1900 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1901
1902 return sprintf(buf, "%d\n", st->target);
1903}
1904static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
1905
1906
1907static ssize_t write_cpuhp_fail(struct device *dev,
1908 struct device_attribute *attr,
1909 const char *buf, size_t count)
1910{
1911 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1912 struct cpuhp_step *sp;
1913 int fail, ret;
1914
1915 ret = kstrtoint(buf, 10, &fail);
1916 if (ret)
1917 return ret;
1918
	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
1922 if (cpuhp_is_atomic_state(fail))
1923 return -EINVAL;
1924
1925
1926
1927
1928 mutex_lock(&cpuhp_state_mutex);
1929 sp = cpuhp_get_step(fail);
1930 if (!sp->startup.single && !sp->teardown.single)
1931 ret = -EINVAL;
1932 mutex_unlock(&cpuhp_state_mutex);
1933 if (ret)
1934 return ret;
1935
1936 st->fail = fail;
1937
1938 return count;
1939}
1940
1941static ssize_t show_cpuhp_fail(struct device *dev,
1942 struct device_attribute *attr, char *buf)
1943{
1944 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1945
1946 return sprintf(buf, "%d\n", st->fail);
1947}
1948
1949static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
1950
1951static struct attribute *cpuhp_cpu_attrs[] = {
1952 &dev_attr_state.attr,
1953 &dev_attr_target.attr,
1954 &dev_attr_fail.attr,
1955 NULL
1956};
1957
1958static const struct attribute_group cpuhp_cpu_attr_group = {
1959 .attrs = cpuhp_cpu_attrs,
1960 .name = "hotplug",
1961 NULL
1962};
1963
1964static ssize_t show_cpuhp_states(struct device *dev,
1965 struct device_attribute *attr, char *buf)
1966{
1967 ssize_t cur, res = 0;
1968 int i;
1969
1970 mutex_lock(&cpuhp_state_mutex);
1971 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
1972 struct cpuhp_step *sp = cpuhp_get_step(i);
1973
1974 if (sp->name) {
1975 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
1976 buf += cur;
1977 res += cur;
1978 }
1979 }
1980 mutex_unlock(&cpuhp_state_mutex);
1981 return res;
1982}
1983static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
1984
1985static struct attribute *cpuhp_cpu_root_attrs[] = {
1986 &dev_attr_states.attr,
1987 NULL
1988};
1989
1990static const struct attribute_group cpuhp_cpu_root_attr_group = {
1991 .attrs = cpuhp_cpu_root_attrs,
1992 .name = "hotplug",
1993 NULL
1994};
1995
1996#ifdef CONFIG_HOTPLUG_SMT
1997
1998static const char *smt_states[] = {
1999 [CPU_SMT_ENABLED] = "on",
2000 [CPU_SMT_DISABLED] = "off",
2001 [CPU_SMT_FORCE_DISABLED] = "forceoff",
2002 [CPU_SMT_NOT_SUPPORTED] = "notsupported",
2003};
2004
2005static ssize_t
2006show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
2007{
2008 return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
2009}
2010
2011static void cpuhp_offline_cpu_device(unsigned int cpu)
2012{
2013 struct device *dev = get_cpu_device(cpu);
2014
2015 dev->offline = true;
2016
2017 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2018}
2019
2020static void cpuhp_online_cpu_device(unsigned int cpu)
2021{
2022 struct device *dev = get_cpu_device(cpu);
2023
2024 dev->offline = false;
2025
2026 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2027}
2028
2029static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2030{
2031 int cpu, ret = 0;
2032
2033 cpu_maps_update_begin();
2034 for_each_online_cpu(cpu) {
2035 if (topology_is_primary_thread(cpu))
2036 continue;
2037 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2038 if (ret)
2039 break;
2040
		/*
		 * As this needs to hold the cpu maps lock it's impossible
		 * to call device_offline() because that ends up calling
		 * cpu_down() which takes cpu maps lock. cpu maps lock
		 * needs to be held as this might race against in kernel
		 * abusers of the hotplug machinery (thermal management).
		 *
		 * So nothing would update device:offline state. That would
		 * leave the sysfs entry stale and prevent onlining after
		 * smt control has been changed to 'off' again. This is
		 * called under the sysfs hotplug lock, so it is properly
		 * serialized against the regular offline usage.
		 */
2053 cpuhp_offline_cpu_device(cpu);
2054 }
2055 if (!ret)
2056 cpu_smt_control = ctrlval;
2057 cpu_maps_update_done();
2058 return ret;
2059}
2060
2061static int cpuhp_smt_enable(void)
2062{
2063 int cpu, ret = 0;
2064
2065 cpu_maps_update_begin();
2066 cpu_smt_control = CPU_SMT_ENABLED;
2067 for_each_present_cpu(cpu) {
2068
2069 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2070 continue;
2071 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2072 if (ret)
2073 break;
2074
2075 cpuhp_online_cpu_device(cpu);
2076 }
2077 cpu_maps_update_done();
2078 return ret;
2079}
2080
2081static ssize_t
2082store_smt_control(struct device *dev, struct device_attribute *attr,
2083 const char *buf, size_t count)
2084{
2085 int ctrlval, ret;
2086
2087 if (sysfs_streq(buf, "on"))
2088 ctrlval = CPU_SMT_ENABLED;
2089 else if (sysfs_streq(buf, "off"))
2090 ctrlval = CPU_SMT_DISABLED;
2091 else if (sysfs_streq(buf, "forceoff"))
2092 ctrlval = CPU_SMT_FORCE_DISABLED;
2093 else
2094 return -EINVAL;
2095
2096 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2097 return -EPERM;
2098
2099 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2100 return -ENODEV;
2101
2102 ret = lock_device_hotplug_sysfs();
2103 if (ret)
2104 return ret;
2105
2106 if (ctrlval != cpu_smt_control) {
2107 switch (ctrlval) {
2108 case CPU_SMT_ENABLED:
2109 ret = cpuhp_smt_enable();
2110 break;
2111 case CPU_SMT_DISABLED:
2112 case CPU_SMT_FORCE_DISABLED:
2113 ret = cpuhp_smt_disable(ctrlval);
2114 break;
2115 }
2116 }
2117
2118 unlock_device_hotplug();
2119 return ret ? ret : count;
2120}
2121static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
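
/*
 * Usage note (added for clarity): the attribute above is exposed as
 * /sys/devices/system/cpu/smt/control and accepts "on", "off" and
 * "forceoff"; it reads back "notsupported" on machines without SMT.
 */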
2122
2123static ssize_t
2124show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2125{
2126 bool active = topology_max_smt_threads() > 1;
2127
2128 return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
2129}
2130static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
2131
2132static struct attribute *cpuhp_smt_attrs[] = {
2133 &dev_attr_control.attr,
2134 &dev_attr_active.attr,
2135 NULL
2136};
2137
2138static const struct attribute_group cpuhp_smt_attr_group = {
2139 .attrs = cpuhp_smt_attrs,
2140 .name = "smt",
2141 NULL
2142};
2143
2144static int __init cpu_smt_state_init(void)
2145{
2146 return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2147 &cpuhp_smt_attr_group);
2148}
2149
2150#else
2151static inline int cpu_smt_state_init(void) { return 0; }
2152#endif
2153
2154static int __init cpuhp_sysfs_init(void)
2155{
2156 int cpu, ret;
2157
2158 ret = cpu_smt_state_init();
2159 if (ret)
2160 return ret;
2161
2162 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2163 &cpuhp_cpu_root_attr_group);
2164 if (ret)
2165 return ret;
2166
2167 for_each_possible_cpu(cpu) {
2168 struct device *dev = get_cpu_device(cpu);
2169
2170 if (!dev)
2171 continue;
2172 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2173 if (ret)
2174 return ret;
2175 }
2176 return 0;
2177}
2178device_initcall(cpuhp_sysfs_init);
2179#endif
2180
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */
2190#define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
2191#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2192#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2193#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2194
2195const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2196
2197 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
2198 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
2199#if BITS_PER_LONG > 32
2200 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
2201 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
2202#endif
2203};
2204EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
2205
2206const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2207EXPORT_SYMBOL(cpu_all_bits);
2208
2209#ifdef CONFIG_INIT_ALL_POSSIBLE
2210struct cpumask __cpu_possible_mask __read_mostly
2211 = {CPU_BITS_ALL};
2212#else
2213struct cpumask __cpu_possible_mask __read_mostly;
2214#endif
2215EXPORT_SYMBOL(__cpu_possible_mask);
2216
2217struct cpumask __cpu_online_mask __read_mostly;
2218EXPORT_SYMBOL(__cpu_online_mask);
2219
2220struct cpumask __cpu_present_mask __read_mostly;
2221EXPORT_SYMBOL(__cpu_present_mask);
2222
2223struct cpumask __cpu_active_mask __read_mostly;
2224EXPORT_SYMBOL(__cpu_active_mask);
2225
2226void init_cpu_present(const struct cpumask *src)
2227{
2228 cpumask_copy(&__cpu_present_mask, src);
2229}
2230
2231void init_cpu_possible(const struct cpumask *src)
2232{
2233 cpumask_copy(&__cpu_possible_mask, src);
2234}
2235
2236void init_cpu_online(const struct cpumask *src)
2237{
2238 cpumask_copy(&__cpu_online_mask, src);
2239}
2240
/*
 * Activate the first processor.
 */
2244void __init boot_cpu_init(void)
2245{
2246 int cpu = smp_processor_id();
2247
2248
2249 set_cpu_online(cpu, true);
2250 set_cpu_active(cpu, true);
2251 set_cpu_present(cpu, true);
2252 set_cpu_possible(cpu, true);
2253
2254#ifdef CONFIG_SMP
2255 __boot_cpu_id = cpu;
2256#endif
2257}
2258
/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
2262void __init boot_cpu_hotplug_init(void)
2263{
2264#ifdef CONFIG_SMP
2265 this_cpu_write(cpuhp_state.booted_once, true);
2266#endif
2267 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
2268}
2269