/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/cpuset.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * struct cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @fail:	Current CPU hotplug callback state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cpu:	CPU number
 * @node:	Remote CPU node; for multi-instance, do a
 *		single entry callback for install/remove
 * @last:	For multi-instance rollback, remember how far we got
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	int			cpu;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#ifdef CONFIG_SMP
cpumask_t cpus_booted_once_mask;
#endif

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);

static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * struct cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @list:	For multi-instance steps, the list of registered instances
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 * @multi_instance:	State has multiple instances which get added afterwards
 */
struct cpuhp_step {
	const char *name;
	union {
		int (*single)(unsigned int cpu);
		int (*multi)(unsigned int cpu, struct hlist_node *node);
	} startup;
	union {
		int (*single)(unsigned int cpu);
		int (*multi)(unsigned int cpu, struct hlist_node *node);
	} teardown;
	struct hlist_head list;
	bool cant_stop;
	bool multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}

static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
{
	return bringup ? !step->startup.single : !step->teardown.single;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;
		return -EAGAIN;
	}

	if (cpuhp_step_empty(bringup, step)) {
		WARN_ON_ONCE(1);
		return 0;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;

		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail,
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}

#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	complete(done);
}

/*
 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

int cpus_read_trylock(void)
{
	return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);
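
/*
 * Usage sketch (illustrative only, not part of this file): readers hold the
 * hotplug lock around any walk of the online mask so CPUs cannot come or go
 * mid-iteration.
 *
 *	int cpu;
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);	// do_something() is hypothetical
 *	cpus_read_unlock();
 */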

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	/*
	 * We can't have hotplug operations before userspace starts running,
	 * and some init codepaths will knowingly not take the hotplug lock.
	 * This is all valid, so mute lockdep until it makes sense to report
	 * unheld locks.
	 */
	if (system_state < SYSTEM_RUNNING)
		return;

	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

#ifdef CONFIG_LOCKDEP
int lockdep_is_cpus_held(void)
{
	return percpu_rwsem_is_held(&cpu_hotplug_lock);
}
#endif

static void lockdep_acquire_cpus_lock(void)
{
	rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
}

static void lockdep_release_cpus_lock(void)
{
	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
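
/*
 * Pairing sketch (illustrative): every cpu_hotplug_disable() must be
 * balanced by exactly one cpu_hotplug_enable(), otherwise hotplug stays
 * disabled or the WARN_ONCE above fires.
 *
 *	cpu_hotplug_disable();
 *	work_that_must_not_race_with_hotplug();	// hypothetical helper
 *	cpu_hotplug_enable();
 */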

#else

static void lockdep_acquire_cpus_lock(void)
{
}

static void lockdep_release_cpus_lock(void)
{
}

#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;

void __init cpu_smt_disable(bool force)
{
	if (!cpu_smt_possible())
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		pr_info("SMT: disabled\n");
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_check_topology(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);
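
/*
 * Boot-time usage (illustrative): "nosmt" on the kernel command line
 * disables SMT but allows re-enabling via sysfs, while "nosmt=force"
 * disables it irreversibly for this boot.
 */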

static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * On x86 all logical CPUs must be booted at least once so that the
	 * init code can set CR4.MCE on each of them. Otherwise a broken
	 * BIOS could leave a core in MCE-broadcast mode and a late MCE
	 * would shut down the machine. Hence a never-yet-booted sibling is
	 * allowed one bringup; it is torn down again right after that
	 * first boot (see bringup_wait_for_ap()).
	 */
	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}

/* Returns true if SMT is supported and not forcefully (irreversibly) disabled */
bool cpu_smt_possible(void)
{
	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
}
EXPORT_SYMBOL_GPL(cpu_smt_possible);
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif

static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	bool bringup = st->state < target;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = bringup;
	if (cpu_dying(st->cpu) != !bringup)
		set_cpu_dying(st->cpu, !bringup);

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	bool bringup = !st->bringup;

	st->target = prev_state;

	/*
	 * Already rolling back. No need to invert the bringup value or to
	 * change the current state.
	 */
	if (st->rollback)
		return;

	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->bringup = bringup;
	if (cpu_dying(st->cpu) != !bringup)
		set_cpu_dying(st->cpu, !bringup);
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() in cpuhp_thread_fun().
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the hotplug thread of the target cpu */
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

static int finish_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	struct mm_struct *mm = idle->active_mm;

	/*
	 * idle_task_exit() will have switched to &init_mm, now
	 * clean up any remaining active_mm state.
	 */
	if (mm != &init_mm)
		idle->active_mm = &init_mm;
	mmdrop(mm);
	return 0;
}

/*
 * Hotplug state machine related functions
 */

/*
 * Get the next state to run. Empty ones will be skipped. Returns true if a
 * state must be run.
 *
 * st->state will be modified ahead of time, to match state_to_run, as if it
 * has already ran.
 */
static bool cpuhp_next_state(bool bringup,
			     enum cpuhp_state *state_to_run,
			     struct cpuhp_cpu_state *st,
			     enum cpuhp_state target)
{
	do {
		if (bringup) {
			if (st->state >= target)
				return false;

			*state_to_run = ++st->state;
		} else {
			if (st->state <= target)
				return false;

			*state_to_run = st->state--;
		}

		if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
			break;
	} while (true);

	return true;
}
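
/*
 * Worked example (illustrative): with st->state == 3 and target == 7 on
 * bringup, successive calls yield the non-empty states among 4, 5, 6, 7 and
 * then return false. On teardown from 7 down to target 3, the current state
 * is handed out before the decrement, i.e. 7, 6, 5, 4 are run.
 */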

static int cpuhp_invoke_callback_range(bool bringup,
				       unsigned int cpu,
				       struct cpuhp_cpu_state *st,
				       enum cpuhp_state target)
{
	enum cpuhp_state state;
	int err = 0;

	while (cpuhp_next_state(bringup, &state, st, target)) {
		err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
		if (err)
			break;
	}

	return err;
}

static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return true;
	/*
	 * When CPU hotplug is disabled, then taking the CPU down is not
	 * possible because takedown_cpu() and the architecture and
	 * subsystem specific mechanisms are not available. So the CPU
	 * which would be completely unplugged again needs to stay around
	 * in the current state.
	 */
	return st->state <= CPUHP_BRINGUP_CPU;
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
	if (ret) {
		cpuhp_reset_state(st, prev_state);
		if (can_rollback_cpu(st))
			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
							    prev_state));
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
	st->cpu = cpu;
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	if (WARN_ON_ONCE(!st->should_run))
		return;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	/*
	 * The BP holds the hotplug lock, but we're now running on the AP,
	 * ensure that anybody asserting the lock is held, will actually find
	 * it so.
	 */
	lockdep_acquire_cpus_lock();
	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
		if (!st->should_run)
			goto end;
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up the creek without a
		 * paddle: no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

end:
	cpuhp_lock_release(bringup);
	lockdep_release_cpus_lock();

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store = &cpuhp_state.thread,
	.create = &cpuhp_create,
	.thread_should_run = cpuhp_should_run,
	.thread_fn = cpuhp_thread_fun,
	.thread_comm = "cpuhp/%u",
	.selfparking = true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

/*
 * Serialize the cpuset hotplug fallout outside of the cpu_hotplug_lock
 * protected region. The operation is still serialized against concurrent
 * CPU hotplug via cpu_add_remove_lock.
 */
static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen)
{
	/*
	 * cpusets delegate hotplug operations to a worker to "solve" the
	 * lock order problems. Wait for the worker, but only if tasks are
	 * _not_ frozen (suspend, hibernate) as that would wait forever.
	 *
	 * The wait is required because otherwise the hotplug operation
	 * returns with inconsistent state, which could even be observed in
	 * user space when a new CPU is brought up: the CPU plug uevent is
	 * delivered before cpusets can actually place tasks on the new CPU.
	 */
	if (!tasks_frozen)
		cpuset_wait_for_hotplug();
}

#ifdef CONFIG_HOTPLUG_CPU
#ifndef arch_clear_mm_cpumask_cpu
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
#endif

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so its not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		arch_clear_mm_cpumask_cpu(cpu, t->mm);
		task_unlock(t);
	}
	rcu_read_unlock();
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are
	 * going down, that the current state is CPUHP_TEARDOWN_CPU - 1.
	 */
	WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));

	/* Invoke the former CPU_DYING callbacks */
	ret = cpuhp_invoke_callback_range(false, cpu, st, target);

	/*
	 * DYING must not fail!
	 */
	WARN_ON_ONCE(ret);

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Remove CPU from timer broadcasting */
	tick_offline_cpu(cpu);
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(st->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(st->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
	if (ret) {
		cpuhp_reset_state(st, prev_state);

		if (st->state < prev_state)
			WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
							    prev_state));
	}

	return ret;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. In that case the requested target was above
		 * CPUHP_TEARDOWN_CPU and there is nothing left to do here.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state < prev_state) {
		if (st->state == CPUHP_TEARDOWN_CPU) {
			cpuhp_reset_state(st, prev_state);
			__cpuhp_kick_ap(st);
		} else {
			WARN(1, "DEAD callback error for CPU%d", cpu);
		}
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	arch_smt_update();
	cpu_up_down_serialize_trainwrecks(tasks_frozen);
	return ret;
}

static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	if (cpu_hotplug_disabled)
		return -EBUSY;
	return _cpu_down(cpu, 0, target);
}

static int cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}

/**
 * cpu_device_down - Bring down a cpu device
 * @dev: Pointer to the cpu device to offline
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use remove_cpu() instead.
 */
int cpu_device_down(struct device *dev)
{
	return cpu_down(dev->id, CPUHP_OFFLINE);
}

int remove_cpu(unsigned int cpu)
{
	int ret;

	lock_device_hotplug();
	ret = device_offline(get_cpu_device(cpu));
	unlock_device_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(remove_cpu);

void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
{
	unsigned int cpu;
	int error;

	cpu_maps_update_begin();

	/*
	 * Make certain the cpu I'm about to reboot on is online.
	 *
	 * This is inline to what migrate_to_reboot_cpu() already do.
	 */
	if (!cpu_online(primary_cpu))
		primary_cpu = cpumask_first(cpu_online_mask);

	for_each_online_cpu(cpu) {
		if (cpu == primary_cpu)
			continue;

		error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (error) {
			pr_err("Failed to offline CPU%d - error=%d\n",
				cpu, error);
			break;
		}
	}

	/*
	 * Ensure all but the reboot CPU are offline.
	 */
	BUG_ON(num_online_cpus() > 1);

	/*
	 * Make sure the CPUs won't be enabled by someone else after this
	 * point. Kexec will reboot to a new kernel shortly resetting
	 * everything along the way.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
}

#else
#define takedown_cpu		NULL
#endif	/* CONFIG_HOTPLUG_CPU */

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
	ret = cpuhp_invoke_callback_range(true, cpu, st, target);

	/*
	 * STARTING must not fail!
	 */
	WARN_ON_ONCE(ret);
}

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * hotplug thread of the upcoming CPU up and then delegates the rest of the
 * online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	/*
	 * Unpark the stopper thread before we start the idle loop (and start
	 * scheduling); this ensures the stopper task is always available.
	 */
	stop_machine_unpark(smp_processor_id());

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of cpu_up() might have raced with another
	 * caller. Nothing to do.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	arch_smt_update();
	cpu_up_down_serialize_trainwrecks(tasks_frozen);
	return ret;
}

static int cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	if (!cpu_smt_allowed(cpu)) {
		err = -EPERM;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

/**
 * cpu_device_up - Bring up a cpu device
 * @dev: Pointer to the cpu device to online
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use add_cpu() instead.
 */
int cpu_device_up(struct device *dev)
{
	return cpu_up(dev->id, CPUHP_ONLINE);
}

int add_cpu(unsigned int cpu)
{
	int ret;

	lock_device_hotplug();
	ret = device_online(get_cpu_device(cpu));
	unlock_device_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(add_cpu);
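
/*
 * Usage sketch (illustrative): kernel code that needs to take a CPU out of
 * service and later restore it pairs remove_cpu() with add_cpu(); both go
 * through the device core so the sysfs online state stays consistent.
 *
 *	ret = remove_cpu(3);
 *	if (!ret)
 *		ret = add_cpu(3);
 */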

/**
 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
 * @sleep_cpu: The cpu we hibernated on and should be brought up.
 *
 * On some architectures like arm64, we can hibernate on any CPU, but on
 * wake up the CPU we hibernated on might be offline as a side effect of
 * using maxcpus= for example.
 */
int bringup_hibernate_cpu(unsigned int sleep_cpu)
{
	int ret;

	if (!cpu_online(sleep_cpu)) {
		pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
		ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
		if (ret) {
			pr_err("Failed to bring hibernate-CPU up!\n");
			return ret;
		}
	}
	return 0;
}

void bringup_nonboot_cpus(unsigned int setup_max_cpus)
{
	unsigned int cpu;

	for_each_present_cpu(cpu) {
		if (num_online_cpus() >= setup_max_cpus)
			break;
		if (!cpu_online(cpu))
			cpu_up(cpu, CPUHP_ONLINE);
	}
}

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (primary == -1) {
		primary = cpumask_first(cpu_online_mask);
		if (!housekeeping_cpu(primary, HK_FLAG_TIMER))
			primary = housekeeping_any_cpu(HK_FLAG_TIMER);
	} else {
		if (!cpu_online(primary))
			primary = cpumask_first(cpu_online_mask);
	}

	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;

		if (pm_wakeup_pending()) {
			pr_info("Wakeup pending. Abort CPU freeze\n");
			error = -EBUSY;
			break;
		}

		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all freeze_secondary_cpus() users are
	 * supposed to do thaw_secondary_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_thaw_secondary_cpus_begin(void)
{
}

void __weak arch_thaw_secondary_cpus_end(void)
{
}

void thaw_secondary_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_thaw_secondary_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_thaw_secondary_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif	/* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif	/* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_hp_states[] = {
	[CPUHP_OFFLINE] = {
		.name = "offline",
		.startup.single = NULL,
		.teardown.single = NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name = "threads:prepare",
		.startup.single = smpboot_create_threads,
		.teardown.single = NULL,
		.cant_stop = true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name = "perf:prepare",
		.startup.single = perf_event_init_cpu,
		.teardown.single = perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name = "workqueue:prepare",
		.startup.single = workqueue_prepare_cpu,
		.teardown.single = NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name = "hrtimers:prepare",
		.startup.single = hrtimers_prepare_cpu,
		.teardown.single = hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name = "smpcfd:prepare",
		.startup.single = smpcfd_prepare_cpu,
		.teardown.single = smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name = "relay:prepare",
		.startup.single = relay_prepare_cpu,
		.teardown.single = NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name = "slab:prepare",
		.startup.single = slab_prepare_cpu,
		.teardown.single = slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name = "RCU/tree:prepare",
		.startup.single = rcutree_prepare_cpu,
		.teardown.single = rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_PREPARE] = {
		.name = "timers:prepare",
		.startup.single = timers_prepare_cpu,
		.teardown.single = timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name = "cpu:bringup",
		.startup.single = bringup_cpu,
		.teardown.single = finish_cpu,
		.cant_stop = true,
	},
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name = "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name = "ap:offline",
		.cant_stop = true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name = "sched:starting",
		.startup.single = sched_cpu_starting,
		.teardown.single = sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name = "RCU/tree:dying",
		.startup.single = NULL,
		.teardown.single = rcutree_dying_cpu,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name = "smpcfd:dying",
		.startup.single = NULL,
		.teardown.single = smpcfd_dying_cpu,
	},
	/* Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization */
	[CPUHP_AP_ONLINE] = {
		.name = "ap:online",
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name = "cpu:teardown",
		.startup.single = NULL,
		.teardown.single = takedown_cpu,
		.cant_stop = true,
	},

	[CPUHP_AP_SCHED_WAIT_EMPTY] = {
		.name = "sched:waitempty",
		.startup.single = NULL,
		.teardown.single = sched_cpu_wait_empty,
	},

	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name = "smpboot/threads:online",
		.startup.single = smpboot_unpark_threads,
		.teardown.single = smpboot_park_threads,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name = "irq/affinity:online",
		.startup.single = irq_affinity_online_cpu,
		.teardown.single = NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name = "perf:online",
		.startup.single = perf_event_init_cpu,
		.teardown.single = perf_event_exit_cpu,
	},
	[CPUHP_AP_WATCHDOG_ONLINE] = {
		.name = "lockup_detector:online",
		.startup.single = lockup_detector_online_cpu,
		.teardown.single = lockup_detector_offline_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name = "workqueue:online",
		.startup.single = workqueue_online_cpu,
		.teardown.single = workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name = "RCU/tree:online",
		.startup.single = rcutree_online_cpu,
		.teardown.single = rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name = "sched:active",
		.startup.single = sched_cpu_activate,
		.teardown.single = sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name = "online",
		.startup.single = NULL,
		.teardown.single = NULL,
	},
};

/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
		return -EINVAL;
	return 0;
}

/*
 * Returns a free slot for dynamic state assignment. The slots are
 * protected by cpuhp_state_mutex and an empty slot is identified by
 * having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	enum cpuhp_state i, end;
	struct cpuhp_step *step;

	switch (state) {
	case CPUHP_AP_ONLINE_DYN:
		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
		break;
	case CPUHP_BP_PREPARE_DYN:
		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
		break;
	default:
		return -EINVAL;
	}

	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}
	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}

/*
 * (Un)Install the callbacks for further cpu hotplug operations
 */
static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
				 int (*startup)(unsigned int cpu),
				 int (*teardown)(unsigned int cpu),
				 bool multi_instance)
{
	struct cpuhp_step *sp;
	int ret = 0;

	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
		ret = cpuhp_reserve_state(state);
		if (ret < 0)
			return ret;
		state = ret;
	}
	sp = cpuhp_get_step(state);
	if (name && sp->name)
		return -EBUSY;

	sp->startup.single = startup;
	sp->teardown.single = teardown;
	sp->name = name;
	sp->multi_instance = multi_instance;
	INIT_HLIST_HEAD(&sp->list);
	return ret;
}

static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
{
	return cpuhp_get_step(state)->teardown.single;
}

/*
 * Call the startup/teardown function for a step either on the AP or
 * on the BP.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
	if (cpuhp_step_empty(bringup, sp))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
	BUG_ON(ret && !bringup);
	return ret;
}

/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpu >= failedcpu)
			break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}

int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (sp->multi_instance == false)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
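
/*
 * Multi-instance usage sketch (illustrative; the "foo" names are
 * hypothetical). A driver embeds a hlist_node in its per-device structure,
 * registers the state once with cpuhp_setup_state_multi(), and then adds one
 * instance per device with cpuhp_state_add_instance():
 *
 *	struct foo_dev {
 *		struct hlist_node node;
 *	};
 *
 *	foo_hp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *					       "foo:online",
 *					       foo_cpu_online,
 *					       foo_cpu_offline);
 *	...
 *	ret = cpuhp_state_add_instance(foo_hp_state, &foo->node);
 */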

/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the step
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN;
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
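
/*
 * Registration sketch (illustrative; the foo_* callbacks are hypothetical).
 * Most users go through the cpuhp_setup_state() wrapper, which passes
 * invoke=true so the startup callback runs on all already-online CPUs:
 *
 *	static int foo_online(unsigned int cpu)
 *	{
 *		return 0;	// set up per-cpu resources for @cpu
 *	}
 *
 *	static int foo_offline(unsigned int cpu)
 *	{
 *		return 0;	// tear them down again
 *	}
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
 *				foo_online, foo_offline);
 *	if (ret < 0)
 *		return ret;
 *	foo_hp_state = ret;	// dynamic states return the allocated slot
 */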

int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);

/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);
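
/*
 * Teardown sketch (illustrative): a module that registered a dynamic state
 * removes it again on exit via the cpuhp_remove_state() wrapper, which
 * passes invoke=true so the teardown callback runs on all online CPUs first.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		cpuhp_remove_state(foo_hp_state);  // foo_hp_state is hypothetical
 *	}
 */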

#ifdef CONFIG_HOTPLUG_SMT
static void cpuhp_offline_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = true;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}

static void cpuhp_online_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = false;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}

int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	for_each_online_cpu(cpu) {
		if (topology_is_primary_thread(cpu))
			continue;
		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (ret)
			break;
		/*
		 * As this needs to hold the cpu maps lock it's impossible
		 * to call device_offline() because that ends up calling
		 * cpu_down() which takes cpu maps lock. cpu maps lock
		 * needs to be held as this might race against in kernel
		 * abusers of the hotplug machinery (thermal management).
		 *
		 * So nothing would update device:offline state. That would
		 * leave the sysfs entry stale and prevent onlining after
		 * smt control has been changed to 'off' again. This is
		 * called under the sysfs hotplug lock, so it is properly
		 * serialized against the regular offline usage.
		 */
		cpuhp_offline_cpu_device(cpu);
	}
	if (!ret)
		cpu_smt_control = ctrlval;
	cpu_maps_update_done();
	return ret;
}

int cpuhp_smt_enable(void)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	cpu_smt_control = CPU_SMT_ENABLED;
	for_each_present_cpu(cpu) {
		/* Skip online CPUs and CPUs on offline nodes */
		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
			continue;
		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
		if (ret)
			break;
		/* See comment in cpuhp_smt_disable() */
		cpuhp_online_cpu_device(cpu);
	}
	cpu_maps_update_done();
	return ret;
}
#endif

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = cpu_up(dev->id, target);
	else
		ret = cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);

static ssize_t write_cpuhp_fail(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	if (fail == CPUHP_INVALID) {
		st->fail = fail;
		return count;
	}

	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
		return -EINVAL;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * DEAD callbacks cannot fail...
	 * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter
	 * triggering STARTING callbacks, a failure in this state would
	 * hinder rollback.
	 */
	if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}

static ssize_t show_cpuhp_fail(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
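
/*
 * These attributes live under /sys/devices/system/cpu/cpuX/hotplug/.
 * Illustrative shell usage (for testing, not part of this file):
 *
 *	cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target	# offline cpu1
 *	echo 42 > /sys/devices/system/cpu/cpu1/hotplug/fail	# inject a failure
 *
 * The valid numeric values depend on the kernel's state table; the global
 * 'states' attribute below lists the populated names and numbers.
 */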

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
	NULL
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
	NULL
};

#ifdef CONFIG_HOTPLUG_SMT

static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	int ctrlval, ret;

	if (sysfs_streq(buf, "on"))
		ctrlval = CPU_SMT_ENABLED;
	else if (sysfs_streq(buf, "off"))
		ctrlval = CPU_SMT_DISABLED;
	else if (sysfs_streq(buf, "forceoff"))
		ctrlval = CPU_SMT_FORCE_DISABLED;
	else
		return -EINVAL;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
		return -EPERM;

	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return -ENODEV;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (ctrlval != cpu_smt_control) {
		switch (ctrlval) {
		case CPU_SMT_ENABLED:
			ret = cpuhp_smt_enable();
			break;
		case CPU_SMT_DISABLED:
		case CPU_SMT_FORCE_DISABLED:
			ret = cpuhp_smt_disable(ctrlval);
			break;
		}
	}

	unlock_device_hotplug();
	return ret ? ret : count;
}

#else /* !CONFIG_HOTPLUG_SMT */
static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	return -ENODEV;
}
#endif /* CONFIG_HOTPLUG_SMT */

static const char *smt_states[] = {
	[CPU_SMT_ENABLED] = "on",
	[CPU_SMT_DISABLED] = "off",
	[CPU_SMT_FORCE_DISABLED] = "forceoff",
	[CPU_SMT_NOT_SUPPORTED] = "notsupported",
	[CPU_SMT_NOT_IMPLEMENTED] = "notimplemented",
};

static ssize_t
show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
{
	const char *state = smt_states[cpu_smt_control];

	return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
}

static ssize_t
store_smt_control(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	return __store_smt_control(dev, attr, buf, count);
}
static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);

static ssize_t
show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
}
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);

static struct attribute *cpuhp_smt_attrs[] = {
	&dev_attr_control.attr,
	&dev_attr_active.attr,
	NULL
};

static const struct attribute_group cpuhp_smt_attr_group = {
	.attrs = cpuhp_smt_attrs,
	.name = "smt",
	NULL
};

static int __init cpu_smt_sysfs_init(void)
{
	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
				  &cpuhp_smt_attr_group);
}

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = cpu_smt_sysfs_init();
	if (ret)
		return ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
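
/*
 * Worked example (illustrative): cpumask_of(cpu) uses the row indexed by
 * 1 + cpu % BITS_PER_LONG and steps the pointer back by cpu / BITS_PER_LONG
 * words. With 64-bit longs, cpumask_of(3) points at cpu_bit_bitmap[4],
 * whose first word is 1UL << 3; cpumask_of(67) uses the same row backed up
 * one word, so bit 67 of the resulting mask is set. The empty row 0 is what
 * makes backing the pointer up safe.
 */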

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

struct cpumask __cpu_dying_mask __read_mostly;
EXPORT_SYMBOL(__cpu_dying_mask);

atomic_t __num_online_cpus __read_mostly;
EXPORT_SYMBOL(__num_online_cpus);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

void set_cpu_online(unsigned int cpu, bool online)
{
	/*
	 * atomic_inc/dec() is required to handle the horrid abuse of this
	 * function by the reboot and kexec code which invoke it from an
	 * IPI/NMI broadcast when shutting down CPUs. Invocation from
	 * regular CPU hotplug is properly serialized.
	 *
	 * Note that the fact that __num_online_cpus is of type atomic_t
	 * does not protect readers which are not serialized against
	 * concurrent hotplug operations.
	 */
	if (online) {
		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
			atomic_inc(&__num_online_cpus);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
			atomic_dec(&__num_online_cpus);
	}
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
	cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
#endif
	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}

/*
 * These are used for a global "mitigations=" cmdline option for toggling
 * optional CPU mitigations.
 */
enum cpu_mitigations {
	CPU_MITIGATIONS_OFF,
	CPU_MITIGATIONS_AUTO,
	CPU_MITIGATIONS_AUTO_NOSMT,
};

static enum cpu_mitigations cpu_mitigations __ro_after_init =
	CPU_MITIGATIONS_AUTO;

static int __init mitigations_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		cpu_mitigations = CPU_MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO;
	else if (!strcmp(arg, "auto,nosmt"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
	else
		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
			arg);

	return 0;
}
early_param("mitigations", mitigations_parse_cmdline);

/* mitigations=off */
bool cpu_mitigations_off(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_OFF;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_off);

/* mitigations=auto,nosmt */
bool cpu_mitigations_auto_nosmt(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);