/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @fail:	Injected failure state for testing, CPUHP_INVALID when unused
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @last:	For multi-instance rollback, remember how far we got
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#ifdef CONFIG_SMP
cpumask_t cpus_booted_once_mask;
#endif

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);

static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @list:	For multi-instance steps, the list of registered instances
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 * @multi_instance: State has multiple instances which get added afterwards
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail,
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}

#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	complete(done);
}

/*
 * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
285
#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

int cpus_read_trylock(void)
{
	return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

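/*
 * Illustrative sketch (not part of this file's API; foo_count_online() is
 * a made-up example function): a typical reader brackets CPU-mask
 * sensitive work with cpus_read_lock()/cpus_read_unlock() so that no CPU
 * can come or go in between:
 *
 *	static unsigned int foo_count_online(void)
 *	{
 *		unsigned int cpu, cnt = 0;
 *
 *		cpus_read_lock();
 *		for_each_online_cpu(cpu)
 *			cnt++;		// cpu_online_mask is stable here
 *		cpus_read_unlock();
 *		return cnt;
 *	}
 */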
void lockdep_assert_cpus_held(void)
{
	/*
	 * We can't have hotplug operations before userspace starts running,
	 * and some init codepaths will knowingly not take the hotplug lock.
	 * This is all valid, so mute lockdep until it makes sense to report
	 * unheld locks.
	 */
	if (system_state < SYSTEM_RUNNING)
		return;

	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

static void lockdep_acquire_cpus_lock(void)
{
	rwsem_acquire(&cpu_hotplug_lock.rw_sem.dep_map, 0, 0, _THIS_IP_);
}

static void lockdep_release_cpus_lock(void)
{
	rwsem_release(&cpu_hotplug_lock.rw_sem.dep_map, 1, _THIS_IP_);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);

#else

static void lockdep_acquire_cpus_lock(void)
{
}

static void lockdep_release_cpus_lock(void)
{
}

#endif	/* CONFIG_HOTPLUG_CPU */
383

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;

void __init cpu_smt_disable(bool force)
{
	if (!cpu_smt_possible())
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		pr_info("SMT: disabled\n");
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}
406
/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_check_topology(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);
423
static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
	 * core will shutdown the machine.
	 */
	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}
440

bool cpu_smt_possible(void)
{
	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
}
EXPORT_SYMBOL_GPL(cpu_smt_possible);
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif
451
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}
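
/*
 * Worked example (illustrative): a bringup towards CPUHP_ONLINE that
 * fails in state N leaves st->state == N, whose startup callback did not
 * complete. cpuhp_reset_state() therefore steps back to N - 1 (unless a
 * partial multi-instance invocation recorded st->last), flips st->bringup
 * and points st->target back at the previous state, so the rollback only
 * tears down states whose startup actually succeeded.
 */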
486

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}
518
static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}
547
static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}
567

/*
 * Hotplug state machine related functions
 */

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}

static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return true;
	/*
	 * When CPU hotplug is disabled, then taking the CPU down is not
	 * possible because takedown_cpu() and the architecture and
	 * subsystem specific mechanisms are not available. So the CPU
	 * which would be completely unplugged again needs to stay around
	 * in the current state.
	 */
	return st->state <= CPUHP_BRINGUP_CPU;
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			if (can_rollback_cpu(st)) {
				st->target = prev_state;
				undo_cpu_up(cpu, st);
			}
			break;
		}
	}
	return ret;
}
611

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}
629
/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	if (WARN_ON_ONCE(!st->should_run))
		return;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	/*
	 * The BP holds the hotplug lock, but we're now running on the AP,
	 * ensure that anybody asserting the lock is held, will actually find
	 * it so.
	 */
	lockdep_acquire_cpus_lock();
	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

	cpuhp_lock_release(bringup);
	lockdep_release_cpus_lock();

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}
715

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
720{
721 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
722 int ret;
723
724 if (!cpu_online(cpu))
725 return 0;
726
727 cpuhp_lock_acquire(false);
728 cpuhp_lock_release(false);
729
730 cpuhp_lock_acquire(true);
731 cpuhp_lock_release(true);
732
	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
739
740 st->rollback = false;
741 st->last = NULL;
742
743 st->node = node;
744 st->bringup = bringup;
745 st->cb_state = state;
746 st->single = true;
747
748 __cpuhp_kick_ap(st);
749
	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
754 st->rollback = true;
755 st->bringup = !bringup;
756
757 __cpuhp_kick_ap(st);
758 }
759
	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
765 return ret;
766}
767
static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}
801
#ifdef CONFIG_HOTPLUG_CPU

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While doing all this,
 * this function never fails. This is needed by the kexec and reboot code to
 * clear whatever is the mm state for the CPU which is about to be shut down.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so its not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
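
/*
 * Illustrative call site (sketch, arch specific; details vary by
 * architecture): the teardown path typically invokes this once the dying
 * CPU has been marked offline and can no longer pick up new tasks, e.g.:
 *
 *	int __cpu_disable(void)
 *	{
 *		...
 *		set_cpu_online(cpu, false);
 *		...
 *		clear_tasks_mm_cpumask(cpu);
 *		...
 *	}
 */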
843

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Remove CPU from timer broadcasting */
	tick_offline_cpu(cpu);
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
880
static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The CPUHP_AP_SCHED_STARTING callback will have removed all
	 * runnable tasks from the CPU, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}
929
static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}
951
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++)
		cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		if (ret) {
			st->target = prev_state;
			if (st->state < prev_state)
				undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}
975

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing would be gained by kicking it again.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

out:
	cpus_write_unlock();

	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	arch_smt_update();
	return ret;
}
1037
static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	if (cpu_hotplug_disabled)
		return -EBUSY;
	return _cpu_down(cpu, 0, target);
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);

#else
#define takedown_cpu		NULL
#endif	/* CONFIG_HOTPLUG_CPU */

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		/*
		 * STARTING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
}
1089

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * hotplug thread of the upcoming CPU up and then delegates the rest of the
 * online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}
1106

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	arch_smt_update();
	return ret;
}
1166
static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}
	if (!cpu_smt_allowed(cpu)) {
		err = -EPERM;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
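
/*
 * Usage sketch (illustrative, error handling elided): in-kernel users,
 * such as the SMT control code below, take a present CPU through the
 * full state machine with these two entry points:
 *
 *	if (cpu_present(2) && !cpu_online(2))
 *		ret = cpu_up(2);
 *	...
 *	ret = cpu_down(2);	// CONFIG_HOTPLUG_CPU only
 */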
1206
1207#ifdef CONFIG_PM_SLEEP_SMP
1208static cpumask_var_t frozen_cpus;
1209
1210int freeze_secondary_cpus(int primary)
1211{
1212 int cpu, error = 0;
1213
1214 cpu_maps_update_begin();
1215 if (primary == -1) {
1216 primary = cpumask_first(cpu_online_mask);
1217 if (!housekeeping_cpu(primary, HK_FLAG_TIMER))
1218 primary = housekeeping_any_cpu(HK_FLAG_TIMER);
1219 } else {
1220 if (!cpu_online(primary))
1221 primary = cpumask_first(cpu_online_mask);
1222 }
1223
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);
1229
1230 pr_info("Disabling non-boot CPUs ...\n");
1231 for_each_online_cpu(cpu) {
1232 if (cpu == primary)
1233 continue;
1234
1235 if (pm_wakeup_pending()) {
1236 pr_info("Wakeup pending. Abort CPU freeze\n");
1237 error = -EBUSY;
1238 break;
1239 }
1240
1241 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1242 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1243 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1244 if (!error)
1245 cpumask_set_cpu(cpu, frozen_cpus);
1246 else {
1247 pr_err("Error taking CPU%d down: %d\n", cpu, error);
1248 break;
1249 }
1250 }
1251
1252 if (!error)
1253 BUG_ON(num_online_cpus() > 1);
1254 else
1255 pr_err("Non-boot CPUs are not disabled\n");
1256
	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;
1263
1264 cpu_maps_update_done();
1265 return error;
1266}
1267
1268void __weak arch_enable_nonboot_cpus_begin(void)
1269{
1270}
1271
1272void __weak arch_enable_nonboot_cpus_end(void)
1273{
1274}
1275
1276void enable_nonboot_cpus(void)
1277{
1278 int cpu, error;
1279
1280
1281 cpu_maps_update_begin();
1282 __cpu_hotplug_enable();
1283 if (cpumask_empty(frozen_cpus))
1284 goto out;
1285
1286 pr_info("Enabling non-boot CPUs ...\n");
1287
1288 arch_enable_nonboot_cpus_begin();
1289
1290 for_each_cpu(cpu, frozen_cpus) {
1291 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1292 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1293 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1294 if (!error) {
1295 pr_info("CPU%d is up\n", cpu);
1296 continue;
1297 }
1298 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1299 }
1300
1301 arch_enable_nonboot_cpus_end();
1302
1303 cpumask_clear(frozen_cpus);
1304out:
1305 cpu_maps_update_done();
1306}
1307
1308static int __init alloc_frozen_cpus(void)
1309{
1310 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1311 return -ENOMEM;
1312 return 0;
1313}
1314core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
1327static int
1328cpu_hotplug_pm_callback(struct notifier_block *nb,
1329 unsigned long action, void *ptr)
1330{
1331 switch (action) {
1332
1333 case PM_SUSPEND_PREPARE:
1334 case PM_HIBERNATION_PREPARE:
1335 cpu_hotplug_disable();
1336 break;
1337
1338 case PM_POST_SUSPEND:
1339 case PM_POST_HIBERNATION:
1340 cpu_hotplug_enable();
1341 break;
1342
1343 default:
1344 return NOTIFY_DONE;
1345 }
1346
1347 return NOTIFY_OK;
1348}
1349
1350
1351static int __init cpu_hotplug_pm_sync_init(void)
1352{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
1359 return 0;
1360}
1361core_initcall(cpu_hotplug_pm_sync_init);
1362
1363#endif
1364
1365int __boot_cpu_id;
1366
1367#endif
1368
/* Boot processor state steps */
static struct cpuhp_step cpuhp_hp_states[] = {
1371 [CPUHP_OFFLINE] = {
1372 .name = "offline",
1373 .startup.single = NULL,
1374 .teardown.single = NULL,
1375 },
1376#ifdef CONFIG_SMP
1377 [CPUHP_CREATE_THREADS]= {
1378 .name = "threads:prepare",
1379 .startup.single = smpboot_create_threads,
1380 .teardown.single = NULL,
1381 .cant_stop = true,
1382 },
1383 [CPUHP_PERF_PREPARE] = {
1384 .name = "perf:prepare",
1385 .startup.single = perf_event_init_cpu,
1386 .teardown.single = perf_event_exit_cpu,
1387 },
1388 [CPUHP_WORKQUEUE_PREP] = {
1389 .name = "workqueue:prepare",
1390 .startup.single = workqueue_prepare_cpu,
1391 .teardown.single = NULL,
1392 },
1393 [CPUHP_HRTIMERS_PREPARE] = {
1394 .name = "hrtimers:prepare",
1395 .startup.single = hrtimers_prepare_cpu,
1396 .teardown.single = hrtimers_dead_cpu,
1397 },
1398 [CPUHP_SMPCFD_PREPARE] = {
1399 .name = "smpcfd:prepare",
1400 .startup.single = smpcfd_prepare_cpu,
1401 .teardown.single = smpcfd_dead_cpu,
1402 },
1403 [CPUHP_RELAY_PREPARE] = {
1404 .name = "relay:prepare",
1405 .startup.single = relay_prepare_cpu,
1406 .teardown.single = NULL,
1407 },
1408 [CPUHP_SLAB_PREPARE] = {
1409 .name = "slab:prepare",
1410 .startup.single = slab_prepare_cpu,
1411 .teardown.single = slab_dead_cpu,
1412 },
1413 [CPUHP_RCUTREE_PREP] = {
1414 .name = "RCU/tree:prepare",
1415 .startup.single = rcutree_prepare_cpu,
1416 .teardown.single = rcutree_dead_cpu,
1417 },
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_PREPARE] = {
1424 .name = "timers:prepare",
1425 .startup.single = timers_prepare_cpu,
1426 .teardown.single = timers_dead_cpu,
1427 },

	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
1430 .name = "cpu:bringup",
1431 .startup.single = bringup_cpu,
1432 .teardown.single = NULL,
1433 .cant_stop = true,
1434 },

	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
1449 .name = "sched:starting",
1450 .startup.single = sched_cpu_starting,
1451 .teardown.single = sched_cpu_dying,
1452 },
1453 [CPUHP_AP_RCUTREE_DYING] = {
1454 .name = "RCU/tree:dying",
1455 .startup.single = NULL,
1456 .teardown.single = rcutree_dying_cpu,
1457 },
1458 [CPUHP_AP_SMPCFD_DYING] = {
1459 .name = "smpcfd:dying",
1460 .startup.single = NULL,
1461 .teardown.single = smpcfd_dying_cpu,
1462 },

	/*
	 * Entry state on starting. Interrupts enabled from here on. Transient
	 * state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},

	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
1480 .name = "smpboot/threads:online",
1481 .startup.single = smpboot_unpark_threads,
1482 .teardown.single = smpboot_park_threads,
1483 },
1484 [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
1485 .name = "irq/affinity:online",
1486 .startup.single = irq_affinity_online_cpu,
1487 .teardown.single = NULL,
1488 },
1489 [CPUHP_AP_PERF_ONLINE] = {
1490 .name = "perf:online",
1491 .startup.single = perf_event_init_cpu,
1492 .teardown.single = perf_event_exit_cpu,
1493 },
1494 [CPUHP_AP_WATCHDOG_ONLINE] = {
1495 .name = "lockup_detector:online",
1496 .startup.single = lockup_detector_online_cpu,
1497 .teardown.single = lockup_detector_offline_cpu,
1498 },
1499 [CPUHP_AP_WORKQUEUE_ONLINE] = {
1500 .name = "workqueue:online",
1501 .startup.single = workqueue_online_cpu,
1502 .teardown.single = workqueue_offline_cpu,
1503 },
1504 [CPUHP_AP_RCUTREE_ONLINE] = {
1505 .name = "RCU/tree:online",
1506 .startup.single = rcutree_online_cpu,
1507 .teardown.single = rcutree_offline_cpu,
1508 },
1509#endif

	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
1517 .name = "sched:active",
1518 .startup.single = sched_cpu_activate,
1519 .teardown.single = sched_cpu_deactivate,
1520 },
1521#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
1525 .name = "online",
1526 .startup.single = NULL,
1527 .teardown.single = NULL,
1528 },
1529};
1530
/* Sanity check for callbacks */
static int cpuhp_cb_check(enum cpuhp_state state)
1533{
1534 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1535 return -EINVAL;
1536 return 0;
1537}
1538
/*
 * Returns a free for dynamic slot assignment of the Online state. The states
 * are protected by the cpuhp_slot_states mutex and an empty slot is identified
 * by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
1545{
1546 enum cpuhp_state i, end;
1547 struct cpuhp_step *step;
1548
1549 switch (state) {
1550 case CPUHP_AP_ONLINE_DYN:
1551 step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
1552 end = CPUHP_AP_ONLINE_DYN_END;
1553 break;
1554 case CPUHP_BP_PREPARE_DYN:
1555 step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
1556 end = CPUHP_BP_PREPARE_DYN_END;
1557 break;
1558 default:
1559 return -EINVAL;
1560 }
1561
1562 for (i = state; i <= end; i++, step++) {
1563 if (!step->name)
1564 return i;
1565 }
1566 WARN(1, "No more dynamic states available for CPU hotplug\n");
1567 return -ENOSPC;
1568}
1569
1570static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1571 int (*startup)(unsigned int cpu),
1572 int (*teardown)(unsigned int cpu),
1573 bool multi_instance)
1574{
	/* (Un)Install the callbacks for further cpu hotplug operations */
	struct cpuhp_step *sp;
	int ret = 0;

	/*
	 * If name is NULL, then the state gets removed.
	 *
	 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
	 * the first allocation from these dynamic ranges, so the removal
	 * would trigger a new allocation and clear the wrong (already
	 * empty) state, leaving the callbacks of the to be cleared state
	 * dangling, which causes wreckage on the next hotplug operation.
	 */
	if (name && (state == CPUHP_AP_ONLINE_DYN ||
		     state == CPUHP_BP_PREPARE_DYN)) {
1590 ret = cpuhp_reserve_state(state);
1591 if (ret < 0)
1592 return ret;
1593 state = ret;
1594 }
1595 sp = cpuhp_get_step(state);
1596 if (name && sp->name)
1597 return -EBUSY;
1598
1599 sp->startup.single = startup;
1600 sp->teardown.single = teardown;
1601 sp->name = name;
1602 sp->multi_instance = multi_instance;
1603 INIT_HLIST_HEAD(&sp->list);
1604 return ret;
1605}
1606
1607static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1608{
1609 return cpuhp_get_step(state)->teardown.single;
1610}
1611
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the BP.
 */
1616static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1617 struct hlist_node *node)
1618{
1619 struct cpuhp_step *sp = cpuhp_get_step(state);
1620 int ret;
1621
	/*
	 * If there's nothing to do, we're done.
	 * Relies on the union for multi_instance.
	 */
	if ((bringup && !sp->startup.single) ||
1627 (!bringup && !sp->teardown.single))
1628 return 0;
1629
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
1634 if (cpuhp_is_ap_state(state))
1635 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1636 else
1637 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1638#else
1639 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1640#endif
1641 BUG_ON(ret && !bringup);
1642 return ret;
1643}
1644
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
1650static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1651 struct hlist_node *node)
1652{
1653 int cpu;
1654
	/* Roll back the already executed steps on the other cpus */
	for_each_present_cpu(cpu) {
1657 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1658 int cpustate = st->state;
1659
1660 if (cpu >= failedcpu)
1661 break;

		/* Did we invoke the startup call on that cpu ? */
		if (cpustate >= state)
1665 cpuhp_issue_call(cpu, state, false, node);
1666 }
1667}
1668
1669int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1670 struct hlist_node *node,
1671 bool invoke)
1672{
1673 struct cpuhp_step *sp;
1674 int cpu;
1675 int ret;
1676
1677 lockdep_assert_cpus_held();
1678
1679 sp = cpuhp_get_step(state);
1680 if (sp->multi_instance == false)
1681 return -EINVAL;
1682
1683 mutex_lock(&cpuhp_state_mutex);
1684
1685 if (!invoke || !sp->startup.multi)
1686 goto add_node;
1687
	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
1693 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1694 int cpustate = st->state;
1695
1696 if (cpustate < state)
1697 continue;
1698
1699 ret = cpuhp_issue_call(cpu, state, true, node);
1700 if (ret) {
1701 if (sp->teardown.multi)
1702 cpuhp_rollback_install(cpu, state, node);
1703 goto unlock;
1704 }
1705 }
1706add_node:
1707 ret = 0;
1708 hlist_add_head(node, &sp->list);
1709unlock:
1710 mutex_unlock(&cpuhp_state_mutex);
1711 return ret;
1712}
1713
1714int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1715 bool invoke)
1716{
1717 int ret;
1718
1719 cpus_read_lock();
1720 ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
1721 cpus_read_unlock();
1722 return ret;
1723}
1724EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
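
/*
 * Usage sketch for multi-instance states (illustrative; the foo_* names
 * are made up). A driver registers the state once with the multi setup
 * variant, whose callbacks take (cpu, node), and then adds one instance
 * per device:
 *
 *	static int foo_cpu_online(unsigned int cpu, struct hlist_node *node)
 *	{ ... return 0; }
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "foo:online",
 *				      foo_cpu_online, foo_cpu_offline);
 *	if (ret < 0)
 *		return ret;
 *	foo_hp_state = ret;
 *	...
 *	ret = cpuhp_state_add_instance(foo_hp_state, &foo_dev->node);
 */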
1725
/**
 * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
 * @state:		The state to setup
 * @name:		Name of the step
 * @invoke:		If true, the startup function is invoked for cpus where
 *			cpu state >= @state
 * @startup:		startup callback function
 * @teardown:		teardown callback function
 * @multi_instance:	State is set up for multiple instances which get
 *			added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
1743int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
1744 const char *name, bool invoke,
1745 int (*startup)(unsigned int cpu),
1746 int (*teardown)(unsigned int cpu),
1747 bool multi_instance)
1748{
1749 int cpu, ret = 0;
1750 bool dynstate;
1751
1752 lockdep_assert_cpus_held();
1753
1754 if (cpuhp_cb_check(state) || !name)
1755 return -EINVAL;
1756
1757 mutex_lock(&cpuhp_state_mutex);
1758
1759 ret = cpuhp_store_callbacks(state, name, startup, teardown,
1760 multi_instance);
1761
1762 dynstate = state == CPUHP_AP_ONLINE_DYN;
1763 if (ret > 0 && dynstate) {
1764 state = ret;
1765 ret = 0;
1766 }
1767
1768 if (ret || !invoke || !startup)
1769 goto out;
1770
	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
1776 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1777 int cpustate = st->state;
1778
1779 if (cpustate < state)
1780 continue;
1781
1782 ret = cpuhp_issue_call(cpu, state, true, NULL);
1783 if (ret) {
1784 if (teardown)
1785 cpuhp_rollback_install(cpu, state, NULL);
1786 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1787 goto out;
1788 }
1789 }
1790out:
1791 mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
1797 return state;
1798 return ret;
1799}
1800EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
1801
1802int __cpuhp_setup_state(enum cpuhp_state state,
1803 const char *name, bool invoke,
1804 int (*startup)(unsigned int cpu),
1805 int (*teardown)(unsigned int cpu),
1806 bool multi_instance)
1807{
1808 int ret;
1809
1810 cpus_read_lock();
1811 ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
1812 teardown, multi_instance);
1813 cpus_read_unlock();
1814 return ret;
1815}
1816EXPORT_SYMBOL(__cpuhp_setup_state);
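
/*
 * Usage sketch (illustrative; the foo_* names are made up): the common
 * pattern is to grab a dynamic state via the cpuhp_setup_state() wrapper,
 * which invokes the startup callback on all currently online CPUs and
 * returns the allocated slot number:
 *
 *	static enum cpuhp_state foo_hp_state;
 *
 *	static int foo_cpu_online(unsigned int cpu) { ... return 0; }
 *	static int foo_cpu_offline(unsigned int cpu) { ... return 0; }
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
 *				foo_cpu_online, foo_cpu_offline);
 *	if (ret < 0)
 *		return ret;
 *	foo_hp_state = ret;
 *	...
 *	cpuhp_remove_state(foo_hp_state);
 */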
1817
1818int __cpuhp_state_remove_instance(enum cpuhp_state state,
1819 struct hlist_node *node, bool invoke)
1820{
1821 struct cpuhp_step *sp = cpuhp_get_step(state);
1822 int cpu;
1823
1824 BUG_ON(cpuhp_cb_check(state));
1825
1826 if (!sp->multi_instance)
1827 return -EINVAL;
1828
1829 cpus_read_lock();
1830 mutex_lock(&cpuhp_state_mutex);
1831
1832 if (!invoke || !cpuhp_get_teardown_cb(state))
1833 goto remove;
1834
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
1840 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1841 int cpustate = st->state;
1842
1843 if (cpustate >= state)
1844 cpuhp_issue_call(cpu, state, false, node);
1845 }
1846
1847remove:
1848 hlist_del(node);
1849 mutex_unlock(&cpuhp_state_mutex);
1850 cpus_read_unlock();
1851
1852 return 0;
1853}
1854EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
1855
/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
 * @state:	The state to remove
 * @invoke:	If true, the teardown function is invoked for cpus where
 *		cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
1866void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
1867{
1868 struct cpuhp_step *sp = cpuhp_get_step(state);
1869 int cpu;
1870
1871 BUG_ON(cpuhp_cb_check(state));
1872
1873 lockdep_assert_cpus_held();
1874
1875 mutex_lock(&cpuhp_state_mutex);
1876 if (sp->multi_instance) {
1877 WARN(!hlist_empty(&sp->list),
1878 "Error: Removing state %d which has instances left.\n",
1879 state);
1880 goto remove;
1881 }
1882
1883 if (!invoke || !cpuhp_get_teardown_cb(state))
1884 goto remove;
1885
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
1892 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1893 int cpustate = st->state;
1894
1895 if (cpustate >= state)
1896 cpuhp_issue_call(cpu, state, false, NULL);
1897 }
1898remove:
1899 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1900 mutex_unlock(&cpuhp_state_mutex);
1901}
1902EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
1903
1904void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1905{
1906 cpus_read_lock();
1907 __cpuhp_remove_state_cpuslocked(state, invoke);
1908 cpus_read_unlock();
1909}
1910EXPORT_SYMBOL(__cpuhp_remove_state);
1911
1912#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1913static ssize_t show_cpuhp_state(struct device *dev,
1914 struct device_attribute *attr, char *buf)
1915{
1916 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1917
1918 return sprintf(buf, "%d\n", st->state);
1919}
1920static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1921
1922static ssize_t write_cpuhp_target(struct device *dev,
1923 struct device_attribute *attr,
1924 const char *buf, size_t count)
1925{
1926 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1927 struct cpuhp_step *sp;
1928 int target, ret;
1929
1930 ret = kstrtoint(buf, 10, &target);
1931 if (ret)
1932 return ret;
1933
1934#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1935 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1936 return -EINVAL;
1937#else
1938 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1939 return -EINVAL;
1940#endif
1941
1942 ret = lock_device_hotplug_sysfs();
1943 if (ret)
1944 return ret;
1945
1946 mutex_lock(&cpuhp_state_mutex);
1947 sp = cpuhp_get_step(target);
1948 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1949 mutex_unlock(&cpuhp_state_mutex);
1950 if (ret)
1951 goto out;
1952
1953 if (st->state < target)
1954 ret = do_cpu_up(dev->id, target);
1955 else
1956 ret = do_cpu_down(dev->id, target);
1957out:
1958 unlock_device_hotplug();
1959 return ret ? ret : count;
1960}
1961
1962static ssize_t show_cpuhp_target(struct device *dev,
1963 struct device_attribute *attr, char *buf)
1964{
1965 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1966
1967 return sprintf(buf, "%d\n", st->target);
1968}
1969static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
1970
1971
1972static ssize_t write_cpuhp_fail(struct device *dev,
1973 struct device_attribute *attr,
1974 const char *buf, size_t count)
1975{
1976 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1977 struct cpuhp_step *sp;
1978 int fail, ret;
1979
1980 ret = kstrtoint(buf, 10, &fail);
1981 if (ret)
1982 return ret;
1983
1984 if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
1985 return -EINVAL;
1986
	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
1991 return -EINVAL;
1992
	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
1997 sp = cpuhp_get_step(fail);
1998 if (!sp->startup.single && !sp->teardown.single)
1999 ret = -EINVAL;
2000 mutex_unlock(&cpuhp_state_mutex);
2001 if (ret)
2002 return ret;
2003
2004 st->fail = fail;
2005
2006 return count;
2007}
2008
2009static ssize_t show_cpuhp_fail(struct device *dev,
2010 struct device_attribute *attr, char *buf)
2011{
2012 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2013
2014 return sprintf(buf, "%d\n", st->fail);
2015}
2016
2017static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
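
/*
 * Example (illustrative) use of the fault injection attribute from user
 * space; the state number must match an entry from the "states" file:
 *
 *	# cat /sys/devices/system/cpu/hotplug/states
 *	# echo <state#> > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 *
 * The next attempt to reach the target then fails with -EAGAIN in the
 * selected state and exercises the rollback path.
 */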
2018
2019static struct attribute *cpuhp_cpu_attrs[] = {
2020 &dev_attr_state.attr,
2021 &dev_attr_target.attr,
2022 &dev_attr_fail.attr,
2023 NULL
2024};
2025
2026static const struct attribute_group cpuhp_cpu_attr_group = {
2027 .attrs = cpuhp_cpu_attrs,
2028 .name = "hotplug",
2029 NULL
2030};
2031
2032static ssize_t show_cpuhp_states(struct device *dev,
2033 struct device_attribute *attr, char *buf)
2034{
2035 ssize_t cur, res = 0;
2036 int i;
2037
2038 mutex_lock(&cpuhp_state_mutex);
2039 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2040 struct cpuhp_step *sp = cpuhp_get_step(i);
2041
2042 if (sp->name) {
2043 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2044 buf += cur;
2045 res += cur;
2046 }
2047 }
2048 mutex_unlock(&cpuhp_state_mutex);
2049 return res;
2050}
2051static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
2052
2053static struct attribute *cpuhp_cpu_root_attrs[] = {
2054 &dev_attr_states.attr,
2055 NULL
2056};
2057
2058static const struct attribute_group cpuhp_cpu_root_attr_group = {
2059 .attrs = cpuhp_cpu_root_attrs,
2060 .name = "hotplug",
2061 NULL
2062};
2063
2064#ifdef CONFIG_HOTPLUG_SMT
2065
static void cpuhp_offline_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = true;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}

static void cpuhp_online_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = false;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}
2083
2084int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2085{
2086 int cpu, ret = 0;
2087
2088 cpu_maps_update_begin();
2089 for_each_online_cpu(cpu) {
2090 if (topology_is_primary_thread(cpu))
2091 continue;
2092 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2093 if (ret)
2094 break;

		/*
		 * As this needs to hold the cpu maps lock it's impossible
		 * to call device_offline() because that ends up calling
		 * cpu_down() which takes cpu maps lock. cpu maps lock
		 * needs to be held as this might race against in kernel
		 * abusers of the hotplug machinery (thermal management).
		 *
		 * So nothing would update device:offline state. That would
		 * leave the sysfs entry stale and prevent onlining after
		 * smt control has been changed to 'off' again. This is
		 * called under the sysfs hotplug lock, so it is properly
		 * protected against the serialized reread of the cpu mask in
		 * smt acquire.
		 */
		cpuhp_offline_cpu_device(cpu);
2109 }
2110 if (!ret)
2111 cpu_smt_control = ctrlval;
2112 cpu_maps_update_done();
2113 return ret;
2114}
2115
2116int cpuhp_smt_enable(void)
2117{
2118 int cpu, ret = 0;
2119
2120 cpu_maps_update_begin();
2121 cpu_smt_control = CPU_SMT_ENABLED;
2122 for_each_present_cpu(cpu) {
		/* Skip online CPUs and CPUs on offline nodes */
		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2125 continue;
2126 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2127 if (ret)
2128 break;

		/* See comment in cpuhp_smt_disable() */
		cpuhp_online_cpu_device(cpu);
2131 }
2132 cpu_maps_update_done();
2133 return ret;
2134}
2135
2136
2137static ssize_t
2138__store_smt_control(struct device *dev, struct device_attribute *attr,
2139 const char *buf, size_t count)
2140{
2141 int ctrlval, ret;
2142
2143 if (sysfs_streq(buf, "on"))
2144 ctrlval = CPU_SMT_ENABLED;
2145 else if (sysfs_streq(buf, "off"))
2146 ctrlval = CPU_SMT_DISABLED;
2147 else if (sysfs_streq(buf, "forceoff"))
2148 ctrlval = CPU_SMT_FORCE_DISABLED;
2149 else
2150 return -EINVAL;
2151
2152 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2153 return -EPERM;
2154
2155 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2156 return -ENODEV;
2157
2158 ret = lock_device_hotplug_sysfs();
2159 if (ret)
2160 return ret;
2161
2162 if (ctrlval != cpu_smt_control) {
2163 switch (ctrlval) {
2164 case CPU_SMT_ENABLED:
2165 ret = cpuhp_smt_enable();
2166 break;
2167 case CPU_SMT_DISABLED:
2168 case CPU_SMT_FORCE_DISABLED:
2169 ret = cpuhp_smt_disable(ctrlval);
2170 break;
2171 }
2172 }
2173
2174 unlock_device_hotplug();
2175 return ret ? ret : count;
2176}
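
/*
 * Example (illustrative) of the resulting sysfs interface:
 *
 *	# echo off > /sys/devices/system/cpu/smt/control
 *	# cat /sys/devices/system/cpu/smt/active
 *	0
 *
 * "forceoff" is a one-way operation for this boot; a subsequent write of
 * "on" fails with -EPERM.
 */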
2177
2178#else
2179static ssize_t
2180__store_smt_control(struct device *dev, struct device_attribute *attr,
2181 const char *buf, size_t count)
2182{
2183 return -ENODEV;
2184}
2185#endif
2186
2187static const char *smt_states[] = {
2188 [CPU_SMT_ENABLED] = "on",
2189 [CPU_SMT_DISABLED] = "off",
2190 [CPU_SMT_FORCE_DISABLED] = "forceoff",
2191 [CPU_SMT_NOT_SUPPORTED] = "notsupported",
2192 [CPU_SMT_NOT_IMPLEMENTED] = "notimplemented",
2193};
2194
2195static ssize_t
2196show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
2197{
2198 const char *state = smt_states[cpu_smt_control];
2199
2200 return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
2201}
2202
2203static ssize_t
2204store_smt_control(struct device *dev, struct device_attribute *attr,
2205 const char *buf, size_t count)
2206{
2207 return __store_smt_control(dev, attr, buf, count);
2208}
2209static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
2210
2211static ssize_t
2212show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2213{
2214 return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
2215}
2216static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
2217
2218static struct attribute *cpuhp_smt_attrs[] = {
2219 &dev_attr_control.attr,
2220 &dev_attr_active.attr,
2221 NULL
2222};
2223
2224static const struct attribute_group cpuhp_smt_attr_group = {
2225 .attrs = cpuhp_smt_attrs,
2226 .name = "smt",
2227 NULL
2228};
2229
2230static int __init cpu_smt_sysfs_init(void)
2231{
2232 return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2233 &cpuhp_smt_attr_group);
2234}
2235
2236static int __init cpuhp_sysfs_init(void)
2237{
2238 int cpu, ret;
2239
2240 ret = cpu_smt_sysfs_init();
2241 if (ret)
2242 return ret;
2243
2244 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2245 &cpuhp_cpu_root_attr_group);
2246 if (ret)
2247 return ret;
2248
2249 for_each_possible_cpu(cpu) {
2250 struct device *dev = get_cpu_device(cpu);
2251
2252 if (!dev)
2253 continue;
2254 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2255 if (ret)
2256 return ret;
2257 }
2258 return 0;
2259}
2260device_initcall(cpuhp_sysfs_init);
2261#endif

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
2286EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
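
/*
 * Worked example (illustrative): for cpu = 70 on a 64-bit kernel,
 * cpumask_of(70) resolves via get_cpu_mask() in <linux/cpumask.h> to
 *
 *	p = cpu_bit_bitmap[1 + 70 % BITS_PER_LONG];	// row with 1UL << 6
 *	p -= 70 / BITS_PER_LONG;			// back up one word
 *
 * so word 1 of the returned constant mask carries bit 6, i.e. exactly
 * bit 70 is set. Leaving row 0 empty is what makes backing the pointer
 * up into the preceding (all zero) words legal.
 */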
2287
2288const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2289EXPORT_SYMBOL(cpu_all_bits);
2290
2291#ifdef CONFIG_INIT_ALL_POSSIBLE
2292struct cpumask __cpu_possible_mask __read_mostly
2293 = {CPU_BITS_ALL};
2294#else
2295struct cpumask __cpu_possible_mask __read_mostly;
2296#endif
2297EXPORT_SYMBOL(__cpu_possible_mask);
2298
2299struct cpumask __cpu_online_mask __read_mostly;
2300EXPORT_SYMBOL(__cpu_online_mask);
2301
2302struct cpumask __cpu_present_mask __read_mostly;
2303EXPORT_SYMBOL(__cpu_present_mask);
2304
2305struct cpumask __cpu_active_mask __read_mostly;
2306EXPORT_SYMBOL(__cpu_active_mask);
2307
2308atomic_t __num_online_cpus __read_mostly;
2309EXPORT_SYMBOL(__num_online_cpus);
2310
2311void init_cpu_present(const struct cpumask *src)
2312{
2313 cpumask_copy(&__cpu_present_mask, src);
2314}
2315
2316void init_cpu_possible(const struct cpumask *src)
2317{
2318 cpumask_copy(&__cpu_possible_mask, src);
2319}
2320
2321void init_cpu_online(const struct cpumask *src)
2322{
2323 cpumask_copy(&__cpu_online_mask, src);
2324}
2325
2326void set_cpu_online(unsigned int cpu, bool online)
{
	/*
	 * atomic_inc/dec() is required to handle the horrid abuse of this
	 * function by the reboot and kexec code which invoke it from
	 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
	 * regular CPU hotplug is properly serialized.
	 *
	 * Note, that the fact that __num_online_cpus is of type atomic_t
	 * does not protect readers which are not serialized against
	 * concurrent hotplug operations.
	 */
	if (online) {
2339 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
2340 atomic_inc(&__num_online_cpus);
2341 } else {
2342 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
2343 atomic_dec(&__num_online_cpus);
2344 }
2345}
2346
/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
2351{
2352 int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
2356 set_cpu_active(cpu, true);
2357 set_cpu_present(cpu, true);
2358 set_cpu_possible(cpu, true);
2359
2360#ifdef CONFIG_SMP
2361 __boot_cpu_id = cpu;
2362#endif
2363}
2364
/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_hotplug_init(void)
2369{
2370#ifdef CONFIG_SMP
2371 cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
2372#endif
2373 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
2374}
2375
/*
 * These are used for a global "mitigations=" cmdline option for toggling
 * optional CPU mitigations.
 */
enum cpu_mitigations {
2381 CPU_MITIGATIONS_OFF,
2382 CPU_MITIGATIONS_AUTO,
2383 CPU_MITIGATIONS_AUTO_NOSMT,
2384};
2385
2386static enum cpu_mitigations cpu_mitigations __ro_after_init =
2387 CPU_MITIGATIONS_AUTO;
2388
2389static int __init mitigations_parse_cmdline(char *arg)
2390{
2391 if (!strcmp(arg, "off"))
2392 cpu_mitigations = CPU_MITIGATIONS_OFF;
2393 else if (!strcmp(arg, "auto"))
2394 cpu_mitigations = CPU_MITIGATIONS_AUTO;
2395 else if (!strcmp(arg, "auto,nosmt"))
2396 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
2397 else
2398 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2399 arg);
2400
2401 return 0;
2402}
2403early_param("mitigations", mitigations_parse_cmdline);
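
/*
 * Example command line usage, matching the values parsed above:
 *
 *	mitigations=off		# disable all optional CPU mitigations
 *	mitigations=auto	# default: mitigate, keep SMT enabled
 *	mitigations=auto,nosmt	# mitigate, disabling SMT if needed
 */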
2404
/* mitigations=off */
bool cpu_mitigations_off(void)
2407{
2408 return cpu_mitigations == CPU_MITIGATIONS_OFF;
2409}
2410EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2411
/* mitigations=auto,nosmt */
bool cpu_mitigations_auto_nosmt(void)
2414{
2415 return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2416}
2417EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
2418