linux/kernel/cpu.c
   1/* CPU control.
   2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
   3 *
   4 * This code is licensed under the GPL.
   5 */
   6#include <linux/sched/mm.h>
   7#include <linux/proc_fs.h>
   8#include <linux/smp.h>
   9#include <linux/init.h>
  10#include <linux/notifier.h>
  11#include <linux/sched/signal.h>
  12#include <linux/sched/hotplug.h>
  13#include <linux/sched/isolation.h>
  14#include <linux/sched/task.h>
  15#include <linux/sched/smt.h>
  16#include <linux/unistd.h>
  17#include <linux/cpu.h>
  18#include <linux/oom.h>
  19#include <linux/rcupdate.h>
  20#include <linux/export.h>
  21#include <linux/bug.h>
  22#include <linux/kthread.h>
  23#include <linux/stop_machine.h>
  24#include <linux/mutex.h>
  25#include <linux/gfp.h>
  26#include <linux/suspend.h>
  27#include <linux/lockdep.h>
  28#include <linux/tick.h>
  29#include <linux/irq.h>
  30#include <linux/nmi.h>
  31#include <linux/smpboot.h>
  32#include <linux/relay.h>
  33#include <linux/slab.h>
  34#include <linux/percpu-rwsem.h>
  35
  36#include <trace/events/power.h>
  37#define CREATE_TRACE_POINTS
  38#include <trace/events/cpuhp.h>
  39
  40#include "smpboot.h"
  41
   42/**
   43 * struct cpuhp_cpu_state - Per cpu hotplug state storage
   44 * @state:      The current cpu state
   45 * @target:     The target state
      * @fail:       State at which a failure is deliberately injected
      *              (CPUHP_INVALID means no injection)
   46 * @thread:     Pointer to the hotplug thread
   47 * @should_run: Thread should execute
   48 * @rollback:   Perform a rollback
   49 * @single:     Single callback invocation
   50 * @bringup:    Single callback bringup or teardown selector
      * @node:       Instance node passed to a single multi-instance callback
      * @last:       For multi-instance rollback, remember how far we got
   51 * @cb_state:   The state for a single callback (install/uninstall)
   52 * @result:     Result of the operation
   53 * @done_up:    Signal completion to the issuer of the task for cpu-up
   54 * @done_down:  Signal completion to the issuer of the task for cpu-down
   55 */
  56struct cpuhp_cpu_state {
  57        enum cpuhp_state        state;
  58        enum cpuhp_state        target;
  59        enum cpuhp_state        fail;
  60#ifdef CONFIG_SMP
  61        struct task_struct      *thread;
  62        bool                    should_run;
  63        bool                    rollback;
  64        bool                    single;
  65        bool                    bringup;
  66        struct hlist_node       *node;
  67        struct hlist_node       *last;
  68        enum cpuhp_state        cb_state;
  69        int                     result;
  70        struct completion       done_up;
  71        struct completion       done_down;
  72#endif
  73};
  74
  75static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
  76        .fail = CPUHP_INVALID,
  77};
  78
  79#ifdef CONFIG_SMP
  80cpumask_t cpus_booted_once_mask;
  81#endif
  82
  83#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
  84static struct lockdep_map cpuhp_state_up_map =
  85        STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
  86static struct lockdep_map cpuhp_state_down_map =
  87        STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
  88
  89
  90static inline void cpuhp_lock_acquire(bool bringup)
  91{
  92        lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
  93}
  94
  95static inline void cpuhp_lock_release(bool bringup)
  96{
  97        lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
  98}
  99#else
 100
 101static inline void cpuhp_lock_acquire(bool bringup) { }
 102static inline void cpuhp_lock_release(bool bringup) { }
 103
 104#endif
 105
  106/**
  107 * struct cpuhp_step - Hotplug state machine step
  108 * @name:       Name of the step
  109 * @startup:    Startup function of the step
  110 * @teardown:   Teardown function of the step
      * @list:       List head for the instances of a multi-instance state
  111 * @cant_stop:  Bringup/teardown can't be stopped at this step
      * @multi_instance: State has multiple instances which get added afterwards
  112 */
 113struct cpuhp_step {
 114        const char              *name;
 115        union {
 116                int             (*single)(unsigned int cpu);
 117                int             (*multi)(unsigned int cpu,
 118                                         struct hlist_node *node);
 119        } startup;
 120        union {
 121                int             (*single)(unsigned int cpu);
 122                int             (*multi)(unsigned int cpu,
 123                                         struct hlist_node *node);
 124        } teardown;
 125        struct hlist_head       list;
 126        bool                    cant_stop;
 127        bool                    multi_instance;
 128};
 129
 130static DEFINE_MUTEX(cpuhp_state_mutex);
 131static struct cpuhp_step cpuhp_hp_states[];
 132
 133static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
 134{
 135        return cpuhp_hp_states + state;
 136}
 137
 138/**
  139 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 140 * @cpu:        The cpu for which the callback should be invoked
 141 * @state:      The state to do callbacks for
 142 * @bringup:    True if the bringup callback should be invoked
 143 * @node:       For multi-instance, do a single entry callback for install/remove
 144 * @lastp:      For multi-instance rollback, remember how far we got
 145 *
 146 * Called from cpu hotplug and from the state register machinery.
 147 */
 148static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
 149                                 bool bringup, struct hlist_node *node,
 150                                 struct hlist_node **lastp)
 151{
 152        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 153        struct cpuhp_step *step = cpuhp_get_step(state);
 154        int (*cbm)(unsigned int cpu, struct hlist_node *node);
 155        int (*cb)(unsigned int cpu);
 156        int ret, cnt;
 157
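             /*
              * Deliberate failure injection: if this state was marked via
              * st->fail, clear the mark and report -EAGAIN. A state without
              * a registered callback cannot fail, so return 0 instead.
              */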
 158        if (st->fail == state) {
 159                st->fail = CPUHP_INVALID;
 160
 161                if (!(bringup ? step->startup.single : step->teardown.single))
 162                        return 0;
 163
 164                return -EAGAIN;
 165        }
 166
 167        if (!step->multi_instance) {
 168                WARN_ON_ONCE(lastp && *lastp);
 169                cb = bringup ? step->startup.single : step->teardown.single;
 170                if (!cb)
 171                        return 0;
 172                trace_cpuhp_enter(cpu, st->target, state, cb);
 173                ret = cb(cpu);
 174                trace_cpuhp_exit(cpu, st->state, state, ret);
 175                return ret;
 176        }
 177        cbm = bringup ? step->startup.multi : step->teardown.multi;
 178        if (!cbm)
 179                return 0;
 180
 181        /* Single invocation for instance add/remove */
 182        if (node) {
 183                WARN_ON_ONCE(lastp && *lastp);
 184                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 185                ret = cbm(cpu, node);
 186                trace_cpuhp_exit(cpu, st->state, state, ret);
 187                return ret;
 188        }
 189
 190        /* State transition. Invoke on all instances */
 191        cnt = 0;
 192        hlist_for_each(node, &step->list) {
 193                if (lastp && node == *lastp)
 194                        break;
 195
 196                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 197                ret = cbm(cpu, node);
 198                trace_cpuhp_exit(cpu, st->state, state, ret);
 199                if (ret) {
 200                        if (!lastp)
 201                                goto err;
 202
 203                        *lastp = node;
 204                        return ret;
 205                }
 206                cnt++;
 207        }
 208        if (lastp)
 209                *lastp = NULL;
 210        return 0;
 211err:
 212        /* Rollback the instances if one failed */
 213        cbm = !bringup ? step->startup.multi : step->teardown.multi;
 214        if (!cbm)
 215                return ret;
 216
 217        hlist_for_each(node, &step->list) {
 218                if (!cnt--)
 219                        break;
 220
 221                trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 222                ret = cbm(cpu, node);
 223                trace_cpuhp_exit(cpu, st->state, state, ret);
 224                /*
  225                 * Rollback must not fail.
 226                 */
 227                WARN_ON_ONCE(ret);
 228        }
 229        return ret;
 230}
 231
 232#ifdef CONFIG_SMP
 233static bool cpuhp_is_ap_state(enum cpuhp_state state)
 234{
 235        /*
 236         * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
 237         * purposes as that state is handled explicitly in cpu_down.
 238         */
 239        return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
 240}
 241
 242static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
 243{
 244        struct completion *done = bringup ? &st->done_up : &st->done_down;
 245        wait_for_completion(done);
 246}
 247
 248static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
 249{
 250        struct completion *done = bringup ? &st->done_up : &st->done_down;
 251        complete(done);
 252}
 253
 254/*
  255 * The former STARTING/DYING states run with IRQs disabled and must not fail.
 256 */
 257static bool cpuhp_is_atomic_state(enum cpuhp_state state)
 258{
 259        return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
 260}
 261
 262/* Serializes the updates to cpu_online_mask, cpu_present_mask */
 263static DEFINE_MUTEX(cpu_add_remove_lock);
 264bool cpuhp_tasks_frozen;
 265EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
 266
 267/*
 268 * The following two APIs (cpu_maps_update_begin/done) must be used when
 269 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 270 */
 271void cpu_maps_update_begin(void)
 272{
 273        mutex_lock(&cpu_add_remove_lock);
 274}
 275
 276void cpu_maps_update_done(void)
 277{
 278        mutex_unlock(&cpu_add_remove_lock);
 279}
 280
 281/*
 282 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 283 * Should always be manipulated under cpu_add_remove_lock
 284 */
 285static int cpu_hotplug_disabled;
 286
 287#ifdef CONFIG_HOTPLUG_CPU
 288
 289DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
 290
 291void cpus_read_lock(void)
 292{
 293        percpu_down_read(&cpu_hotplug_lock);
 294}
 295EXPORT_SYMBOL_GPL(cpus_read_lock);
 296
 297int cpus_read_trylock(void)
 298{
 299        return percpu_down_read_trylock(&cpu_hotplug_lock);
 300}
 301EXPORT_SYMBOL_GPL(cpus_read_trylock);
 302
 303void cpus_read_unlock(void)
 304{
 305        percpu_up_read(&cpu_hotplug_lock);
 306}
 307EXPORT_SYMBOL_GPL(cpus_read_unlock);
 308
 309void cpus_write_lock(void)
 310{
 311        percpu_down_write(&cpu_hotplug_lock);
 312}
 313
 314void cpus_write_unlock(void)
 315{
 316        percpu_up_write(&cpu_hotplug_lock);
 317}
 318
 319void lockdep_assert_cpus_held(void)
 320{
 321        /*
 322         * We can't have hotplug operations before userspace starts running,
 323         * and some init codepaths will knowingly not take the hotplug lock.
 324         * This is all valid, so mute lockdep until it makes sense to report
 325         * unheld locks.
 326         */
 327        if (system_state < SYSTEM_RUNNING)
 328                return;
 329
 330        percpu_rwsem_assert_held(&cpu_hotplug_lock);
 331}
 332
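     /*
      * Fake lockdep acquire/release of cpu_hotplug_lock's dep_map. The AP
      * hotplug thread does work on behalf of the BP, which holds the hotplug
      * lock, so lockdep has to consider the lock held in that context as well
      * (see the comment in cpuhp_thread_fun()).
      */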
 333static void lockdep_acquire_cpus_lock(void)
 334{
 335        rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
 336}
 337
 338static void lockdep_release_cpus_lock(void)
 339{
 340        rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
 341}
 342
 343/*
 344 * Wait for currently running CPU hotplug operations to complete (if any) and
 345 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 346 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 347 * hotplug path before performing hotplug operations. So acquiring that lock
 348 * guarantees mutual exclusion from any currently running hotplug operations.
 349 */
 350void cpu_hotplug_disable(void)
 351{
 352        cpu_maps_update_begin();
 353        cpu_hotplug_disabled++;
 354        cpu_maps_update_done();
 355}
 356EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
 357
 358static void __cpu_hotplug_enable(void)
 359{
 360        if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
 361                return;
 362        cpu_hotplug_disabled--;
 363}
 364
 365void cpu_hotplug_enable(void)
 366{
 367        cpu_maps_update_begin();
 368        __cpu_hotplug_enable();
 369        cpu_maps_update_done();
 370}
 371EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
 372
 373#else
 374
 375static void lockdep_acquire_cpus_lock(void)
 376{
 377}
 378
 379static void lockdep_release_cpus_lock(void)
 380{
 381}
 382
 383#endif  /* CONFIG_HOTPLUG_CPU */
 384
 385/*
 386 * Architectures that need SMT-specific errata handling during SMT hotplug
 387 * should override this.
 388 */
 389void __weak arch_smt_update(void) { }
 390
 391#ifdef CONFIG_HOTPLUG_SMT
 392enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
 393
 394void __init cpu_smt_disable(bool force)
 395{
 396        if (!cpu_smt_possible())
 397                return;
 398
 399        if (force) {
 400                pr_info("SMT: Force disabled\n");
 401                cpu_smt_control = CPU_SMT_FORCE_DISABLED;
 402        } else {
 403                pr_info("SMT: disabled\n");
 404                cpu_smt_control = CPU_SMT_DISABLED;
 405        }
 406}
 407
 408/*
 409 * The decision whether SMT is supported can only be done after the full
 410 * CPU identification. Called from architecture code.
 411 */
 412void __init cpu_smt_check_topology(void)
 413{
 414        if (!topology_smt_supported())
 415                cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
 416}
 417
 418static int __init smt_cmdline_disable(char *str)
 419{
 420        cpu_smt_disable(str && !strcmp(str, "force"));
 421        return 0;
 422}
 423early_param("nosmt", smt_cmdline_disable);
 424
 425static inline bool cpu_smt_allowed(unsigned int cpu)
 426{
 427        if (cpu_smt_control == CPU_SMT_ENABLED)
 428                return true;
 429
 430        if (topology_is_primary_thread(cpu))
 431                return true;
 432
 433        /*
 434         * On x86 it's required to boot all logical CPUs at least once so
 435         * that the init code can get a chance to set CR4.MCE on each
 436         * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
  437         * core will shut down the machine.
 438         */
 439        return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
 440}
 441
  442/* Returns true if SMT is not supported or forcefully (irreversibly) disabled */
 443bool cpu_smt_possible(void)
 444{
 445        return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
 446                cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
 447}
 448EXPORT_SYMBOL_GPL(cpu_smt_possible);
 449#else
 450static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
 451#endif
 452
 453static inline enum cpuhp_state
 454cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
 455{
 456        enum cpuhp_state prev_state = st->state;
 457
 458        st->rollback = false;
 459        st->last = NULL;
 460
 461        st->target = target;
 462        st->single = false;
 463        st->bringup = st->state < target;
 464
 465        return prev_state;
 466}
 467
 468static inline void
 469cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
 470{
 471        st->rollback = true;
 472
 473        /*
 474         * If we have st->last we need to undo partial multi_instance of this
 475         * state first. Otherwise start undo at the previous state.
 476         */
 477        if (!st->last) {
 478                if (st->bringup)
 479                        st->state--;
 480                else
 481                        st->state++;
 482        }
 483
 484        st->target = prev_state;
 485        st->bringup = !st->bringup;
 486}
 487
 488/* Regular hotplug invocation of the AP hotplug thread */
 489static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
 490{
 491        if (!st->single && st->state == st->target)
 492                return;
 493
 494        st->result = 0;
 495        /*
 496         * Make sure the above stores are visible before should_run becomes
  497         * true. Paired with the smp_mb() in cpuhp_thread_fun().
 498         */
 499        smp_mb();
 500        st->should_run = true;
 501        wake_up_process(st->thread);
 502        wait_for_ap_thread(st, st->bringup);
 503}
 504
 505static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
 506{
 507        enum cpuhp_state prev_state;
 508        int ret;
 509
 510        prev_state = cpuhp_set_state(st, target);
 511        __cpuhp_kick_ap(st);
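             /* On failure, reverse direction and kick the AP again to roll back */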
 512        if ((ret = st->result)) {
 513                cpuhp_reset_state(st, prev_state);
 514                __cpuhp_kick_ap(st);
 515        }
 516
 517        return ret;
 518}
 519
 520static int bringup_wait_for_ap(unsigned int cpu)
 521{
 522        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 523
 524        /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
 525        wait_for_ap_thread(st, true);
  526        if (WARN_ON_ONCE(!cpu_online(cpu)))
 527                return -ECANCELED;
 528
 529        /* Unpark the hotplug thread of the target cpu */
 530        kthread_unpark(st->thread);
 531
 532        /*
  533         * SMT soft disabling on X86 requires bringing the CPU out of the
 534         * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
 535         * CPU marked itself as booted_once in notify_cpu_starting() so the
 536         * cpu_smt_allowed() check will now return false if this is not the
 537         * primary sibling.
 538         */
 539        if (!cpu_smt_allowed(cpu))
 540                return -ECANCELED;
 541
 542        if (st->target <= CPUHP_AP_ONLINE_IDLE)
 543                return 0;
 544
 545        return cpuhp_kick_ap(st, st->target);
 546}
 547
 548static int bringup_cpu(unsigned int cpu)
 549{
 550        struct task_struct *idle = idle_thread_get(cpu);
 551        int ret;
 552
 553        /*
 554         * Some architectures have to walk the irq descriptors to
 555         * setup the vector space for the cpu which comes online.
 556         * Prevent irq alloc/free across the bringup.
 557         */
 558        irq_lock_sparse();
 559
 560        /* Arch-specific enabling code. */
 561        ret = __cpu_up(cpu, idle);
 562        irq_unlock_sparse();
 563        if (ret)
 564                return ret;
 565        return bringup_wait_for_ap(cpu);
 566}
 567
 568static int finish_cpu(unsigned int cpu)
 569{
 570        struct task_struct *idle = idle_thread_get(cpu);
 571        struct mm_struct *mm = idle->active_mm;
 572
 573        /*
 574         * idle_task_exit() will have switched to &init_mm, now
 575         * clean up any remaining active_mm state.
 576         */
 577        if (mm != &init_mm)
 578                idle->active_mm = &init_mm;
 579        mmdrop(mm);
 580        return 0;
 581}
 582
 583/*
 584 * Hotplug state machine related functions
 585 */
 586
 587static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
 588{
 589        for (st->state--; st->state > st->target; st->state--)
 590                cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 591}
 592
 593static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
 594{
 595        if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
 596                return true;
 597        /*
 598         * When CPU hotplug is disabled, then taking the CPU down is not
 599         * possible because takedown_cpu() and the architecture and
 600         * subsystem specific mechanisms are not available. So the CPU
 601         * which would be completely unplugged again needs to stay around
 602         * in the current state.
 603         */
 604        return st->state <= CPUHP_BRINGUP_CPU;
 605}
 606
 607static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 608                              enum cpuhp_state target)
 609{
 610        enum cpuhp_state prev_state = st->state;
 611        int ret = 0;
 612
 613        while (st->state < target) {
 614                st->state++;
 615                ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
 616                if (ret) {
 617                        if (can_rollback_cpu(st)) {
 618                                st->target = prev_state;
 619                                undo_cpu_up(cpu, st);
 620                        }
 621                        break;
 622                }
 623        }
 624        return ret;
 625}
 626
 627/*
 628 * The cpu hotplug threads manage the bringup and teardown of the cpus
 629 */
 630static void cpuhp_create(unsigned int cpu)
 631{
 632        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 633
 634        init_completion(&st->done_up);
 635        init_completion(&st->done_down);
 636}
 637
 638static int cpuhp_should_run(unsigned int cpu)
 639{
 640        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
 641
 642        return st->should_run;
 643}
 644
 645/*
 646 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 647 * callbacks when a state gets [un]installed at runtime.
 648 *
 649 * Each invocation of this function by the smpboot thread does a single AP
 650 * state callback.
 651 *
 652 * It has 3 modes of operation:
 653 *  - single: runs st->cb_state
 654 *  - up:     runs ++st->state, while st->state < st->target
 655 *  - down:   runs st->state--, while st->state > st->target
 656 *
 657 * When complete or on error, should_run is cleared and the completion is fired.
 658 */
 659static void cpuhp_thread_fun(unsigned int cpu)
 660{
 661        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
 662        bool bringup = st->bringup;
 663        enum cpuhp_state state;
 664
 665        if (WARN_ON_ONCE(!st->should_run))
 666                return;
 667
 668        /*
 669         * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
 670         * that if we see ->should_run we also see the rest of the state.
 671         */
 672        smp_mb();
 673
 674        /*
 675         * The BP holds the hotplug lock, but we're now running on the AP,
 676         * ensure that anybody asserting the lock is held, will actually find
 677         * it so.
 678         */
 679        lockdep_acquire_cpus_lock();
 680        cpuhp_lock_acquire(bringup);
 681
 682        if (st->single) {
 683                state = st->cb_state;
 684                st->should_run = false;
 685        } else {
 686                if (bringup) {
 687                        st->state++;
 688                        state = st->state;
 689                        st->should_run = (st->state < st->target);
 690                        WARN_ON_ONCE(st->state > st->target);
 691                } else {
 692                        state = st->state;
 693                        st->state--;
 694                        st->should_run = (st->state > st->target);
 695                        WARN_ON_ONCE(st->state < st->target);
 696                }
 697        }
 698
 699        WARN_ON_ONCE(!cpuhp_is_ap_state(state));
 700
 701        if (cpuhp_is_atomic_state(state)) {
 702                local_irq_disable();
 703                st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
 704                local_irq_enable();
 705
 706                /*
 707                 * STARTING/DYING must not fail!
 708                 */
 709                WARN_ON_ONCE(st->result);
 710        } else {
 711                st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
 712        }
 713
 714        if (st->result) {
 715                /*
  716                 * If we fail on a rollback, we're up a creek without a
  717                 * paddle, no way forward, no way back. We lose, thanks for
 718                 * playing.
 719                 */
 720                WARN_ON_ONCE(st->rollback);
 721                st->should_run = false;
 722        }
 723
 724        cpuhp_lock_release(bringup);
 725        lockdep_release_cpus_lock();
 726
 727        if (!st->should_run)
 728                complete_ap_thread(st, bringup);
 729}
 730
 731/* Invoke a single callback on a remote cpu */
 732static int
 733cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
 734                         struct hlist_node *node)
 735{
 736        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 737        int ret;
 738
 739        if (!cpu_online(cpu))
 740                return 0;
 741
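             /*
              * Lockdep-only annotation: acquire and immediately release both
              * cpuhp state lock maps so that ordering violations are reported
              * for this path as well. No real locking takes place here.
              */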
 742        cpuhp_lock_acquire(false);
 743        cpuhp_lock_release(false);
 744
 745        cpuhp_lock_acquire(true);
 746        cpuhp_lock_release(true);
 747
 748        /*
 749         * If we are up and running, use the hotplug thread. For early calls
 750         * we invoke the thread function directly.
 751         */
 752        if (!st->thread)
 753                return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
 754
 755        st->rollback = false;
 756        st->last = NULL;
 757
 758        st->node = node;
 759        st->bringup = bringup;
 760        st->cb_state = state;
 761        st->single = true;
 762
 763        __cpuhp_kick_ap(st);
 764
 765        /*
 766         * If we failed and did a partial, do a rollback.
 767         */
 768        if ((ret = st->result) && st->last) {
 769                st->rollback = true;
 770                st->bringup = !bringup;
 771
 772                __cpuhp_kick_ap(st);
 773        }
 774
 775        /*
  776         * Clean up the leftovers so the next hotplug operation won't use stale
 777         * data.
 778         */
 779        st->node = st->last = NULL;
 780        return ret;
 781}
 782
 783static int cpuhp_kick_ap_work(unsigned int cpu)
 784{
 785        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 786        enum cpuhp_state prev_state = st->state;
 787        int ret;
 788
 789        cpuhp_lock_acquire(false);
 790        cpuhp_lock_release(false);
 791
 792        cpuhp_lock_acquire(true);
 793        cpuhp_lock_release(true);
 794
 795        trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
 796        ret = cpuhp_kick_ap(st, st->target);
 797        trace_cpuhp_exit(cpu, st->state, prev_state, ret);
 798
 799        return ret;
 800}
 801
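     /*
      * The hotplug thread is self-parking: smpboot does not unpark it on
      * creation. It is unparked explicitly in bringup_wait_for_ap() (and in
      * cpuhp_threads_init() for the boot CPU) and parked again in takedown_cpu().
      */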
 802static struct smp_hotplug_thread cpuhp_threads = {
 803        .store                  = &cpuhp_state.thread,
 804        .create                 = &cpuhp_create,
 805        .thread_should_run      = cpuhp_should_run,
 806        .thread_fn              = cpuhp_thread_fun,
 807        .thread_comm            = "cpuhp/%u",
 808        .selfparking            = true,
 809};
 810
 811void __init cpuhp_threads_init(void)
 812{
 813        BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
 814        kthread_unpark(this_cpu_read(cpuhp_state.thread));
 815}
 816
 817#ifdef CONFIG_HOTPLUG_CPU
 818#ifndef arch_clear_mm_cpumask_cpu
 819#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
 820#endif
 821
 822/**
 823 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 824 * @cpu: a CPU id
 825 *
 826 * This function walks all processes, finds a valid mm struct for each one and
 827 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 828 * trivial, there are various non-obvious corner cases, which this function
 829 * tries to solve in a safe manner.
 830 *
 831 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 832 * be called only for an already offlined CPU.
 833 */
 834void clear_tasks_mm_cpumask(int cpu)
 835{
 836        struct task_struct *p;
 837
 838        /*
 839         * This function is called after the cpu is taken down and marked
  840         * offline, so it's not like new tasks will ever get this cpu set in
 841         * their mm mask. -- Peter Zijlstra
 842         * Thus, we may use rcu_read_lock() here, instead of grabbing
 843         * full-fledged tasklist_lock.
 844         */
 845        WARN_ON(cpu_online(cpu));
 846        rcu_read_lock();
 847        for_each_process(p) {
 848                struct task_struct *t;
 849
 850                /*
 851                 * Main thread might exit, but other threads may still have
 852                 * a valid mm. Find one.
 853                 */
 854                t = find_lock_task_mm(p);
 855                if (!t)
 856                        continue;
 857                arch_clear_mm_cpumask_cpu(cpu, t->mm);
 858                task_unlock(t);
 859        }
 860        rcu_read_unlock();
 861}
 862
 863/* Take this CPU down. */
 864static int take_cpu_down(void *_param)
 865{
 866        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
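             /*
              * Clamp the target: only the DYING callbacks above
              * CPUHP_AP_OFFLINE run here in stop-machine context; the rest of
              * the teardown is finished from the control CPU.
              */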
 867        enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
 868        int err, cpu = smp_processor_id();
 869        int ret;
 870
 871        /* Ensure this CPU doesn't handle any more interrupts. */
 872        err = __cpu_disable();
 873        if (err < 0)
 874                return err;
 875
 876        /*
 877         * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
 878         * do this step again.
 879         */
 880        WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
 881        st->state--;
 882        /* Invoke the former CPU_DYING callbacks */
 883        for (; st->state > target; st->state--) {
 884                ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 885                /*
 886                 * DYING must not fail!
 887                 */
 888                WARN_ON_ONCE(ret);
 889        }
 890
 891        /* Give up timekeeping duties */
 892        tick_handover_do_timer();
 893        /* Remove CPU from timer broadcasting */
 894        tick_offline_cpu(cpu);
 895        /* Park the stopper thread */
 896        stop_machine_park(cpu);
 897        return 0;
 898}
 899
 900static int takedown_cpu(unsigned int cpu)
 901{
 902        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 903        int err;
 904
 905        /* Park the smpboot threads */
 906        kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
 907
 908        /*
 909         * Prevent irq alloc/free while the dying cpu reorganizes the
 910         * interrupt affinities.
 911         */
 912        irq_lock_sparse();
 913
 914        /*
 915         * So now all preempt/rcu users must observe !cpu_active().
 916         */
 917        err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
 918        if (err) {
 919                /* CPU refused to die */
 920                irq_unlock_sparse();
 921                /* Unpark the hotplug thread so we can rollback there */
 922                kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
 923                return err;
 924        }
 925        BUG_ON(cpu_online(cpu));
 926
 927        /*
 928         * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
 929         * all runnable tasks from the CPU, there's only the idle task left now
 930         * that the migration thread is done doing the stop_machine thing.
 931         *
 932         * Wait for the stop thread to go away.
 933         */
 934        wait_for_ap_thread(st, false);
 935        BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
 936
 937        /* Interrupts are moved away from the dying cpu, reenable alloc/free */
 938        irq_unlock_sparse();
 939
 940        hotplug_cpu__broadcast_tick_pull(cpu);
 941        /* This actually kills the CPU. */
 942        __cpu_die(cpu);
 943
 944        tick_cleanup_dead_cpu(cpu);
 945        rcutree_migrate_callbacks(cpu);
 946        return 0;
 947}
 948
 949static void cpuhp_complete_idle_dead(void *arg)
 950{
 951        struct cpuhp_cpu_state *st = arg;
 952
 953        complete_ap_thread(st, false);
 954}
 955
 956void cpuhp_report_idle_dead(void)
 957{
 958        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
 959
 960        BUG_ON(st->state != CPUHP_AP_OFFLINE);
 961        rcu_report_dead(smp_processor_id());
 962        st->state = CPUHP_AP_IDLE_DEAD;
 963        /*
 964         * We cannot call complete after rcu_report_dead() so we delegate it
 965         * to an online cpu.
 966         */
 967        smp_call_function_single(cpumask_first(cpu_online_mask),
 968                                 cpuhp_complete_idle_dead, st, 0);
 969}
 970
 971static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
 972{
 973        for (st->state++; st->state < st->target; st->state++)
 974                cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
 975}
 976
 977static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 978                                enum cpuhp_state target)
 979{
 980        enum cpuhp_state prev_state = st->state;
 981        int ret = 0;
 982
 983        for (; st->state > target; st->state--) {
 984                ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 985                if (ret) {
 986                        st->target = prev_state;
 987                        if (st->state < prev_state)
 988                                undo_cpu_down(cpu, st);
 989                        break;
 990                }
 991        }
 992        return ret;
 993}
 994
 995/* Requires cpu_add_remove_lock to be held */
 996static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 997                           enum cpuhp_state target)
 998{
 999        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1000        int prev_state, ret = 0;
1001
1002        if (num_online_cpus() == 1)
1003                return -EBUSY;
1004
1005        if (!cpu_present(cpu))
1006                return -EINVAL;
1007
1008        cpus_write_lock();
1009
1010        cpuhp_tasks_frozen = tasks_frozen;
1011
1012        prev_state = cpuhp_set_state(st, target);
1013        /*
1014         * If the current CPU state is in the range of the AP hotplug thread,
1015         * then we need to kick the thread.
1016         */
1017        if (st->state > CPUHP_TEARDOWN_CPU) {
1018                st->target = max((int)target, CPUHP_TEARDOWN_CPU);
1019                ret = cpuhp_kick_ap_work(cpu);
1020                /*
1021                 * The AP side has done the error rollback already. Just
1022                 * return the error code..
1023                 */
1024                if (ret)
1025                        goto out;
1026
1027                /*
1028                 * We might have stopped still in the range of the AP hotplug
1029                 * thread. Nothing to do anymore.
1030                 */
1031                if (st->state > CPUHP_TEARDOWN_CPU)
1032                        goto out;
1033
1034                st->target = target;
1035        }
1036        /*
1037         * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1038         * to do the further cleanups.
1039         */
1040        ret = cpuhp_down_callbacks(cpu, st, target);
1041        if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
1042                cpuhp_reset_state(st, prev_state);
1043                __cpuhp_kick_ap(st);
1044        }
1045
1046out:
1047        cpus_write_unlock();
1048        /*
1049         * Do post unplug cleanup. This is still protected against
1050         * concurrent CPU hotplug via cpu_add_remove_lock.
1051         */
1052        lockup_detector_cleanup();
1053        arch_smt_update();
1054        return ret;
1055}
1056
1057static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1058{
1059        if (cpu_hotplug_disabled)
1060                return -EBUSY;
1061        return _cpu_down(cpu, 0, target);
1062}
1063
1064static int cpu_down(unsigned int cpu, enum cpuhp_state target)
1065{
1066        int err;
1067
1068        cpu_maps_update_begin();
1069        err = cpu_down_maps_locked(cpu, target);
1070        cpu_maps_update_done();
1071        return err;
1072}
1073
1074/**
1075 * cpu_device_down - Bring down a cpu device
1076 * @dev: Pointer to the cpu device to offline
1077 *
1078 * This function is meant to be used by device core cpu subsystem only.
1079 *
1080 * Other subsystems should use remove_cpu() instead.
1081 */
1082int cpu_device_down(struct device *dev)
1083{
1084        return cpu_down(dev->id, CPUHP_OFFLINE);
1085}
1086
1087int remove_cpu(unsigned int cpu)
1088{
1089        int ret;
1090
1091        lock_device_hotplug();
1092        ret = device_offline(get_cpu_device(cpu));
1093        unlock_device_hotplug();
1094
1095        return ret;
1096}
1097EXPORT_SYMBOL_GPL(remove_cpu);
1098
1099void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
1100{
1101        unsigned int cpu;
1102        int error;
1103
1104        cpu_maps_update_begin();
1105
1106        /*
1107         * Make certain the cpu I'm about to reboot on is online.
1108         *
 1109         * This is in line with what migrate_to_reboot_cpu() already does.
1110         */
1111        if (!cpu_online(primary_cpu))
1112                primary_cpu = cpumask_first(cpu_online_mask);
1113
1114        for_each_online_cpu(cpu) {
1115                if (cpu == primary_cpu)
1116                        continue;
1117
1118                error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
1119                if (error) {
 1120                        pr_err("Failed to offline CPU%d - error=%d\n",
1121                                cpu, error);
1122                        break;
1123                }
1124        }
1125
1126        /*
1127         * Ensure all but the reboot CPU are offline.
1128         */
1129        BUG_ON(num_online_cpus() > 1);
1130
1131        /*
1132         * Make sure the CPUs won't be enabled by someone else after this
1133         * point. Kexec will reboot to a new kernel shortly resetting
1134         * everything along the way.
1135         */
1136        cpu_hotplug_disabled++;
1137
1138        cpu_maps_update_done();
1139}
1140
1141#else
1142#define takedown_cpu            NULL
1143#endif /*CONFIG_HOTPLUG_CPU*/
1144
1145/**
1146 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1147 * @cpu: cpu that just started
1148 *
1149 * It must be called by the arch code on the new cpu, before the new cpu
1150 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1151 */
1152void notify_cpu_starting(unsigned int cpu)
1153{
1154        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
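             /*
              * Clamp to CPUHP_AP_ONLINE: only the STARTING callbacks run here;
              * the remaining online states are handled by the hotplug thread.
              */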
1155        enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1156        int ret;
1157
1158        rcu_cpu_starting(cpu);  /* Enables RCU usage on this CPU. */
1159        cpumask_set_cpu(cpu, &cpus_booted_once_mask);
1160        while (st->state < target) {
1161                st->state++;
1162                ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
1163                /*
1164                 * STARTING must not fail!
1165                 */
1166                WARN_ON_ONCE(ret);
1167        }
1168}
1169
1170/*
1171 * Called from the idle task. Wake up the controlling task which brings the
1172 * hotplug thread of the upcoming CPU up and then delegates the rest of the
1173 * online bringup to the hotplug thread.
1174 */
1175void cpuhp_online_idle(enum cpuhp_state state)
1176{
1177        struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1178
1179        /* Happens for the boot cpu */
1180        if (state != CPUHP_AP_ONLINE_IDLE)
1181                return;
1182
1183        /*
 1184         * Unpark the stopper thread before we start the idle loop (and start
1185         * scheduling); this ensures the stopper task is always available.
1186         */
1187        stop_machine_unpark(smp_processor_id());
1188
1189        st->state = CPUHP_AP_ONLINE_IDLE;
1190        complete_ap_thread(st, true);
1191}
1192
1193/* Requires cpu_add_remove_lock to be held */
1194static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1195{
1196        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1197        struct task_struct *idle;
1198        int ret = 0;
1199
1200        cpus_write_lock();
1201
1202        if (!cpu_present(cpu)) {
1203                ret = -EINVAL;
1204                goto out;
1205        }
1206
1207        /*
1208         * The caller of cpu_up() might have raced with another
1209         * caller. Nothing to do.
1210         */
1211        if (st->state >= target)
1212                goto out;
1213
1214        if (st->state == CPUHP_OFFLINE) {
1215                /* Let it fail before we try to bring the cpu up */
1216                idle = idle_thread_get(cpu);
1217                if (IS_ERR(idle)) {
1218                        ret = PTR_ERR(idle);
1219                        goto out;
1220                }
1221        }
1222
1223        cpuhp_tasks_frozen = tasks_frozen;
1224
1225        cpuhp_set_state(st, target);
1226        /*
1227         * If the current CPU state is in the range of the AP hotplug thread,
1228         * then we need to kick the thread once more.
1229         */
1230        if (st->state > CPUHP_BRINGUP_CPU) {
1231                ret = cpuhp_kick_ap_work(cpu);
1232                /*
1233                 * The AP side has done the error rollback already. Just
1234                 * return the error code..
1235                 */
1236                if (ret)
1237                        goto out;
1238        }
1239
1240        /*
1241         * Try to reach the target state. We max out on the BP at
1242         * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1243         * responsible for bringing it up to the target state.
1244         */
1245        target = min((int)target, CPUHP_BRINGUP_CPU);
1246        ret = cpuhp_up_callbacks(cpu, st, target);
1247out:
1248        cpus_write_unlock();
1249        arch_smt_update();
1250        return ret;
1251}
1252
1253static int cpu_up(unsigned int cpu, enum cpuhp_state target)
1254{
1255        int err = 0;
1256
1257        if (!cpu_possible(cpu)) {
1258                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1259                       cpu);
1260#if defined(CONFIG_IA64)
1261                pr_err("please check additional_cpus= boot parameter\n");
1262#endif
1263                return -EINVAL;
1264        }
1265
1266        err = try_online_node(cpu_to_node(cpu));
1267        if (err)
1268                return err;
1269
1270        cpu_maps_update_begin();
1271
1272        if (cpu_hotplug_disabled) {
1273                err = -EBUSY;
1274                goto out;
1275        }
1276        if (!cpu_smt_allowed(cpu)) {
1277                err = -EPERM;
1278                goto out;
1279        }
1280
1281        err = _cpu_up(cpu, 0, target);
1282out:
1283        cpu_maps_update_done();
1284        return err;
1285}
1286
1287/**
1288 * cpu_device_up - Bring up a cpu device
1289 * @dev: Pointer to the cpu device to online
1290 *
1291 * This function is meant to be used by device core cpu subsystem only.
1292 *
1293 * Other subsystems should use add_cpu() instead.
1294 */
1295int cpu_device_up(struct device *dev)
1296{
1297        return cpu_up(dev->id, CPUHP_ONLINE);
1298}
1299
1300int add_cpu(unsigned int cpu)
1301{
1302        int ret;
1303
1304        lock_device_hotplug();
1305        ret = device_online(get_cpu_device(cpu));
1306        unlock_device_hotplug();
1307
1308        return ret;
1309}
1310EXPORT_SYMBOL_GPL(add_cpu);
1311
1312/**
1313 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1314 * @sleep_cpu: The cpu we hibernated on and should be brought up.
1315 *
1316 * On some architectures like arm64, we can hibernate on any CPU, but on
 1317 * wake-up the CPU we hibernated on might be offline as a side effect of
1318 * using maxcpus= for example.
1319 */
1320int bringup_hibernate_cpu(unsigned int sleep_cpu)
1321{
1322        int ret;
1323
1324        if (!cpu_online(sleep_cpu)) {
1325                pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
1326                ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
1327                if (ret) {
1328                        pr_err("Failed to bring hibernate-CPU up!\n");
1329                        return ret;
1330                }
1331        }
1332        return 0;
1333}
1334
1335void bringup_nonboot_cpus(unsigned int setup_max_cpus)
1336{
1337        unsigned int cpu;
1338
1339        for_each_present_cpu(cpu) {
1340                if (num_online_cpus() >= setup_max_cpus)
1341                        break;
1342                if (!cpu_online(cpu))
1343                        cpu_up(cpu, CPUHP_ONLINE);
1344        }
1345}
1346
1347#ifdef CONFIG_PM_SLEEP_SMP
1348static cpumask_var_t frozen_cpus;
1349
1350int freeze_secondary_cpus(int primary)
1351{
1352        int cpu, error = 0;
1353
1354        cpu_maps_update_begin();
1355        if (primary == -1) {
1356                primary = cpumask_first(cpu_online_mask);
1357                if (!housekeeping_cpu(primary, HK_FLAG_TIMER))
1358                        primary = housekeeping_any_cpu(HK_FLAG_TIMER);
1359        } else {
1360                if (!cpu_online(primary))
1361                        primary = cpumask_first(cpu_online_mask);
1362        }
1363
1364        /*
1365         * We take down all of the non-boot CPUs in one shot to avoid races
1366         * with the userspace trying to use the CPU hotplug at the same time
1367         */
1368        cpumask_clear(frozen_cpus);
1369
1370        pr_info("Disabling non-boot CPUs ...\n");
1371        for_each_online_cpu(cpu) {
1372                if (cpu == primary)
1373                        continue;
1374
1375                if (pm_wakeup_pending()) {
1376                        pr_info("Wakeup pending. Abort CPU freeze\n");
1377                        error = -EBUSY;
1378                        break;
1379                }
1380
1381                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1382                error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1383                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1384                if (!error)
1385                        cpumask_set_cpu(cpu, frozen_cpus);
1386                else {
1387                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
1388                        break;
1389                }
1390        }
1391
1392        if (!error)
1393                BUG_ON(num_online_cpus() > 1);
1394        else
1395                pr_err("Non-boot CPUs are not disabled\n");
1396
1397        /*
1398         * Make sure the CPUs won't be enabled by someone else. We need to do
1399         * this even in case of failure as all freeze_secondary_cpus() users are
1400         * supposed to do thaw_secondary_cpus() on the failure path.
1401         */
1402        cpu_hotplug_disabled++;
1403
1404        cpu_maps_update_done();
1405        return error;
1406}
1407
1408void __weak arch_thaw_secondary_cpus_begin(void)
1409{
1410}
1411
1412void __weak arch_thaw_secondary_cpus_end(void)
1413{
1414}
1415
1416void thaw_secondary_cpus(void)
1417{
1418        int cpu, error;
1419
1420        /* Allow everyone to use the CPU hotplug again */
1421        cpu_maps_update_begin();
1422        __cpu_hotplug_enable();
1423        if (cpumask_empty(frozen_cpus))
1424                goto out;
1425
1426        pr_info("Enabling non-boot CPUs ...\n");
1427
1428        arch_thaw_secondary_cpus_begin();
1429
1430        for_each_cpu(cpu, frozen_cpus) {
1431                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1432                error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1433                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1434                if (!error) {
1435                        pr_info("CPU%d is up\n", cpu);
1436                        continue;
1437                }
1438                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1439        }
1440
1441        arch_thaw_secondary_cpus_end();
1442
1443        cpumask_clear(frozen_cpus);
1444out:
1445        cpu_maps_update_done();
1446}
1447
1448static int __init alloc_frozen_cpus(void)
1449{
1450        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1451                return -ENOMEM;
1452        return 0;
1453}
1454core_initcall(alloc_frozen_cpus);
1455
1456/*
1457 * When callbacks for CPU hotplug notifications are being executed, we must
1458 * ensure that the state of the system with respect to the tasks being frozen
1459 * or not, as reported by the notification, remains unchanged *throughout the
1460 * duration* of the execution of the callbacks.
1461 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1462 *
1463 * This synchronization is implemented by mutually excluding regular CPU
1464 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1465 * Hibernate notifications.
1466 */
1467static int
1468cpu_hotplug_pm_callback(struct notifier_block *nb,
1469                        unsigned long action, void *ptr)
1470{
1471        switch (action) {
1472
1473        case PM_SUSPEND_PREPARE:
1474        case PM_HIBERNATION_PREPARE:
1475                cpu_hotplug_disable();
1476                break;
1477
1478        case PM_POST_SUSPEND:
1479        case PM_POST_HIBERNATION:
1480                cpu_hotplug_enable();
1481                break;
1482
1483        default:
1484                return NOTIFY_DONE;
1485        }
1486
1487        return NOTIFY_OK;
1488}
1489
1490
1491static int __init cpu_hotplug_pm_sync_init(void)
1492{
1493        /*
1494         * cpu_hotplug_pm_callback has higher priority than x86
1495         * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1496         * to disable cpu hotplug to avoid cpu hotplug race.
1497         */
1498        pm_notifier(cpu_hotplug_pm_callback, 0);
1499        return 0;
1500}
1501core_initcall(cpu_hotplug_pm_sync_init);
1502
1503#endif /* CONFIG_PM_SLEEP_SMP */
1504
1505int __boot_cpu_id;
1506
1507#endif /* CONFIG_SMP */
1508
1509/* Boot processor state steps */
1510static struct cpuhp_step cpuhp_hp_states[] = {
1511        [CPUHP_OFFLINE] = {
1512                .name                   = "offline",
1513                .startup.single         = NULL,
1514                .teardown.single        = NULL,
1515        },
1516#ifdef CONFIG_SMP
 1517        [CPUHP_CREATE_THREADS] = {
1518                .name                   = "threads:prepare",
1519                .startup.single         = smpboot_create_threads,
1520                .teardown.single        = NULL,
1521                .cant_stop              = true,
1522        },
1523        [CPUHP_PERF_PREPARE] = {
1524                .name                   = "perf:prepare",
1525                .startup.single         = perf_event_init_cpu,
1526                .teardown.single        = perf_event_exit_cpu,
1527        },
1528        [CPUHP_WORKQUEUE_PREP] = {
1529                .name                   = "workqueue:prepare",
1530                .startup.single         = workqueue_prepare_cpu,
1531                .teardown.single        = NULL,
1532        },
1533        [CPUHP_HRTIMERS_PREPARE] = {
1534                .name                   = "hrtimers:prepare",
1535                .startup.single         = hrtimers_prepare_cpu,
1536                .teardown.single        = hrtimers_dead_cpu,
1537        },
1538        [CPUHP_SMPCFD_PREPARE] = {
1539                .name                   = "smpcfd:prepare",
1540                .startup.single         = smpcfd_prepare_cpu,
1541                .teardown.single        = smpcfd_dead_cpu,
1542        },
1543        [CPUHP_RELAY_PREPARE] = {
1544                .name                   = "relay:prepare",
1545                .startup.single         = relay_prepare_cpu,
1546                .teardown.single        = NULL,
1547        },
1548        [CPUHP_SLAB_PREPARE] = {
1549                .name                   = "slab:prepare",
1550                .startup.single         = slab_prepare_cpu,
1551                .teardown.single        = slab_dead_cpu,
1552        },
1553        [CPUHP_RCUTREE_PREP] = {
1554                .name                   = "RCU/tree:prepare",
1555                .startup.single         = rcutree_prepare_cpu,
1556                .teardown.single        = rcutree_dead_cpu,
1557        },
1558        /*
1559         * On the tear-down path, timers_dead_cpu() must be invoked
1560         * before blk_mq_queue_reinit_notify() from notify_dead(),
 1561         * otherwise an RCU stall occurs.
1562         */
1563        [CPUHP_TIMERS_PREPARE] = {
1564                .name                   = "timers:prepare",
1565                .startup.single         = timers_prepare_cpu,
1566                .teardown.single        = timers_dead_cpu,
1567        },
1568        /* Kicks the plugged cpu into life */
1569        [CPUHP_BRINGUP_CPU] = {
1570                .name                   = "cpu:bringup",
1571                .startup.single         = bringup_cpu,
1572                .teardown.single        = finish_cpu,
1573                .cant_stop              = true,
1574        },
1575        /* Final state before CPU kills itself */
1576        [CPUHP_AP_IDLE_DEAD] = {
1577                .name                   = "idle:dead",
1578        },
1579        /*
1580         * Last state before CPU enters the idle loop to die. Transient state
1581         * for synchronization.
1582         */
1583        [CPUHP_AP_OFFLINE] = {
1584                .name                   = "ap:offline",
1585                .cant_stop              = true,
1586        },
1587        /* First state is scheduler control. Interrupts are disabled */
1588        [CPUHP_AP_SCHED_STARTING] = {
1589                .name                   = "sched:starting",
1590                .startup.single         = sched_cpu_starting,
1591                .teardown.single        = sched_cpu_dying,
1592        },
1593        [CPUHP_AP_RCUTREE_DYING] = {
1594                .name                   = "RCU/tree:dying",
1595                .startup.single         = NULL,
1596                .teardown.single        = rcutree_dying_cpu,
1597        },
1598        [CPUHP_AP_SMPCFD_DYING] = {
1599                .name                   = "smpcfd:dying",
1600                .startup.single         = NULL,
1601                .teardown.single        = smpcfd_dying_cpu,
1602        },
1603        /* Entry state on starting. Interrupts enabled from here on. Transient
 1604         * state for synchronization */
1605        [CPUHP_AP_ONLINE] = {
1606                .name                   = "ap:online",
1607        },
1608        /*
1609         * Handled on control processor until the plugged processor manages
1610         * this itself.
1611         */
1612        [CPUHP_TEARDOWN_CPU] = {
1613                .name                   = "cpu:teardown",
1614                .startup.single         = NULL,
1615                .teardown.single        = takedown_cpu,
1616                .cant_stop              = true,
1617        },
1618
1619        [CPUHP_AP_SCHED_WAIT_EMPTY] = {
1620                .name                   = "sched:waitempty",
1621                .startup.single         = NULL,
1622                .teardown.single        = sched_cpu_wait_empty,
1623        },
1624
1625        /* Handle smpboot threads park/unpark */
1626        [CPUHP_AP_SMPBOOT_THREADS] = {
1627                .name                   = "smpboot/threads:online",
1628                .startup.single         = smpboot_unpark_threads,
1629                .teardown.single        = smpboot_park_threads,
1630        },
1631        [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
1632                .name                   = "irq/affinity:online",
1633                .startup.single         = irq_affinity_online_cpu,
1634                .teardown.single        = NULL,
1635        },
1636        [CPUHP_AP_PERF_ONLINE] = {
1637                .name                   = "perf:online",
1638                .startup.single         = perf_event_init_cpu,
1639                .teardown.single        = perf_event_exit_cpu,
1640        },
1641        [CPUHP_AP_WATCHDOG_ONLINE] = {
1642                .name                   = "lockup_detector:online",
1643                .startup.single         = lockup_detector_online_cpu,
1644                .teardown.single        = lockup_detector_offline_cpu,
1645        },
1646        [CPUHP_AP_WORKQUEUE_ONLINE] = {
1647                .name                   = "workqueue:online",
1648                .startup.single         = workqueue_online_cpu,
1649                .teardown.single        = workqueue_offline_cpu,
1650        },
1651        [CPUHP_AP_RCUTREE_ONLINE] = {
1652                .name                   = "RCU/tree:online",
1653                .startup.single         = rcutree_online_cpu,
1654                .teardown.single        = rcutree_offline_cpu,
1655        },
1656#endif
1657        /*
1658         * The dynamically registered state space is here
1659         */
1660
1661#ifdef CONFIG_SMP
1662        /* Last state is scheduler control setting the cpu active */
1663        [CPUHP_AP_ACTIVE] = {
1664                .name                   = "sched:active",
1665                .startup.single         = sched_cpu_activate,
1666                .teardown.single        = sched_cpu_deactivate,
1667        },
1668#endif
1669
1670        /* CPU is fully up and running. */
1671        [CPUHP_ONLINE] = {
1672                .name                   = "online",
1673                .startup.single         = NULL,
1674                .teardown.single        = NULL,
1675        },
1676};
1677
1678/* Sanity check for callbacks */
1679static int cpuhp_cb_check(enum cpuhp_state state)
1680{
1681        if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1682                return -EINVAL;
1683        return 0;
1684}
1685
1686/*
1687 * Returns a free slot for dynamic state assignment in the requested range.
1688 * The states are protected by the cpuhp_state_mutex and an empty slot is
1689 * identified by having no name assigned.
1690 */
1691static int cpuhp_reserve_state(enum cpuhp_state state)
1692{
1693        enum cpuhp_state i, end;
1694        struct cpuhp_step *step;
1695
1696        switch (state) {
1697        case CPUHP_AP_ONLINE_DYN:
1698                step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
1699                end = CPUHP_AP_ONLINE_DYN_END;
1700                break;
1701        case CPUHP_BP_PREPARE_DYN:
1702                step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
1703                end = CPUHP_BP_PREPARE_DYN_END;
1704                break;
1705        default:
1706                return -EINVAL;
1707        }
1708
1709        for (i = state; i <= end; i++, step++) {
1710                if (!step->name)
1711                        return i;
1712        }
1713        WARN(1, "No more dynamic states available for CPU hotplug\n");
1714        return -ENOSPC;
1715}
1716
1717static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1718                                 int (*startup)(unsigned int cpu),
1719                                 int (*teardown)(unsigned int cpu),
1720                                 bool multi_instance)
1721{
1722        /* (Un)Install the callbacks for further cpu hotplug operations */
1723        struct cpuhp_step *sp;
1724        int ret = 0;
1725
1726        /*
1727         * If name is NULL, then the state gets removed.
1728         *
1729         * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
1730         * the first allocation from these dynamic ranges, so the removal
1731         * would trigger a new allocation and clear the wrong (already
1732         * empty) state, leaving the callbacks of the to be cleared state
1733         * dangling, which causes wreckage on the next hotplug operation.
1734         */
1735        if (name && (state == CPUHP_AP_ONLINE_DYN ||
1736                     state == CPUHP_BP_PREPARE_DYN)) {
1737                ret = cpuhp_reserve_state(state);
1738                if (ret < 0)
1739                        return ret;
1740                state = ret;
1741        }
1742        sp = cpuhp_get_step(state);
1743        if (name && sp->name)
1744                return -EBUSY;
1745
1746        sp->startup.single = startup;
1747        sp->teardown.single = teardown;
1748        sp->name = name;
1749        sp->multi_instance = multi_instance;
1750        INIT_HLIST_HEAD(&sp->list);
1751        return ret;
1752}
1753
1754static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1755{
1756        return cpuhp_get_step(state)->teardown.single;
1757}
1758
1759/*
1760 * Call the startup/teardown function for a step either on the AP or
1761 * on the current CPU.
1762 */
1763static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1764                            struct hlist_node *node)
1765{
1766        struct cpuhp_step *sp = cpuhp_get_step(state);
1767        int ret;
1768
1769        /*
1770         * If there's nothing to do, we're done.
1771         * Relies on the union for multi_instance.
1772         */
1773        if ((bringup && !sp->startup.single) ||
1774            (!bringup && !sp->teardown.single))
1775                return 0;
1776        /*
1777         * The non-AP-bound callbacks can fail on bringup. On teardown,
1778         * e.g. during module removal, we crash for now.
1779         */
1780#ifdef CONFIG_SMP
1781        if (cpuhp_is_ap_state(state))
1782                ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1783        else
1784                ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1785#else
1786        ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1787#endif
1788        BUG_ON(ret && !bringup);
1789        return ret;
1790}
1791
1792/*
1793 * Called from state setup or instance addition on a recoverable failure.
1794 *
1795 * Note: The teardown callbacks for rollback are not allowed to fail!
1796 */
1797static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1798                                   struct hlist_node *node)
1799{
1800        int cpu;
1801
1802        /* Roll back the already executed steps on the other cpus */
1803        for_each_present_cpu(cpu) {
1804                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1805                int cpustate = st->state;
1806
1807                if (cpu >= failedcpu)
1808                        break;
1809
1810                /* Did we invoke the startup call on that cpu ? */
1811                if (cpustate >= state)
1812                        cpuhp_issue_call(cpu, state, false, node);
1813        }
1814}
1815
1816int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1817                                          struct hlist_node *node,
1818                                          bool invoke)
1819{
1820        struct cpuhp_step *sp;
1821        int cpu;
1822        int ret;
1823
1824        lockdep_assert_cpus_held();
1825
1826        sp = cpuhp_get_step(state);
1827        if (!sp->multi_instance)
1828                return -EINVAL;
1829
1830        mutex_lock(&cpuhp_state_mutex);
1831
1832        if (!invoke || !sp->startup.multi)
1833                goto add_node;
1834
1835        /*
1836         * Try to call the startup callback for each present cpu
1837         * depending on the hotplug state of the cpu.
1838         */
1839        for_each_present_cpu(cpu) {
1840                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1841                int cpustate = st->state;
1842
1843                if (cpustate < state)
1844                        continue;
1845
1846                ret = cpuhp_issue_call(cpu, state, true, node);
1847                if (ret) {
1848                        if (sp->teardown.multi)
1849                                cpuhp_rollback_install(cpu, state, node);
1850                        goto unlock;
1851                }
1852        }
1853add_node:
1854        ret = 0;
1855        hlist_add_head(node, &sp->list);
1856unlock:
1857        mutex_unlock(&cpuhp_state_mutex);
1858        return ret;
1859}
1860
1861int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1862                               bool invoke)
1863{
1864        int ret;
1865
1866        cpus_read_lock();
1867        ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
1868        cpus_read_unlock();
1869        return ret;
1870}
1871EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
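/*
 * A minimal, illustrative sketch of the multi-instance pattern built on the
 * helpers above: a hypothetical driver registers a multi-instance state with
 * cpuhp_setup_state_multi() (the cpus-unlocked wrapper declared in
 * include/linux/cpuhotplug.h) and later hooks each device instance in via
 * cpuhp_state_add_instance(). The mydev structure and callback names are
 * assumptions made only for this example.
 */
struct mydev {
        struct hlist_node node;         /* linked into the state's instance list */
        /* per-device data ... */
};

static enum cpuhp_state mydev_online_state;

static int mydev_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct mydev *dev = container_of(node, struct mydev, node);

        pr_debug("mydev %p: setting up per-CPU state on CPU%u\n", dev, cpu);
        return 0;
}

static int mydev_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
        struct mydev *dev = container_of(node, struct mydev, node);

        pr_debug("mydev %p: tearing down per-CPU state on CPU%u\n", dev, cpu);
        return 0;
}

static int __init mydev_hp_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydev:online",
                                      mydev_cpu_online, mydev_cpu_offline);
        if (ret < 0)
                return ret;
        mydev_online_state = ret;
        return 0;
}

static int mydev_probe(struct mydev *dev)
{
        /* Runs the startup callback for @dev on every CPU at or above the state. */
        return cpuhp_state_add_instance(mydev_online_state, &dev->node);
}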
1872
1873/**
1874 * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
1875 * @state:              The state to setup
1876 * @invoke:             If true, the startup function is invoked for cpus where
1877 *                      cpu state >= @state
1878 * @startup:            startup callback function
1879 * @teardown:           teardown callback function
1880 * @multi_instance:     State is set up for multiple instances which get
1881 *                      added afterwards.
1882 *
1883 * The caller needs to hold cpus read locked while calling this function.
1884 * Returns:
1885 *   On success:
1886 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
1887 *      0 for all other states
1888 *   On failure: proper (negative) error code
1889 */
1890int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
1891                                   const char *name, bool invoke,
1892                                   int (*startup)(unsigned int cpu),
1893                                   int (*teardown)(unsigned int cpu),
1894                                   bool multi_instance)
1895{
1896        int cpu, ret = 0;
1897        bool dynstate;
1898
1899        lockdep_assert_cpus_held();
1900
1901        if (cpuhp_cb_check(state) || !name)
1902                return -EINVAL;
1903
1904        mutex_lock(&cpuhp_state_mutex);
1905
1906        ret = cpuhp_store_callbacks(state, name, startup, teardown,
1907                                    multi_instance);
1908
1909        dynstate = state == CPUHP_AP_ONLINE_DYN;
1910        if (ret > 0 && dynstate) {
1911                state = ret;
1912                ret = 0;
1913        }
1914
1915        if (ret || !invoke || !startup)
1916                goto out;
1917
1918        /*
1919         * Try to call the startup callback for each present cpu
1920         * depending on the hotplug state of the cpu.
1921         */
1922        for_each_present_cpu(cpu) {
1923                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1924                int cpustate = st->state;
1925
1926                if (cpustate < state)
1927                        continue;
1928
1929                ret = cpuhp_issue_call(cpu, state, true, NULL);
1930                if (ret) {
1931                        if (teardown)
1932                                cpuhp_rollback_install(cpu, state, NULL);
1933                        cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1934                        goto out;
1935                }
1936        }
1937out:
1938        mutex_unlock(&cpuhp_state_mutex);
1939        /*
1940         * If the requested state is CPUHP_AP_ONLINE_DYN, return the
1941         * dynamically allocated state in case of success.
1942         */
1943        if (!ret && dynstate)
1944                return state;
1945        return ret;
1946}
1947EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
1948
1949int __cpuhp_setup_state(enum cpuhp_state state,
1950                        const char *name, bool invoke,
1951                        int (*startup)(unsigned int cpu),
1952                        int (*teardown)(unsigned int cpu),
1953                        bool multi_instance)
1954{
1955        int ret;
1956
1957        cpus_read_lock();
1958        ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
1959                                             teardown, multi_instance);
1960        cpus_read_unlock();
1961        return ret;
1962}
1963EXPORT_SYMBOL(__cpuhp_setup_state);
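/*
 * A minimal sketch of the common single-instance usage of the function above,
 * through the cpuhp_setup_state() wrapper from include/linux/cpuhotplug.h.
 * Passing CPUHP_AP_ONLINE_DYN lets cpuhp_reserve_state() pick a free dynamic
 * slot, and the allocated state is returned as a positive number that must be
 * kept for later removal. The "myfeat" names are assumptions made only for
 * this example.
 */
static enum cpuhp_state myfeat_hp_state;

static int myfeat_cpu_online(unsigned int cpu)
{
        /* Set up per-CPU resources; invoked for each CPU reaching the state. */
        return 0;
}

static int myfeat_cpu_offline(unsigned int cpu)
{
        /* Undo myfeat_cpu_online() for @cpu; invoked on the way down. */
        return 0;
}

static int __init myfeat_init(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "myfeat:online",
                                myfeat_cpu_online, myfeat_cpu_offline);
        if (ret < 0)
                return ret;
        myfeat_hp_state = ret;
        return 0;
}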
1964
1965int __cpuhp_state_remove_instance(enum cpuhp_state state,
1966                                  struct hlist_node *node, bool invoke)
1967{
1968        struct cpuhp_step *sp = cpuhp_get_step(state);
1969        int cpu;
1970
1971        BUG_ON(cpuhp_cb_check(state));
1972
1973        if (!sp->multi_instance)
1974                return -EINVAL;
1975
1976        cpus_read_lock();
1977        mutex_lock(&cpuhp_state_mutex);
1978
1979        if (!invoke || !cpuhp_get_teardown_cb(state))
1980                goto remove;
1981        /*
1982         * Call the teardown callback for each present cpu depending
1983         * on the hotplug state of the cpu. This function is not
1984         * allowed to fail currently!
1985         */
1986        for_each_present_cpu(cpu) {
1987                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1988                int cpustate = st->state;
1989
1990                if (cpustate >= state)
1991                        cpuhp_issue_call(cpu, state, false, node);
1992        }
1993
1994remove:
1995        hlist_del(node);
1996        mutex_unlock(&cpuhp_state_mutex);
1997        cpus_read_unlock();
1998
1999        return 0;
2000}
2001EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
2002
2003/**
2004 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
2005 * @state:      The state to remove
2006 * @invoke:     If true, the teardown function is invoked for cpus where
2007 *              cpu state >= @state
2008 *
2009 * The caller needs to hold cpus read locked while calling this function.
2010 * The teardown callback is currently not allowed to fail. Think
2011 * about module removal!
2012 */
2013void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2014{
2015        struct cpuhp_step *sp = cpuhp_get_step(state);
2016        int cpu;
2017
2018        BUG_ON(cpuhp_cb_check(state));
2019
2020        lockdep_assert_cpus_held();
2021
2022        mutex_lock(&cpuhp_state_mutex);
2023        if (sp->multi_instance) {
2024                WARN(!hlist_empty(&sp->list),
2025                     "Error: Removing state %d which has instances left.\n",
2026                     state);
2027                goto remove;
2028        }
2029
2030        if (!invoke || !cpuhp_get_teardown_cb(state))
2031                goto remove;
2032
2033        /*
2034         * Call the teardown callback for each present cpu depending
2035         * on the hotplug state of the cpu. This function is not
2036         * allowed to fail currently!
2037         */
2038        for_each_present_cpu(cpu) {
2039                struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2040                int cpustate = st->state;
2041
2042                if (cpustate >= state)
2043                        cpuhp_issue_call(cpu, state, false, NULL);
2044        }
2045remove:
2046        cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2047        mutex_unlock(&cpuhp_state_mutex);
2048}
2049EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2050
2051void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2052{
2053        cpus_read_lock();
2054        __cpuhp_remove_state_cpuslocked(state, invoke);
2055        cpus_read_unlock();
2056}
2057EXPORT_SYMBOL(__cpuhp_remove_state);
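/*
 * Completing the "myfeat" sketch from above: on unload the dynamically
 * allocated state is released with cpuhp_remove_state(), which runs the
 * teardown callback on every CPU that has reached the state and then frees
 * the slot for reuse. The names remain illustrative assumptions.
 */
static void __exit myfeat_exit(void)
{
        cpuhp_remove_state(myfeat_hp_state);
}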
2058
2059#ifdef CONFIG_HOTPLUG_SMT
2060static void cpuhp_offline_cpu_device(unsigned int cpu)
2061{
2062        struct device *dev = get_cpu_device(cpu);
2063
2064        dev->offline = true;
2065        /* Tell user space about the state change */
2066        kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2067}
2068
2069static void cpuhp_online_cpu_device(unsigned int cpu)
2070{
2071        struct device *dev = get_cpu_device(cpu);
2072
2073        dev->offline = false;
2074        /* Tell user space about the state change */
2075        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2076}
2077
2078int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2079{
2080        int cpu, ret = 0;
2081
2082        cpu_maps_update_begin();
2083        for_each_online_cpu(cpu) {
2084                if (topology_is_primary_thread(cpu))
2085                        continue;
2086                ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2087                if (ret)
2088                        break;
2089                /*
2090                 * As this needs to hold the cpu maps lock, it's impossible
2091                 * to call device_offline() because that ends up calling
2092                 * cpu_down(), which takes the cpu maps lock. The cpu maps lock
2093                 * needs to be held as this might race against in-kernel
2094                 * abusers of the hotplug machinery (thermal management).
2095                 *
2096                 * So nothing would update the device's offline state. That would
2097                 * leave the sysfs entry stale and prevent onlining after
2098                 * smt control has been changed to 'off' again. This is
2099                 * called under the sysfs hotplug lock, so it is properly
2100                 * serialized against the regular offline usage.
2101                 */
2102                cpuhp_offline_cpu_device(cpu);
2103        }
2104        if (!ret)
2105                cpu_smt_control = ctrlval;
2106        cpu_maps_update_done();
2107        return ret;
2108}
2109
2110int cpuhp_smt_enable(void)
2111{
2112        int cpu, ret = 0;
2113
2114        cpu_maps_update_begin();
2115        cpu_smt_control = CPU_SMT_ENABLED;
2116        for_each_present_cpu(cpu) {
2117                /* Skip online CPUs and CPUs on offline nodes */
2118                if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2119                        continue;
2120                ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2121                if (ret)
2122                        break;
2123                /* See comment in cpuhp_smt_disable() */
2124                cpuhp_online_cpu_device(cpu);
2125        }
2126        cpu_maps_update_done();
2127        return ret;
2128}
2129#endif
2130
2131#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2132static ssize_t show_cpuhp_state(struct device *dev,
2133                                struct device_attribute *attr, char *buf)
2134{
2135        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2136
2137        return sprintf(buf, "%d\n", st->state);
2138}
2139static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
2140
2141static ssize_t write_cpuhp_target(struct device *dev,
2142                                  struct device_attribute *attr,
2143                                  const char *buf, size_t count)
2144{
2145        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2146        struct cpuhp_step *sp;
2147        int target, ret;
2148
2149        ret = kstrtoint(buf, 10, &target);
2150        if (ret)
2151                return ret;
2152
2153#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2154        if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2155                return -EINVAL;
2156#else
2157        if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2158                return -EINVAL;
2159#endif
2160
2161        ret = lock_device_hotplug_sysfs();
2162        if (ret)
2163                return ret;
2164
2165        mutex_lock(&cpuhp_state_mutex);
2166        sp = cpuhp_get_step(target);
2167        ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2168        mutex_unlock(&cpuhp_state_mutex);
2169        if (ret)
2170                goto out;
2171
2172        if (st->state < target)
2173                ret = cpu_up(dev->id, target);
2174        else
2175                ret = cpu_down(dev->id, target);
2176out:
2177        unlock_device_hotplug();
2178        return ret ? ret : count;
2179}
2180
2181static ssize_t show_cpuhp_target(struct device *dev,
2182                                 struct device_attribute *attr, char *buf)
2183{
2184        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2185
2186        return sprintf(buf, "%d\n", st->target);
2187}
2188static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
2189
2190
2191static ssize_t write_cpuhp_fail(struct device *dev,
2192                                struct device_attribute *attr,
2193                                const char *buf, size_t count)
2194{
2195        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2196        struct cpuhp_step *sp;
2197        int fail, ret;
2198
2199        ret = kstrtoint(buf, 10, &fail);
2200        if (ret)
2201                return ret;
2202
2203        if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
2204                return -EINVAL;
2205
2206        /*
2207         * Cannot fail STARTING/DYING callbacks.
2208         */
2209        if (cpuhp_is_atomic_state(fail))
2210                return -EINVAL;
2211
2212        /*
2213         * Cannot fail anything that doesn't have callbacks.
2214         */
2215        mutex_lock(&cpuhp_state_mutex);
2216        sp = cpuhp_get_step(fail);
2217        if (!sp->startup.single && !sp->teardown.single)
2218                ret = -EINVAL;
2219        mutex_unlock(&cpuhp_state_mutex);
2220        if (ret)
2221                return ret;
2222
2223        st->fail = fail;
2224
2225        return count;
2226}
2227
2228static ssize_t show_cpuhp_fail(struct device *dev,
2229                               struct device_attribute *attr, char *buf)
2230{
2231        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2232
2233        return sprintf(buf, "%d\n", st->fail);
2234}
2235
2236static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
2237
2238static struct attribute *cpuhp_cpu_attrs[] = {
2239        &dev_attr_state.attr,
2240        &dev_attr_target.attr,
2241        &dev_attr_fail.attr,
2242        NULL
2243};
2244
2245static const struct attribute_group cpuhp_cpu_attr_group = {
2246        .attrs = cpuhp_cpu_attrs,
2247        .name = "hotplug",
2248        NULL
2249};
2250
2251static ssize_t show_cpuhp_states(struct device *dev,
2252                                 struct device_attribute *attr, char *buf)
2253{
2254        ssize_t cur, res = 0;
2255        int i;
2256
2257        mutex_lock(&cpuhp_state_mutex);
2258        for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2259                struct cpuhp_step *sp = cpuhp_get_step(i);
2260
2261                if (sp->name) {
2262                        cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2263                        buf += cur;
2264                        res += cur;
2265                }
2266        }
2267        mutex_unlock(&cpuhp_state_mutex);
2268        return res;
2269}
2270static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
2271
2272static struct attribute *cpuhp_cpu_root_attrs[] = {
2273        &dev_attr_states.attr,
2274        NULL
2275};
2276
2277static const struct attribute_group cpuhp_cpu_root_attr_group = {
2278        .attrs = cpuhp_cpu_root_attrs,
2279        .name = "hotplug",
2280        NULL
2281};
2282
2283#ifdef CONFIG_HOTPLUG_SMT
2284
2285static ssize_t
2286__store_smt_control(struct device *dev, struct device_attribute *attr,
2287                    const char *buf, size_t count)
2288{
2289        int ctrlval, ret;
2290
2291        if (sysfs_streq(buf, "on"))
2292                ctrlval = CPU_SMT_ENABLED;
2293        else if (sysfs_streq(buf, "off"))
2294                ctrlval = CPU_SMT_DISABLED;
2295        else if (sysfs_streq(buf, "forceoff"))
2296                ctrlval = CPU_SMT_FORCE_DISABLED;
2297        else
2298                return -EINVAL;
2299
2300        if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2301                return -EPERM;
2302
2303        if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2304                return -ENODEV;
2305
2306        ret = lock_device_hotplug_sysfs();
2307        if (ret)
2308                return ret;
2309
2310        if (ctrlval != cpu_smt_control) {
2311                switch (ctrlval) {
2312                case CPU_SMT_ENABLED:
2313                        ret = cpuhp_smt_enable();
2314                        break;
2315                case CPU_SMT_DISABLED:
2316                case CPU_SMT_FORCE_DISABLED:
2317                        ret = cpuhp_smt_disable(ctrlval);
2318                        break;
2319                }
2320        }
2321
2322        unlock_device_hotplug();
2323        return ret ? ret : count;
2324}
2325
2326#else /* !CONFIG_HOTPLUG_SMT */
2327static ssize_t
2328__store_smt_control(struct device *dev, struct device_attribute *attr,
2329                    const char *buf, size_t count)
2330{
2331        return -ENODEV;
2332}
2333#endif /* CONFIG_HOTPLUG_SMT */
2334
2335static const char *smt_states[] = {
2336        [CPU_SMT_ENABLED]               = "on",
2337        [CPU_SMT_DISABLED]              = "off",
2338        [CPU_SMT_FORCE_DISABLED]        = "forceoff",
2339        [CPU_SMT_NOT_SUPPORTED]         = "notsupported",
2340        [CPU_SMT_NOT_IMPLEMENTED]       = "notimplemented",
2341};
2342
2343static ssize_t
2344show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
2345{
2346        const char *state = smt_states[cpu_smt_control];
2347
2348        return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
2349}
2350
2351static ssize_t
2352store_smt_control(struct device *dev, struct device_attribute *attr,
2353                  const char *buf, size_t count)
2354{
2355        return __store_smt_control(dev, attr, buf, count);
2356}
2357static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
2358
2359static ssize_t
2360show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2361{
2362        return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
2363}
2364static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
2365
2366static struct attribute *cpuhp_smt_attrs[] = {
2367        &dev_attr_control.attr,
2368        &dev_attr_active.attr,
2369        NULL
2370};
2371
2372static const struct attribute_group cpuhp_smt_attr_group = {
2373        .attrs = cpuhp_smt_attrs,
2374        .name = "smt",
2375        NULL
2376};
2377
2378static int __init cpu_smt_sysfs_init(void)
2379{
2380        return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2381                                  &cpuhp_smt_attr_group);
2382}
2383
2384static int __init cpuhp_sysfs_init(void)
2385{
2386        int cpu, ret;
2387
2388        ret = cpu_smt_sysfs_init();
2389        if (ret)
2390                return ret;
2391
2392        ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2393                                 &cpuhp_cpu_root_attr_group);
2394        if (ret)
2395                return ret;
2396
2397        for_each_possible_cpu(cpu) {
2398                struct device *dev = get_cpu_device(cpu);
2399
2400                if (!dev)
2401                        continue;
2402                ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2403                if (ret)
2404                        return ret;
2405        }
2406        return 0;
2407}
2408device_initcall(cpuhp_sysfs_init);
2409#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
2410
2411/*
2412 * cpu_bit_bitmap[] is a special, "compressed" data structure that
2413 * represents all NR_CPUS-bit binary values of the form 1<<nr.
2414 *
2415 * It is used by cpumask_of() to get a constant address for a CPU
2416 * mask value that has only a single bit set.
2417 */
2418
2419/* cpu_bit_bitmap[0] is empty - so we can back into it */
2420#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
2421#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2422#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2423#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2424
2425const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2426
2427        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
2428        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
2429#if BITS_PER_LONG > 32
2430        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
2431        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
2432#endif
2433};
2434EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
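/*
 * To illustrate how the table above is consumed: cpumask_of(cpu) ends up in
 * get_cpu_mask() (include/linux/cpumask.h), which picks the row whose first
 * word has bit (cpu % BITS_PER_LONG) set and then steps the pointer back by
 * (cpu / BITS_PER_LONG) longs, so the bit lands in the word corresponding to
 * @cpu. A sketch of that arithmetic with an illustrative helper name:
 */
static inline const unsigned long *example_cpu_mask_bits(unsigned int cpu)
{
        const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

        /* Rebase so that word (cpu / BITS_PER_LONG) carries the set bit. */
        return p - cpu / BITS_PER_LONG;
}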
2435
2436const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2437EXPORT_SYMBOL(cpu_all_bits);
2438
2439#ifdef CONFIG_INIT_ALL_POSSIBLE
2440struct cpumask __cpu_possible_mask __read_mostly
2441        = {CPU_BITS_ALL};
2442#else
2443struct cpumask __cpu_possible_mask __read_mostly;
2444#endif
2445EXPORT_SYMBOL(__cpu_possible_mask);
2446
2447struct cpumask __cpu_online_mask __read_mostly;
2448EXPORT_SYMBOL(__cpu_online_mask);
2449
2450struct cpumask __cpu_present_mask __read_mostly;
2451EXPORT_SYMBOL(__cpu_present_mask);
2452
2453struct cpumask __cpu_active_mask __read_mostly;
2454EXPORT_SYMBOL(__cpu_active_mask);
2455
2456atomic_t __num_online_cpus __read_mostly;
2457EXPORT_SYMBOL(__num_online_cpus);
2458
2459void init_cpu_present(const struct cpumask *src)
2460{
2461        cpumask_copy(&__cpu_present_mask, src);
2462}
2463
2464void init_cpu_possible(const struct cpumask *src)
2465{
2466        cpumask_copy(&__cpu_possible_mask, src);
2467}
2468
2469void init_cpu_online(const struct cpumask *src)
2470{
2471        cpumask_copy(&__cpu_online_mask, src);
2472}
2473
2474void set_cpu_online(unsigned int cpu, bool online)
2475{
2476        /*
2477         * atomic_inc/dec() is required to handle the horrid abuse of this
2478         * function by the reboot and kexec code which invoke it from
2479         * IPI/NMI broadcasts when shutting down CPUs. Invocation from
2480         * regular CPU hotplug is properly serialized.
2481         *
2482         * Note, that the fact that __num_online_cpus is of type atomic_t
2483         * does not protect readers which are not serialized against
2484         * concurrent hotplug operations.
2485         */
2486        if (online) {
2487                if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
2488                        atomic_inc(&__num_online_cpus);
2489        } else {
2490                if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
2491                        atomic_dec(&__num_online_cpus);
2492        }
2493}
2494
2495/*
2496 * Activate the first processor.
2497 */
2498void __init boot_cpu_init(void)
2499{
2500        int cpu = smp_processor_id();
2501
2502        /* Mark the boot cpu "present", "online" etc for SMP and UP case */
2503        set_cpu_online(cpu, true);
2504        set_cpu_active(cpu, true);
2505        set_cpu_present(cpu, true);
2506        set_cpu_possible(cpu, true);
2507
2508#ifdef CONFIG_SMP
2509        __boot_cpu_id = cpu;
2510#endif
2511}
2512
2513/*
2514 * Must be called _AFTER_ setting up the per_cpu areas
2515 */
2516void __init boot_cpu_hotplug_init(void)
2517{
2518#ifdef CONFIG_SMP
2519        cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
2520#endif
2521        this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
2522}
2523
2524/*
2525 * These are used for a global "mitigations=" cmdline option for toggling
2526 * optional CPU mitigations.
2527 */
2528enum cpu_mitigations {
2529        CPU_MITIGATIONS_OFF,
2530        CPU_MITIGATIONS_AUTO,
2531        CPU_MITIGATIONS_AUTO_NOSMT,
2532};
2533
2534static enum cpu_mitigations cpu_mitigations __ro_after_init =
2535        CPU_MITIGATIONS_AUTO;
2536
2537static int __init mitigations_parse_cmdline(char *arg)
2538{
2539        if (!strcmp(arg, "off"))
2540                cpu_mitigations = CPU_MITIGATIONS_OFF;
2541        else if (!strcmp(arg, "auto"))
2542                cpu_mitigations = CPU_MITIGATIONS_AUTO;
2543        else if (!strcmp(arg, "auto,nosmt"))
2544                cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
2545        else
2546                pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2547                        arg);
2548
2549        return 0;
2550}
2551early_param("mitigations", mitigations_parse_cmdline);
2552
2553/* mitigations=off */
2554bool cpu_mitigations_off(void)
2555{
2556        return cpu_mitigations == CPU_MITIGATIONS_OFF;
2557}
2558EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2559
2560/* mitigations=auto,nosmt */
2561bool cpu_mitigations_auto_nosmt(void)
2562{
2563        return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2564}
2565EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
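/*
 * These helpers are queried by architecture specific mitigation selection
 * code. A hedged sketch of the expected pattern; example_select_mitigation()
 * is an illustrative name, not taken from any particular architecture:
 */
static void __init example_select_mitigation(void)
{
        if (cpu_mitigations_off()) {
                /* "mitigations=off": leave this vulnerability unmitigated. */
                return;
        }

        /* Enable the default mitigation for the vulnerability here. */

        if (cpu_mitigations_auto_nosmt()) {
                /* "mitigations=auto,nosmt": additionally disable SMT. */
                cpu_smt_disable(false);
        }
}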
2566