linux/kernel/cpu.c
/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        /* wait queue to wake up the active_writer */
        wait_queue_head_t wq;
        /* verifies that no writer will get active while readers are active */
        struct mutex lock;
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
        .active_writer = NULL,
        .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        .dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
                                  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)


void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        cpuhp_lock_acquire_read();
        mutex_lock(&cpu_hotplug.lock);
        atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return true;
        if (!mutex_trylock(&cpu_hotplug.lock))
                return false;
        cpuhp_lock_acquire_tryread();
        atomic_inc(&cpu_hotplug.refcount);
        mutex_unlock(&cpu_hotplug.lock);
        return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
        int refcount;

        if (cpu_hotplug.active_writer == current)
                return;

        refcount = atomic_dec_return(&cpu_hotplug.refcount);
        if (WARN_ON(refcount < 0)) /* try to fix things up */
                atomic_inc(&cpu_hotplug.refcount);

        if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
                wake_up(&cpu_hotplug.wq);

        cpuhp_lock_release();

}
EXPORT_SYMBOL_GPL(put_online_cpus);
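
/*
 * Illustrative sketch of the reader side above.  The loop body below is a
 * hypothetical caller, not code in this file:
 *
 *      get_online_cpus();
 *      for_each_online_cpu(cpu)
 *              touch_some_per_cpu_state(cpu);  (no CPU can go offline here)
 *      put_online_cpus();
 *
 * Readers nest and may sleep; the writer side (cpu_hotplug_begin/done below)
 * waits until the reader refcount drops to zero.
 */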

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 *
 */
void cpu_hotplug_begin(void)
{
        DEFINE_WAIT(wait);

        cpu_hotplug.active_writer = current;
        cpuhp_lock_acquire();

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (likely(!atomic_read(&cpu_hotplug.refcount)))
                        break;
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
        finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
        cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
        cpu_maps_update_begin();
        cpu_hotplug_disabled++;
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
        cpu_maps_update_begin();
        WARN_ON(--cpu_hotplug_disabled < 0);
        cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif  /* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cpu_chain, nb);
}
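
/*
 * Illustrative sketch of how a subsystem hooks into these events.  The
 * names foo_cpu_callback and foo_cpu_notifier are hypothetical and not
 * part of this file:
 *
 *      static int foo_cpu_callback(struct notifier_block *nfb,
 *                                  unsigned long action, void *hcpu)
 *      {
 *              unsigned int cpu = (unsigned long)hcpu;
 *
 *              switch (action & ~CPU_TASKS_FROZEN) {
 *              case CPU_ONLINE:
 *                      ... set up per-cpu state for @cpu ...
 *                      break;
 *              case CPU_DEAD:
 *                      ... tear it down again ...
 *                      break;
 *              }
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block foo_cpu_notifier = {
 *              .notifier_call = foo_cpu_callback,
 *      };
 *
 *      register_cpu_notifier(&foo_cpu_notifier);
 *
 * __register_cpu_notifier() is the variant for callers that bracket the
 * registration with cpu_notifier_register_begin()/done(), as described in
 * the comment above cpu_maps_update_begin().
 */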

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
        raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
        struct task_struct *p;

        /*
         * This function is called after the cpu is taken down and marked
         * offline, so it's not like new tasks will ever get this cpu set in
         * their mm mask. -- Peter Zijlstra
         * Thus, we may use rcu_read_lock() here, instead of grabbing
         * full-fledged tasklist_lock.
         */
        WARN_ON(cpu_online(cpu));
        rcu_read_lock();
        for_each_process(p) {
                struct task_struct *t;

                /*
                 * Main thread might exit, but other threads may still have
                 * a valid mm. Find one.
                 */
                t = find_lock_task_mm(p);
                if (!t)
                        continue;
                cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
                task_unlock(t);
        }
        rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
        struct task_struct *g, *p;

        read_lock_irq(&tasklist_lock);
        do_each_thread(g, p) {
                if (!p->on_rq)
                        continue;
                /*
                 * We do the check with unlocked task_rq(p)->lock.
                 * Order the reads so that we do not warn about a task
                 * which was running on this cpu in the past and has
                 * just been woken on another cpu.
                 */
                rmb();
                if (task_cpu(p) != dead_cpu)
                        continue;

                pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
                        p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
        } while_each_thread(g, p);
        read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

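/*
 * Calling-context note: _cpu_down() below runs take_cpu_down() via
 * stop_machine() on the CPU being removed, so it executes on that CPU with
 * interrupts disabled while all other CPUs spin in their stopper threads.
 */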
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        /* Give up timekeeping duties */
        tick_handover_do_timer();
        /* Park the stopper thread */
        kthread_park(current);
        return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                pr_warn("%s: attempt to take down CPU %u failed\n",
                        __func__, cpu);
                goto out_release;
        }

        /*
         * By now we've cleared cpu_active_mask, wait for all preempt-disabled
         * and RCU users of this state to go away such that all new such users
         * will observe it.
         *
         * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
         * not imply sync_sched(), so wait for both.
         *
         * Do the sync before parking the smpboot threads to take care of the
         * RCU boost case.
         */
        if (IS_ENABLED(CONFIG_PREEMPT))
                synchronize_rcu_mult(call_rcu, call_rcu_sched);
        else
                synchronize_rcu();

        smpboot_park_threads(cpu);

        /*
         * Prevent irq alloc/free while the dying cpu reorganizes the
         * interrupt affinities.
         */
        irq_lock_sparse();

        /*
         * So now all preempt/rcu users must observe !cpu_active().
         */
        err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                irq_unlock_sparse();
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!per_cpu(cpu_dead_idle, cpu))
                cpu_relax();
        smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
        per_cpu(cpu_dead_idle, cpu) = false;

        /* Interrupts are moved away from the dying cpu, reenable alloc/free */
        irq_unlock_sparse();

        hotplug_cpu__broadcast_tick_pull(cpu);
        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        tick_cleanup_dead_cpu(cpu);
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}

int cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
                               unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action & ~CPU_TASKS_FROZEN) {

        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
                smpboot_unpark_threads(cpu);
                break;

        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
        .notifier_call = smpboot_thread_call,
        .priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
        register_cpu_notifier(&smpboot_thread_notifier);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct task_struct *idle;

        cpu_hotplug_begin();

        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        idle = idle_thread_get(cpu);
        if (IS_ERR(idle)) {
                ret = PTR_ERR(idle);
                goto out;
        }

        ret = smpboot_create_threads(cpu);
        if (ret)
                goto out;

        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                pr_warn("%s: attempt to bring up CPU %u failed\n",
                        __func__, cpu);
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu, idle);

        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Now tell the notifier chain that the CPU is online. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
        cpu_hotplug_done();

        return ret;
}

int cpu_up(unsigned int cpu)
{
        int err = 0;

        if (!cpu_possible(cpu)) {
                pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
                       cpu);
#if defined(CONFIG_IA64)
                pr_err("please check additional_cpus= boot parameter\n");
#endif
                return -EINVAL;
        }

        err = try_online_node(cpu_to_node(cpu));
        if (err)
                return err;

        cpu_maps_update_begin();

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
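
/*
 * Informational note: cpu_up() and cpu_down() above are the generic entry
 * points for CPU hotplug.  Writes to /sys/devices/system/cpu/cpuN/online
 * typically reach them via the cpu subsystem's online/offline callbacks in
 * drivers/base/cpu.c (code that lives outside this file).
 */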

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);

        pr_info("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
                error = _cpu_down(cpu, 1);
                trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        pr_err("Error taking CPU%d down: %d\n", cpu, error);
                        break;
                }
        }

        if (!error)
                BUG_ON(num_online_cpus() > 1);
        else
                pr_err("Non-boot CPUs are not disabled\n");

        /*
         * Make sure the CPUs won't be enabled by someone else. We need to do
         * this even in case of failure as all disable_nonboot_cpus() users are
         * supposed to do enable_nonboot_cpus() on the failure path.
         */
        cpu_hotplug_disabled++;

        cpu_maps_update_done();
        return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        WARN_ON(--cpu_hotplug_disabled < 0);
        if (cpumask_empty(frozen_cpus))
                goto out;

        pr_info("Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                trace_suspend_resume(TPS("CPU_ON"), cpu, true);
                error = _cpu_up(cpu, 1);
                trace_suspend_resume(TPS("CPU_ON"), cpu, false);
                if (!error) {
                        pr_info("CPU%d is up\n", cpu);
                        continue;
                }
                pr_warn("Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}
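
/*
 * Usage note: disable_nonboot_cpus()/enable_nonboot_cpus() are called from
 * the suspend/hibernation core to offline every CPU except the boot CPU
 * before the system image is handled and to bring the same CPUs back on
 * resume; frozen_cpus records which CPUs were taken down so that only those
 * are brought back up.
 */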

static int __init alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
                        unsigned long action, void *ptr)
{
        switch (action) {

        case PM_SUSPEND_PREPARE:
        case PM_HIBERNATION_PREPARE:
                cpu_hotplug_disable();
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
                cpu_hotplug_enable();
                break;

        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}


static int __init cpu_hotplug_pm_sync_init(void)
{
        /*
         * cpu_hotplug_pm_callback has a higher priority than x86's
         * bsp_pm_callback, which relies on cpu_hotplug_pm_callback
         * having already disabled cpu hotplug, avoiding a hotplug race.
 717         */
 718        pm_notifier(cpu_hotplug_pm_callback, 0);
 719        return 0;
 720}
 721core_initcall(cpu_hotplug_pm_sync_init);
 722
 723#endif /* CONFIG_PM_SLEEP_SMP */
 724
 725/**
 726 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 727 * @cpu: cpu that just started
 728 *
 729 * This function calls the cpu_chain notifiers with CPU_STARTING.
 730 * It must be called by the arch code on the new cpu, before the new cpu
 731 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 732 */
 733void notify_cpu_starting(unsigned int cpu)
 734{
 735        unsigned long val = CPU_STARTING;
 736
 737#ifdef CONFIG_PM_SLEEP_SMP
 738        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
 739                val = CPU_STARTING_FROZEN;
 740#endif /* CONFIG_PM_SLEEP_SMP */
 741        cpu_notify(val, (void *)(long)cpu);
 742}
 743
 744#endif /* CONFIG_SMP */
 745
 746/*
 747 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 748 * represents all NR_CPUS bits binary values of 1<<nr.
 749 *
 750 * It is used by cpumask_of() to get a constant address to a CPU
 751 * mask value that has a single bit set only.
 752 */
 753
 754/* cpu_bit_bitmap[0] is empty - so we can back into it */
 755#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
 756#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
 757#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
 758#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
 759
 760const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
 761
 762        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
 763        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
 764#if BITS_PER_LONG > 32
 765        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
 766        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
 767#endif
 768};
 769EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
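
/*
 * Worked example of the "compressed" layout above, as consumed by
 * get_cpu_mask() in <linux/cpumask.h> (figures assume a 64-bit build):
 *
 *      cpumask_of(70) takes row 1 + 70 % 64 = 7, whose word 0 equals
 *      1UL << 6, and backs the pointer up by 70 / 64 = 1 word.  Viewed from
 *      there as a cpumask, word 1 aliases row 7's word 0, so bit
 *      64 + 6 = 70 is the only bit set; the borrowed leading word is a
 *      trailing (all-zero) word of the previous row.  The all-zero row [0]
 *      exists so that CPUs with cpu % BITS_PER_LONG == 0 can back into it
 *      safely.
 */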

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online) {
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        } else {
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
        }
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}