linux/kernel/stop_machine.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005     IBM Corporation.
 * Copyright (C) 2008, 2005     Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010           SUSE Linux Products GmbH
 * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
 */
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
        atomic_t                nr_todo;        /* nr left to execute */
        int                     ret;            /* collected return value */
        struct completion       completion;     /* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
        struct task_struct      *thread;

        raw_spinlock_t          lock;
        bool                    enabled;        /* is this stopper enabled? */
        struct list_head        works;          /* list of pending works */

        struct cpu_stop_work    stop_work;      /* for stop_cpus */
        unsigned long           caller;
        cpu_stop_fn_t           fn;
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

void print_stop_info(const char *log_lvl, struct task_struct *task)
{
        /*
         * If @task is a stopper task, it cannot migrate and task_cpu() is
         * stable.
         */
        struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));

        if (task != stopper->thread)
                return;

        printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
}

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
        memset(done, 0, sizeof(*done));
        atomic_set(&done->nr_todo, nr_todo);
        init_completion(&done->completion);
}

/* signal completion unless @done is NULL */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
        if (atomic_dec_and_test(&done->nr_todo))
                complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
                                        struct cpu_stop_work *work,
                                        struct wake_q_head *wakeq)
{
        list_add_tail(&work->list, &stopper->works);
        wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper.  if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        DEFINE_WAKE_Q(wakeq);
        unsigned long flags;
        bool enabled;

        preempt_disable();
        raw_spin_lock_irqsave(&stopper->lock, flags);
        enabled = stopper->enabled;
        if (enabled)
                __cpu_stop_queue_work(stopper, work, &wakeq);
        else if (work->done)
                cpu_stop_signal_done(work->done);
        raw_spin_unlock_irqrestore(&stopper->lock, flags);

        wake_up_q(&wakeq);
        preempt_enable();

        return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
        struct cpu_stop_done done;
        struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };

        cpu_stop_init_done(&done, 1);
        if (!cpu_stop_queue_work(cpu, &work))
                return -ENOENT;
        /*
         * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
         * cycle by doing a preemption:
         */
        cond_resched();
        wait_for_completion(&done.completion);
        return done.ret;
}
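
/*
 * Editor's note: the block below is an illustrative usage sketch, not part of
 * the original file.  It shows how a caller might run a short, non-sleeping
 * callback on a specific CPU via stop_one_cpu().  The names example_drain_fn
 * and example_drain_cpu_queue() are hypothetical.
 */
#if 0   /* usage sketch only, never compiled */
static int example_drain_fn(void *arg)
{
        unsigned int cpu = *(unsigned int *)arg;

        /*
         * Runs on @cpu in stopper context: highest priority, preempt count
         * raised, so it must not sleep.
         */
        pr_info("draining per-cpu state on CPU%u\n", cpu);
        return 0;
}

static int example_drain_cpu_queue(unsigned int cpu)
{
        /* -ENOENT if @cpu was offline, otherwise example_drain_fn()'s value. */
        return stop_one_cpu(cpu, example_drain_fn, &cpu);
}
#endif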

/* This controls the threads on each CPU. */
enum multi_stop_state {
        /* Dummy starting state for thread. */
        MULTI_STOP_NONE,
        /* Awaiting everyone to be scheduled. */
        MULTI_STOP_PREPARE,
        /* Disable interrupts. */
        MULTI_STOP_DISABLE_IRQ,
        /* Run the function */
        MULTI_STOP_RUN,
        /* Exit */
        MULTI_STOP_EXIT,
};

struct multi_stop_data {
        cpu_stop_fn_t           fn;
        void                    *data;
        /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
        unsigned int            num_threads;
        const struct cpumask    *active_cpus;

        enum multi_stop_state   state;
        atomic_t                thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
                      enum multi_stop_state newstate)
{
        /* Reset ack counter. */
        atomic_set(&msdata->thread_ack, msdata->num_threads);
        smp_wmb();
        WRITE_ONCE(msdata->state, newstate);
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
        if (atomic_dec_and_test(&msdata->thread_ack))
                set_state(msdata, msdata->state + 1);
}

notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
{
        cpu_relax();
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
        struct multi_stop_data *msdata = data;
        enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
        int cpu = smp_processor_id(), err = 0;
        const struct cpumask *cpumask;
        unsigned long flags;
        bool is_active;

        /*
         * When called from stop_machine_from_inactive_cpu(), irq might
         * already be disabled.  Save the state and restore it on exit.
         */
        local_save_flags(flags);

        if (!msdata->active_cpus) {
                cpumask = cpu_online_mask;
                is_active = cpu == cpumask_first(cpumask);
        } else {
                cpumask = msdata->active_cpus;
                is_active = cpumask_test_cpu(cpu, cpumask);
        }

        /* Simple state machine */
        do {
                /* Chill out and ensure we re-read multi_stop_state. */
                stop_machine_yield(cpumask);
                newstate = READ_ONCE(msdata->state);
                if (newstate != curstate) {
                        curstate = newstate;
                        switch (curstate) {
                        case MULTI_STOP_DISABLE_IRQ:
                                local_irq_disable();
                                hard_irq_disable();
                                break;
                        case MULTI_STOP_RUN:
                                if (is_active)
                                        err = msdata->fn(msdata->data);
                                break;
                        default:
                                break;
                        }
                        ack_state(msdata);
                } else if (curstate > MULTI_STOP_PREPARE) {
                        /*
                         * At this stage all other CPUs we depend on must spin
                         * in the same loop. Any reason for hard-lockup should
                         * be detected and reported on their side.
                         */
                        touch_nmi_watchdog();
                }
                rcu_momentary_dyntick_idle();
        } while (curstate != MULTI_STOP_EXIT);

        local_irq_restore(flags);
        return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
                                    int cpu2, struct cpu_stop_work *work2)
{
        struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
        struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
        DEFINE_WAKE_Q(wakeq);
        int err;

retry:
        /*
         * The waking up of stopper threads has to happen in the same
         * scheduling context as the queueing.  Otherwise, there is a
         * possibility of one of the above stoppers being woken up by another
         * CPU, and preempting us. This will cause us to not wake up the other
         * stopper forever.
         */
        preempt_disable();
        raw_spin_lock_irq(&stopper1->lock);
        raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

        if (!stopper1->enabled || !stopper2->enabled) {
                err = -ENOENT;
                goto unlock;
        }

        /*
         * Ensure that if we race with __stop_cpus() the stoppers won't get
         * queued up in reverse order leading to system deadlock.
         *
         * We can't miss stop_cpus_in_progress if queue_stop_cpus_work() has
         * queued a work on cpu1 but not on cpu2, we hold both locks.
         *
         * It can be falsely true but it is safe to spin until it is cleared,
         * queue_stop_cpus_work() does everything under preempt_disable().
         */
        if (unlikely(stop_cpus_in_progress)) {
                err = -EDEADLK;
                goto unlock;
        }

        err = 0;
        __cpu_stop_queue_work(stopper1, work1, &wakeq);
        __cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
        raw_spin_unlock(&stopper2->lock);
        raw_spin_unlock_irq(&stopper1->lock);

        if (unlikely(err == -EDEADLK)) {
                preempt_enable();

                while (stop_cpus_in_progress)
                        cpu_relax();

                goto retry;
        }

        wake_up_q(&wakeq);
        preempt_enable();

        return err;
}
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both the current and specified CPU and runs @fn on one of them.
 *
 * returns when both are completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
        struct cpu_stop_done done;
        struct cpu_stop_work work1, work2;
        struct multi_stop_data msdata;

        msdata = (struct multi_stop_data){
                .fn = fn,
                .data = arg,
                .num_threads = 2,
                .active_cpus = cpumask_of(cpu1),
        };

        work1 = work2 = (struct cpu_stop_work){
                .fn = multi_cpu_stop,
                .arg = &msdata,
                .done = &done,
                .caller = _RET_IP_,
        };

        cpu_stop_init_done(&done, 2);
        set_state(&msdata, MULTI_STOP_PREPARE);

        if (cpu1 > cpu2)
                swap(cpu1, cpu2);
        if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
                return -ENOENT;

        wait_for_completion(&done.completion);
        return done.ret;
}
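
/*
 * Editor's note: illustrative sketch, not part of the original file.  The
 * scheduler's task-swap path is the classic user of stop_two_cpus(); the
 * simplified names example_swap_state, example_swap_fn and example_swap()
 * below are hypothetical.
 */
#if 0   /* usage sketch only, never compiled */
struct example_swap_state {
        unsigned int src_cpu;
        unsigned int dst_cpu;
};

static int example_swap_fn(void *arg)
{
        struct example_swap_state *st = arg;

        /*
         * Both CPUs are spinning in multi_cpu_stop() with IRQs disabled;
         * @fn itself runs only on the cpu1 passed to stop_two_cpus()
         * (its active_cpus mask).
         */
        pr_info("both CPU%u and CPU%u are stopped\n", st->src_cpu, st->dst_cpu);
        return 0;
}

static int example_swap(unsigned int src_cpu, unsigned int dst_cpu)
{
        struct example_swap_state st = { .src_cpu = src_cpu, .dst_cpu = dst_cpu };

        return stop_two_cpus(src_cpu, dst_cpu, example_swap_fn, &st);
}
#endif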

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
                        struct cpu_stop_work *work_buf)
{
        *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
        return cpu_stop_queue_work(cpu, work_buf);
}
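
/*
 * Editor's note: illustrative sketch, not part of the original file.  It shows
 * the fire-and-forget pattern: @work_buf must outlive the request, so it is
 * typically per-cpu or embedded in a longer-lived object.  The names
 * example_kick_work, example_kick_fn and example_kick_cpu() are hypothetical.
 */
#if 0   /* usage sketch only, never compiled */
static DEFINE_PER_CPU(struct cpu_stop_work, example_kick_work);

static int example_kick_fn(void *arg)
{
        /* Preemption is disabled in stopper context, smp_processor_id() is fine. */
        pr_info("stopper callback on CPU%d\n", smp_processor_id());
        return 0;
}

static void example_kick_cpu(unsigned int cpu)
{
        /*
         * Returns immediately; true means the work was queued and
         * example_kick_fn() will eventually run on @cpu.
         */
        if (!stop_one_cpu_nowait(cpu, example_kick_fn, NULL,
                                 &per_cpu(example_kick_work, cpu)))
                pr_info("CPU%u stopper is offline, request dropped\n", cpu);
}
#endif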

static bool queue_stop_cpus_work(const struct cpumask *cpumask,
                                 cpu_stop_fn_t fn, void *arg,
                                 struct cpu_stop_done *done)
{
        struct cpu_stop_work *work;
        unsigned int cpu;
        bool queued = false;

        /*
         * Disable preemption while queueing to avoid getting
         * preempted by a stopper which might wait for other stoppers
         * to enter @fn which can lead to deadlock.
         */
        preempt_disable();
        stop_cpus_in_progress = true;
        barrier();
        for_each_cpu(cpu, cpumask) {
                work = &per_cpu(cpu_stopper.stop_work, cpu);
                work->fn = fn;
                work->arg = arg;
                work->done = done;
                work->caller = _RET_IP_;
                if (cpu_stop_queue_work(cpu, work))
                        queued = true;
        }
        barrier();
        stop_cpus_in_progress = false;
        preempt_enable();

        return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
                       cpu_stop_fn_t fn, void *arg)
{
        struct cpu_stop_done done;

        cpu_stop_init_done(&done, cpumask_weight(cpumask));
        if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
                return -ENOENT;
        wait_for_completion(&done.completion);
        return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, any non zero return value if any returned non zero.
 */
static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
        int ret;

        /* static works are used, process one request at a time */
        mutex_lock(&stop_cpus_mutex);
        ret = __stop_cpus(cpumask, fn, arg);
        mutex_unlock(&stop_cpus_mutex);
        return ret;
}
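
/*
 * Editor's note: illustrative sketch, not part of the original file.  Since
 * stop_cpus() is static, only code in this file can call it (in-tree it is
 * reached through stop_machine_cpuslocked()).  The sketch below only shows
 * the calling convention; example_percpu_fn and example_run_on_mask() are
 * hypothetical names.
 */
#if 0   /* usage sketch only, never compiled */
static int example_percpu_fn(void *arg)
{
        /* Runs once on every online CPU in the mask, in stopper context. */
        atomic_inc((atomic_t *)arg);
        return 0;
}

static int example_run_on_mask(const struct cpumask *mask)
{
        atomic_t visited = ATOMIC_INIT(0);
        int ret;

        /* 0 if every invocation returned 0, -ENOENT if all CPUs were offline. */
        ret = stop_cpus(mask, example_percpu_fn, &visited);
        pr_info("fn ran on %d cpu(s), ret=%d\n", atomic_read(&visited), ret);
        return ret;
}
#endif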

static int cpu_stop_should_run(unsigned int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        unsigned long flags;
        int run;

        raw_spin_lock_irqsave(&stopper->lock, flags);
        run = !list_empty(&stopper->works);
        raw_spin_unlock_irqrestore(&stopper->lock, flags);
        return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        struct cpu_stop_work *work;

repeat:
        work = NULL;
        raw_spin_lock_irq(&stopper->lock);
        if (!list_empty(&stopper->works)) {
                work = list_first_entry(&stopper->works,
                                        struct cpu_stop_work, list);
                list_del_init(&work->list);
        }
        raw_spin_unlock_irq(&stopper->lock);

        if (work) {
                cpu_stop_fn_t fn = work->fn;
                void *arg = work->arg;
                struct cpu_stop_done *done = work->done;
                int ret;

                /* cpu stop callbacks must not sleep, make in_atomic() == T */
                stopper->caller = work->caller;
                stopper->fn = fn;
                preempt_count_inc();
                ret = fn(arg);
                if (done) {
                        if (ret)
                                done->ret = ret;
                        cpu_stop_signal_done(done);
                }
                preempt_count_dec();
                stopper->fn = NULL;
                stopper->caller = 0;
                WARN_ONCE(preempt_count(),
                          "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
                goto repeat;
        }
}

void stop_machine_park(int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        /*
         * Lockless. cpu_stopper_thread() will take stopper->lock and flush
         * the pending works before it parks, until then it is fine to queue
         * the new works.
         */
        stopper->enabled = false;
        kthread_park(stopper->thread);
}

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
        sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

        WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

        stopper->enabled = true;
        kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
        .store                  = &cpu_stopper.thread,
        .thread_should_run      = cpu_stop_should_run,
        .thread_fn              = cpu_stopper_thread,
        .thread_comm            = "migration/%u",
        .create                 = cpu_stop_create,
        .park                   = cpu_stop_park,
        .selfparking            = true,
};

static int __init cpu_stop_init(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

                raw_spin_lock_init(&stopper->lock);
                INIT_LIST_HEAD(&stopper->works);
        }

        BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
        stop_machine_unpark(raw_smp_processor_id());
        stop_machine_initialized = true;
        return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
                            const struct cpumask *cpus)
{
        struct multi_stop_data msdata = {
                .fn = fn,
                .data = data,
                .num_threads = num_online_cpus(),
                .active_cpus = cpus,
        };

        lockdep_assert_cpus_held();

        if (!stop_machine_initialized) {
                /*
                 * Handle the case where stop_machine() is called
                 * early in boot before stop_machine() has been
                 * initialized.
                 */
                unsigned long flags;
                int ret;

                WARN_ON_ONCE(msdata.num_threads != 1);

                local_irq_save(flags);
                hard_irq_disable();
                ret = (*fn)(data);
                local_irq_restore(flags);

                return ret;
        }

        /* Set the initial state and stop all online cpus. */
        set_state(&msdata, MULTI_STOP_PREPARE);
        return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
        int ret;

        /* No CPUs can come up or down during this. */
        cpus_read_lock();
        ret = stop_machine_cpuslocked(fn, data, cpus);
        cpus_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
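
/*
 * Editor's note: illustrative sketch, not part of the original file.  The
 * typical stop_machine() pattern is to flip some global state that every CPU
 * might be reading, while all online CPUs spin with interrupts disabled.  The
 * names example_mode, example_switch_fn and example_set_mode() are
 * hypothetical.
 */
#if 0   /* usage sketch only, never compiled */
static int example_mode;

static int example_switch_fn(void *arg)
{
        /*
         * Executed while all other online CPUs spin in multi_cpu_stop()
         * with IRQs disabled, so no CPU can observe a half-updated
         * example_mode.
         */
        example_mode = *(int *)arg;
        return 0;
}

static int example_set_mode(int new_mode)
{
        /* NULL cpumask: run example_switch_fn() on the first online CPU. */
        return stop_machine(example_switch_fn, &new_mode, NULL);
}
#endif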

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start) and not marked active and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, any non zero return value if any
 * returned non zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
                                  const struct cpumask *cpus)
{
        struct multi_stop_data msdata = { .fn = fn, .data = data,
                                            .active_cpus = cpus };
        struct cpu_stop_done done;
        int ret;

        /* Local CPU must be inactive and CPU hotplug in progress. */
        BUG_ON(cpu_active(raw_smp_processor_id()));
        msdata.num_threads = num_active_cpus() + 1;     /* +1 for local */

        /* No proper task established and can't sleep - busy wait for lock. */
        while (!mutex_trylock(&stop_cpus_mutex))
                cpu_relax();

        /* Schedule work on other CPUs and execute directly for local CPU */
        set_state(&msdata, MULTI_STOP_PREPARE);
        cpu_stop_init_done(&done, num_active_cpus());
        queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
                             &done);
        ret = multi_cpu_stop(&msdata);

        /* Busy wait for completion. */
        while (!completion_done(&done.completion))
                cpu_relax();

        mutex_unlock(&stop_cpus_mutex);
        return ret ?: done.ret;
}

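/*
 * Editor's note: illustrative sketch, not part of the original file.  This
 * interface exists for rendezvous-style updates (MTRR-like) issued from a CPU
 * that is coming up and is not yet active.  The names example_sync_fn and
 * example_sync_from_new_cpu() are hypothetical.
 */
#if 0   /* usage sketch only, never compiled */
static int example_sync_fn(void *arg)
{
        /*
         * All active CPUs and the inactive caller rendezvous in
         * multi_cpu_stop() with IRQs off; @fn itself runs on the CPUs in
         * @cpus (or on the first online CPU when @cpus is NULL).
         */
        return 0;
}

static int example_sync_from_new_cpu(void)
{
        /*
         * Must be called from a CPU that is not yet marked active, i.e.
         * from the hotplug bringup path; it busy-waits instead of sleeping.
         */
        return stop_machine_from_inactive_cpu(example_sync_fn, NULL, NULL);
}
#endif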