linux/kernel/smp.c
   1/*
   2 * Generic helpers for smp ipi calls
   3 *
   4 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
   5 */
   6#include <linux/irq_work.h>
   7#include <linux/rcupdate.h>
   8#include <linux/rculist.h>
   9#include <linux/kernel.h>
  10#include <linux/export.h>
  11#include <linux/percpu.h>
  12#include <linux/init.h>
  13#include <linux/gfp.h>
  14#include <linux/smp.h>
  15#include <linux/cpu.h>
  16#include <linux/sched.h>
  17
  18#include "smpboot.h"
  19
  20enum {
  21        CSD_FLAG_LOCK           = 0x01,
  22        CSD_FLAG_SYNCHRONOUS    = 0x02,
  23};
  24
  25struct call_function_data {
  26        struct call_single_data __percpu *csd;
  27        cpumask_var_t           cpumask;
  28};
  29
  30static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
  31
  32static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
  33
  34static void flush_smp_call_function_queue(bool warn_cpu_offline);
  35
  36static int
  37hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
  38{
  39        long cpu = (long)hcpu;
  40        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
  41
  42        switch (action) {
  43        case CPU_UP_PREPARE:
  44        case CPU_UP_PREPARE_FROZEN:
  45                if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
  46                                cpu_to_node(cpu)))
  47                        return notifier_from_errno(-ENOMEM);
  48                cfd->csd = alloc_percpu(struct call_single_data);
  49                if (!cfd->csd) {
  50                        free_cpumask_var(cfd->cpumask);
  51                        return notifier_from_errno(-ENOMEM);
  52                }
  53                break;
  54
  55#ifdef CONFIG_HOTPLUG_CPU
  56        case CPU_UP_CANCELED:
  57        case CPU_UP_CANCELED_FROZEN:
  58                /* Fall-through to the CPU_DEAD[_FROZEN] case. */
  59
  60        case CPU_DEAD:
  61        case CPU_DEAD_FROZEN:
  62                free_cpumask_var(cfd->cpumask);
  63                free_percpu(cfd->csd);
  64                break;
  65
  66        case CPU_DYING:
  67        case CPU_DYING_FROZEN:
  68                /*
  69                 * The IPIs for the smp-call-function callbacks queued by other
  70                 * CPUs might arrive late, either due to hardware latencies or
  71                 * because this CPU disabled interrupts (inside stop-machine)
  72                 * before the IPIs were sent. So flush out any pending callbacks
  73                 * explicitly (without waiting for the IPIs to arrive), to
  74                 * ensure that the outgoing CPU doesn't go offline with work
  75                 * still pending.
  76                 */
  77                flush_smp_call_function_queue(false);
  78                break;
  79#endif
  80        }
  81
  82        return NOTIFY_OK;
  83}
  84
  85static struct notifier_block hotplug_cfd_notifier = {
  86        .notifier_call          = hotplug_cfd,
  87};
  88
  89void __init call_function_init(void)
  90{
  91        void *cpu = (void *)(long)smp_processor_id();
  92        int i;
  93
  94        for_each_possible_cpu(i)
  95                init_llist_head(&per_cpu(call_single_queue, i));
  96
  97        hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
  98        register_cpu_notifier(&hotplug_cfd_notifier);
  99}
 100
 101/*
 102 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 103 *
 104 * For non-synchronous IPI calls the csd can still be in use by the
 105 * previous function call. For multi-cpu calls it's even more interesting,
 106 * as we'll have to ensure no other cpu is observing our csd.
 107 */
 108static __always_inline void csd_lock_wait(struct call_single_data *csd)
 109{
 110        smp_cond_acquire(!(csd->flags & CSD_FLAG_LOCK));
 111}
 112
 113static __always_inline void csd_lock(struct call_single_data *csd)
 114{
 115        csd_lock_wait(csd);
 116        csd->flags |= CSD_FLAG_LOCK;
 117
 118        /*
 119         * prevent CPU from reordering the above assignment
 120         * to ->flags with any subsequent assignments to other
 121         * fields of the specified call_single_data structure:
 122         */
 123        smp_wmb();
 124}
 125
 126static __always_inline void csd_unlock(struct call_single_data *csd)
 127{
 128        WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
 129
 130        /*
 131         * ensure we're all done before releasing data:
 132         */
 133        smp_store_release(&csd->flags, 0);
 134}
 135
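/*
 * A minimal sketch of the csd ownership protocol the helpers above implement:
 * claim the lock bit before publishing ->func/->info, release it when the csd
 * may be reused. The function name is hypothetical, for illustration only.
 */
static void __maybe_unused csd_protocol_sketch(struct call_single_data *csd,
                                               smp_call_func_t func, void *info)
{
        csd_lock(csd);          /* wait for any previous owner, then claim */
        csd->func = func;       /* payload may be written while locked */
        csd->info = info;
        csd_unlock(csd);        /* release: a csd_lock_wait()er may proceed */
}
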
 136static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
 137
 138/*
 139 * Queue @func(@info) for execution on the given CPU, or run it directly
 140 * when @cpu is the local CPU. @csd must already be locked by the caller
 141 * (CSD_FLAG_LOCK set); its ->func and ->info are set here before queueing.
 142 */
 143static int generic_exec_single(int cpu, struct call_single_data *csd,
 144                               smp_call_func_t func, void *info)
 145{
 146        if (cpu == smp_processor_id()) {
 147                unsigned long flags;
 148
 149                /*
 150                 * We can unlock early even for the synchronous on-stack case,
 151                 * since we're doing this from the same CPU.
 152                 */
 153                csd_unlock(csd);
 154                local_irq_save(flags);
 155                func(info);
 156                local_irq_restore(flags);
 157                return 0;
 158        }
 159
 161        if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
 162                csd_unlock(csd);
 163                return -ENXIO;
 164        }
 165
 166        csd->func = func;
 167        csd->info = info;
 168
 169        /*
 170         * The list addition must be visible to the IPI handler (which
 171         * locks the list to pull the entry off it) before the IPI is sent,
 172         * because of the normal cache coherency rules implied by spinlocks.
 173         *
 174         * If IPIs can be delivered out of order with respect to the cache
 175         * coherency protocol on an architecture, sufficient synchronisation
 176         * should be added to arch code to make it appear to obey cache
 177         * coherency WRT locking and barrier primitives. Generic code isn't
 178         * really equipped to do the right thing...
 179         */
 180        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
 181                arch_send_call_function_single_ipi(cpu);
 182
 183        return 0;
 184}
 185
 186/**
 187 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
 188 *
 189 * Invoked by arch to handle an IPI for call function single.
 190 * Must be called with interrupts disabled.
 191 */
 192void generic_smp_call_function_single_interrupt(void)
 193{
 194        flush_smp_call_function_queue(true);
 195}
 196
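/*
 * A minimal sketch of how an architecture's IPI handler hands off to the
 * generic entry point above; the function name is hypothetical, and real
 * arch code calls this from its interrupt entry path, interrupts disabled.
 */
static void __maybe_unused example_arch_ipi_handler(void)
{
        generic_smp_call_function_single_interrupt();
}
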
 197/**
 198 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 199 *
 200 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 201 *                    offline CPU. Skip this check if set to 'false'.
 202 *
 203 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 204 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 205 * to ensure that all pending IPI callbacks are run before it goes completely
 206 * offline.
 207 *
 208 * Loop through the call_single_queue and run all the queued callbacks.
 209 * Must be called with interrupts disabled.
 210 */
 211static void flush_smp_call_function_queue(bool warn_cpu_offline)
 212{
 213        struct llist_head *head;
 214        struct llist_node *entry;
 215        struct call_single_data *csd, *csd_next;
 216        static bool warned;
 217
 218        WARN_ON(!irqs_disabled());
 219
 220        head = this_cpu_ptr(&call_single_queue);
 221        entry = llist_del_all(head);
 222        entry = llist_reverse_order(entry);
 223
 224        /* There shouldn't be any pending callbacks on an offline CPU. */
 225        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
 226                     !warned && !llist_empty(head))) {
 227                warned = true;
 228                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
 229
 230                /*
 231                 * We don't have to use the _safe() variant here
 232                 * because we are not invoking the IPI handlers yet.
 233                 */
 234                llist_for_each_entry(csd, entry, llist)
 235                        pr_warn("IPI callback %pS sent to offline CPU\n",
 236                                csd->func);
 237        }
 238
 239        llist_for_each_entry_safe(csd, csd_next, entry, llist) {
 240                smp_call_func_t func = csd->func;
 241                void *info = csd->info;
 242
 243                /* Do we wait until *after* callback? */
 244                if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
 245                        func(info);
 246                        csd_unlock(csd);
 247                } else {
 248                        csd_unlock(csd);
 249                        func(info);
 250                }
 251        }
 252
 253        /*
 254         * Handle irq works queued remotely by irq_work_queue_on().
 255         * The smp function calls above are typically synchronous, so they
 256         * had better run first, since some other CPUs may be busy waiting
 257         * for them.
 258         */
 259        irq_work_run();
 260}
 261
 262/*
 263 * smp_call_function_single - Run a function on a specific CPU
 264 * @cpu: The CPU to run @func on.
 265 * @func: The function to run. This must be fast and non-blocking.
 266 * @info: An arbitrary pointer to pass to the function.
 267 * @wait: If true, wait until @func has completed on the target CPU.
 268 * Returns 0 on success, else a negative status code.
 269 */
 270int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 271                             int wait)
 272{
 273        struct call_single_data *csd;
 274        struct call_single_data csd_stack = { .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS };
 275        int this_cpu;
 276        int err;
 277
 278        /*
 279         * prevent preemption and reschedule on another processor,
 280         * as well as CPU removal
 281         */
 282        this_cpu = get_cpu();
 283
 284        /*
 285         * Can deadlock when called with interrupts disabled.
 286         * We allow CPUs that are not yet online though, as no one else can
 287         * send an smp call function interrupt to this cpu and as such
 288         * deadlocks can't happen.
 289         */
 290        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
 291                     && !oops_in_progress);
 292
 293        csd = &csd_stack;
 294        if (!wait) {
 295                csd = this_cpu_ptr(&csd_data);
 296                csd_lock(csd);
 297        }
 298
 299        err = generic_exec_single(cpu, csd, func, info);
 300
 301        if (wait)
 302                csd_lock_wait(csd);
 303
 304        put_cpu();
 305
 306        return err;
 307}
 308EXPORT_SYMBOL(smp_call_function_single);
 309
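/*
 * A minimal usage sketch of smp_call_function_single(); the callback and the
 * "example_" names are hypothetical, for illustration only.
 */
static void example_report_cpu(void *info)
{
        /* Runs on the target CPU with interrupts disabled. */
        *(int *)info = smp_processor_id();
}

static int __maybe_unused example_query_cpu(int cpu)
{
        int err, reported = -1;

        /* wait=1: do not return until the callback has run on @cpu. */
        err = smp_call_function_single(cpu, example_report_cpu, &reported, 1);
        if (!err)
                pr_info("example: CPU %d reported id %d\n", cpu, reported);
        return err;
}
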
 310/**
 311 * smp_call_function_single_async(): Run an asynchronous function on a
 312 *                               specific CPU.
 313 * @cpu: The CPU to run on.
 314 * @csd: Pre-allocated and setup data structure
 315 *
 316 * Like smp_call_function_single(), but the call is asynchronous and
 317 * can thus be done from contexts with disabled interrupts.
 318 *
 319 * The caller passes its own pre-allocated data structure
 320 * (i.e. one embedded in an object) and is responsible for synchronizing it
 321 * such that the IPIs performed on the @csd are strictly serialized.
 322 *
 323 * NOTE: Be careful, there is unfortunately no current debugging facility to
 324 * validate the correctness of this serialization.
 325 */
 326int smp_call_function_single_async(int cpu, struct call_single_data *csd)
 327{
 328        int err = 0;
 329
 330        preempt_disable();
 331
 332        /* We could deadlock if we have to wait here with interrupts disabled! */
 333        if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
 334                csd_lock_wait(csd);
 335
 336        csd->flags = CSD_FLAG_LOCK;
 337        smp_wmb();
 338
 339        err = generic_exec_single(cpu, csd, csd->func, csd->info);
 340        preempt_enable();
 341
 342        return err;
 343}
 344EXPORT_SYMBOL_GPL(smp_call_function_single_async);
 345
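/*
 * An illustrative sketch of smp_call_function_single_async(); the structure
 * and names are hypothetical. The csd is embedded in the caller's object, as
 * required above, and must not be reused until the previous call has finished.
 */
struct example_async_work {
        struct call_single_data csd;
        int cpu_seen;
};

static void example_async_handler(void *info)
{
        struct example_async_work *work = info;

        work->cpu_seen = smp_processor_id();
}

static int __maybe_unused example_fire_async(struct example_async_work *work,
                                             int cpu)
{
        work->csd.func = example_async_handler;
        work->csd.info = work;

        /* Never waits, so this is safe even with interrupts disabled. */
        return smp_call_function_single_async(cpu, &work->csd);
}
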
 346/*
 347 * smp_call_function_any - Run a function on any of the given cpus
 348 * @mask: The mask of cpus it can run on.
 349 * @func: The function to run. This must be fast and non-blocking.
 350 * @info: An arbitrary pointer to pass to the function.
 351 * @wait: If true, wait until function has completed.
 352 *
 353 * Returns 0 on success, else a negative status code (if no cpus were online).
 354 *
 355 * Selection preference:
 356 *      1) current cpu if in @mask
 357 *      2) any cpu of current node if in @mask
 358 *      3) any other online cpu in @mask
 359 */
 360int smp_call_function_any(const struct cpumask *mask,
 361                          smp_call_func_t func, void *info, int wait)
 362{
 363        unsigned int cpu;
 364        const struct cpumask *nodemask;
 365        int ret;
 366
 367        /* Try for same CPU (cheapest) */
 368        cpu = get_cpu();
 369        if (cpumask_test_cpu(cpu, mask))
 370                goto call;
 371
 372        /* Try for same node. */
 373        nodemask = cpumask_of_node(cpu_to_node(cpu));
 374        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
 375             cpu = cpumask_next_and(cpu, nodemask, mask)) {
 376                if (cpu_online(cpu))
 377                        goto call;
 378        }
 379
 380        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
 381        cpu = cpumask_any_and(mask, cpu_online_mask);
 382call:
 383        ret = smp_call_function_single(cpu, func, info, wait);
 384        put_cpu();
 385        return ret;
 386}
 387EXPORT_SYMBOL_GPL(smp_call_function_any);
 388
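/*
 * An illustrative sketch of smp_call_function_any(), with hypothetical names:
 * a caller that just needs the work done on some suitable CPU of a set lets
 * the helper pick the cheapest one, preferring the local CPU.
 */
static void example_touch(void *info)
{
        ++*(int *)info;
}

static int __maybe_unused example_touch_any_online(void)
{
        int count = 0;

        /* Let the core pick a CPU; wait for the callback to finish. */
        return smp_call_function_any(cpu_online_mask, example_touch, &count, 1);
}
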
 389/**
 390 * smp_call_function_many(): Run a function on a set of other CPUs.
 391 * @mask: The set of cpus to run on (only runs on online subset).
 392 * @func: The function to run. This must be fast and non-blocking.
 393 * @info: An arbitrary pointer to pass to the function.
 394 * @wait: If true, wait (atomically) until function has completed
 395 *        on other CPUs.
 396 *
 397 * If @wait is true, then returns once @func has returned.
 398 *
 399 * You must not call this function with disabled interrupts or from a
 400 * hardware interrupt handler or from a bottom half handler. Preemption
 401 * must be disabled when calling this function.
 402 */
 403void smp_call_function_many(const struct cpumask *mask,
 404                            smp_call_func_t func, void *info, bool wait)
 405{
 406        struct call_function_data *cfd;
 407        int cpu, next_cpu, this_cpu = smp_processor_id();
 408
 409        /*
 410         * Can deadlock when called with interrupts disabled.
 411         * We allow CPUs that are not yet online though, as no one else can
 412         * send an smp call function interrupt to this cpu and as such
 413         * deadlocks can't happen.
 414         */
 415        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
 416                     && !oops_in_progress && !early_boot_irqs_disabled);
 417
 418        /* Fastpath check: find the first CPU they want, ignoring this one. */
 419        cpu = cpumask_first_and(mask, cpu_online_mask);
 420        if (cpu == this_cpu)
 421                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
 422
 423        /* No online cpus?  We're done. */
 424        if (cpu >= nr_cpu_ids)
 425                return;
 426
 427        /* Do we have another CPU which isn't us? */
 428        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
 429        if (next_cpu == this_cpu)
 430                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
 431
 432        /* Fastpath: do that cpu by itself. */
 433        if (next_cpu >= nr_cpu_ids) {
 434                smp_call_function_single(cpu, func, info, wait);
 435                return;
 436        }
 437
 438        cfd = this_cpu_ptr(&cfd_data);
 439
 440        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
 441        cpumask_clear_cpu(this_cpu, cfd->cpumask);
 442
 443        /* Some callers race with other cpus changing the passed mask */
 444        if (unlikely(!cpumask_weight(cfd->cpumask)))
 445                return;
 446
 447        for_each_cpu(cpu, cfd->cpumask) {
 448                struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
 449
 450                csd_lock(csd);
 451                if (wait)
 452                        csd->flags |= CSD_FLAG_SYNCHRONOUS;
 453                csd->func = func;
 454                csd->info = info;
 455                llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
 456        }
 457
 458        /* Send a message to all CPUs in the map */
 459        arch_send_call_function_ipi_mask(cfd->cpumask);
 460
 461        if (wait) {
 462                for_each_cpu(cpu, cfd->cpumask) {
 463                        struct call_single_data *csd;
 464
 465                        csd = per_cpu_ptr(cfd->csd, cpu);
 466                        csd_lock_wait(csd);
 467                }
 468        }
 469}
 470EXPORT_SYMBOL(smp_call_function_many);
 471
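/*
 * An illustrative sketch with hypothetical names: smp_call_function_many()
 * never IPIs the calling CPU, so callers that also want the work done locally
 * run the function themselves, exactly as on_each_cpu_mask() below does.
 */
static void example_poke(void *info)
{
}

static void __maybe_unused example_poke_cpus(const struct cpumask *mask)
{
        preempt_disable();      /* smp_call_function_many() requires this */
        smp_call_function_many(mask, example_poke, NULL, true);
        if (cpumask_test_cpu(smp_processor_id(), mask)) {
                unsigned long flags;

                local_irq_save(flags);  /* match what the IPI path provides */
                example_poke(NULL);     /* cover the local CPU by hand */
                local_irq_restore(flags);
        }
        preempt_enable();
}
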
 472/**
 473 * smp_call_function(): Run a function on all other CPUs.
 474 * @func: The function to run. This must be fast and non-blocking.
 475 * @info: An arbitrary pointer to pass to the function.
 476 * @wait: If true, wait (atomically) until function has completed
 477 *        on other CPUs.
 478 *
 479 * Returns 0.
 480 *
 481 * If @wait is true, then returns once @func has returned; otherwise
 482 * it may return before the other CPUs have run @func.
 483 *
 484 * You must not call this function with disabled interrupts or from a
 485 * hardware interrupt handler or from a bottom half handler.
 486 */
 487int smp_call_function(smp_call_func_t func, void *info, int wait)
 488{
 489        preempt_disable();
 490        smp_call_function_many(cpu_online_mask, func, info, wait);
 491        preempt_enable();
 492
 493        return 0;
 494}
 495EXPORT_SYMBOL(smp_call_function);
 496
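/*
 * An illustrative sketch with a hypothetical callback name: a common pattern
 * is to run a callback on every other online CPU and wait for it to finish
 * everywhere.
 */
static void example_sync_caches(void *info)
{
}

static void __maybe_unused example_sync_all_others(void)
{
        /* Must not be called with interrupts disabled or from IRQ context. */
        smp_call_function(example_sync_caches, NULL, 1);
}
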
 497/* Setup configured maximum number of CPUs to activate */
 498unsigned int setup_max_cpus = NR_CPUS;
 499EXPORT_SYMBOL(setup_max_cpus);
 500
 501
 502/*
 503 * Setup routine for controlling SMP activation
 504 *
 505 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 506 * activation entirely (the MPS table probe still happens, though).
 507 *
 508 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 509 * greater than 0, limits the maximum number of CPUs activated in
 510 * SMP mode to <NUM>.
 511 */
 512
 513void __weak arch_disable_smp_support(void) { }
 514
 515static int __init nosmp(char *str)
 516{
 517        setup_max_cpus = 0;
 518        arch_disable_smp_support();
 519
 520        return 0;
 521}
 522
 523early_param("nosmp", nosmp);
 524
 525/* this is the hard limit */
 526static int __init nrcpus(char *str)
 527{
 528        int nr_cpus;
 529
 530        get_option(&str, &nr_cpus);
 531        if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
 532                nr_cpu_ids = nr_cpus;
 533
 534        return 0;
 535}
 536
 537early_param("nr_cpus", nrcpus);
 538
 539static int __init maxcpus(char *str)
 540{
 541        get_option(&str, &setup_max_cpus);
 542        if (setup_max_cpus == 0)
 543                arch_disable_smp_support();
 544
 545        return 0;
 546}
 547
 548early_param("maxcpus", maxcpus);
 549
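/*
 * For illustration, example kernel command lines using the parameters
 * handled above:
 *
 *      nosmp           - boot with the boot CPU only
 *      maxcpus=2       - bring up at most two CPUs during boot
 *      nr_cpus=4       - hard-limit nr_cpu_ids to four possible CPUs
 */
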
 550/* Setup number of possible processor ids */
 551int nr_cpu_ids __read_mostly = NR_CPUS;
 552EXPORT_SYMBOL(nr_cpu_ids);
 553
 554/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
 555void __init setup_nr_cpu_ids(void)
 556{
 557        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
 558}
 559
 560void __weak smp_announce(void)
 561{
 562        printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
 563}
 564
 565/* Called by boot processor to activate the rest. */
 566void __init smp_init(void)
 567{
 568        unsigned int cpu;
 569
 570        idle_threads_init();
 571        cpuhp_threads_init();
 572
 573        /* FIXME: This should be done in userspace --RR */
 574        for_each_present_cpu(cpu) {
 575                if (num_online_cpus() >= setup_max_cpus)
 576                        break;
 577                if (!cpu_online(cpu))
 578                        cpu_up(cpu);
 579        }
 580
 581        /* Any cleanup work */
 582        smp_announce();
 583        smp_cpus_done(setup_max_cpus);
 584}
 585
 586/*
 587 * Call a function on all processors.  May be used during early boot while
 588 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 589 * of local_irq_disable/enable().
 590 */
 591int on_each_cpu(void (*func) (void *info), void *info, int wait)
 592{
 593        unsigned long flags;
 594        int ret = 0;
 595
 596        preempt_disable();
 597        ret = smp_call_function(func, info, wait);
 598        local_irq_save(flags);
 599        func(info);
 600        local_irq_restore(flags);
 601        preempt_enable();
 602        return ret;
 603}
 604EXPORT_SYMBOL(on_each_cpu);
 605
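/*
 * An illustrative sketch with a hypothetical callback name: unlike
 * smp_call_function(), on_each_cpu() also runs @func on the calling CPU
 * (with interrupts disabled).
 */
static void example_checkin(void *info)
{
        pr_info("example: CPU %d checked in\n", smp_processor_id());
}

static void __maybe_unused example_checkin_everywhere(void)
{
        on_each_cpu(example_checkin, NULL, 1);
}
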
 606/**
 607 * on_each_cpu_mask(): Run a function on processors specified by
 608 * cpumask, which may include the local processor.
 609 * @mask: The set of cpus to run on (only runs on online subset).
 610 * @func: The function to run. This must be fast and non-blocking.
 611 * @info: An arbitrary pointer to pass to the function.
 612 * @wait: If true, wait (atomically) until function has completed
 613 *        on other CPUs.
 614 *
 615 * If @wait is true, then returns once @func has returned.
 616 *
 617 * You must not call this function with disabled interrupts or from a
 618 * hardware interrupt handler or from a bottom half handler.  The
 619 * exception is that it may be used during early boot while
 620 * early_boot_irqs_disabled is set.
 621 */
 622void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 623                        void *info, bool wait)
 624{
 625        int cpu = get_cpu();
 626
 627        smp_call_function_many(mask, func, info, wait);
 628        if (cpumask_test_cpu(cpu, mask)) {
 629                unsigned long flags;
 630                local_irq_save(flags);
 631                func(info);
 632                local_irq_restore(flags);
 633        }
 634        put_cpu();
 635}
 636EXPORT_SYMBOL(on_each_cpu_mask);
 637
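/*
 * An illustrative sketch with hypothetical names: the mask variant is handy
 * when the target CPUs are known in advance; here a single CPU is targeted
 * via cpumask_of().
 */
static void example_drain(void *info)
{
}

static void __maybe_unused example_drain_one_cpu(int cpu)
{
        on_each_cpu_mask(cpumask_of(cpu), example_drain, NULL, true);
}
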
 638/*
 639 * on_each_cpu_cond(): Call a function on each processor for which
 640 * the supplied function cond_func returns true, optionally waiting
 641 * for all the required CPUs to finish. This may include the local
 642 * processor.
 643 * @cond_func:  A callback function that is passed a cpu id and
 644 *              the info parameter. The function is called
 645 *              with preemption disabled. The function should
 646 *              return a boolean value indicating whether to IPI
 647 *              the specified CPU.
 648 * @func:       The function to run on all applicable CPUs.
 649 *              This must be fast and non-blocking.
 650 * @info:       An arbitrary pointer to pass to both functions.
 651 * @wait:       If true, wait (atomically) until function has
 652 *              completed on other CPUs.
 653 * @gfp_flags:  GFP flags to use when allocating the cpumask
 654 *              used internally by the function.
 655 *
 656 * The function might sleep if the GFP flags indicate that a
 657 * non-atomic allocation is allowed.
 658 *
 659 * Preemption is disabled to protect against CPUs going offline but not online.
 660 * CPUs going online during the call will not be seen or sent an IPI.
 661 *
 662 * You must not call this function with disabled interrupts or
 663 * from a hardware interrupt handler or from a bottom half handler.
 664 */
 665void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 666                        smp_call_func_t func, void *info, bool wait,
 667                        gfp_t gfp_flags)
 668{
 669        cpumask_var_t cpus;
 670        int cpu, ret;
 671
 672        might_sleep_if(gfpflags_allow_blocking(gfp_flags));
 673
 674        if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
 675                preempt_disable();
 676                for_each_online_cpu(cpu)
 677                        if (cond_func(cpu, info))
 678                                cpumask_set_cpu(cpu, cpus);
 679                on_each_cpu_mask(cpus, func, info, wait);
 680                preempt_enable();
 681                free_cpumask_var(cpus);
 682        } else {
 683                /*
 684                 * No free cpumask, bother. No matter, we'll
 685                 * just have to IPI them one by one.
 686                 */
 687                preempt_disable();
 688                for_each_online_cpu(cpu)
 689                        if (cond_func(cpu, info)) {
 690                                ret = smp_call_function_single(cpu, func,
 691                                                                info, wait);
 692                                WARN_ON_ONCE(ret);
 693                        }
 694                preempt_enable();
 695        }
 696}
 697EXPORT_SYMBOL(on_each_cpu_cond);
 698
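/*
 * An illustrative sketch with hypothetical names: the condition callback
 * decides, with preemption disabled, which CPUs receive the IPI; here an
 * "every even-numbered CPU" predicate is used.
 */
static bool example_is_even_cpu(int cpu, void *info)
{
        return (cpu & 1) == 0;
}

static void example_noop(void *info)
{
}

static void __maybe_unused example_call_even_cpus(void)
{
        on_each_cpu_cond(example_is_even_cpu, example_noop, NULL, true,
                         GFP_KERNEL);
}
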
 699static void do_nothing(void *unused)
 700{
 701}
 702
 703/**
 704 * kick_all_cpus_sync - Force all cpus out of idle
 705 *
 706 * Used to synchronize the update of the pm_idle function pointer. It's
 707 * called after the pointer is updated and returns after the dummy
 708 * callback function has been executed on all cpus. The execution of
 709 * the function can only happen on the remote cpus after they have
 710 * left the idle function which had been called via the pm_idle function
 711 * pointer, so it's guaranteed that nothing uses the previous pointer
 712 * anymore.
 713 */
 714void kick_all_cpus_sync(void)
 715{
 716        /* Make sure the change is visible before we kick the cpus */
 717        smp_mb();
 718        smp_call_function(do_nothing, NULL, 1);
 719}
 720EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
 721
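/*
 * An illustrative sketch with a hypothetical pointer and names: the typical
 * pattern is to update a function pointer that idle CPUs may be running
 * through, then call kick_all_cpus_sync() so that no CPU can still be
 * executing the old target afterwards.
 */
static void (*example_idle_hook)(void);

static void __maybe_unused example_set_idle_hook(void (*hook)(void))
{
        example_idle_hook = hook;
        /* After this returns, no CPU is still using the old hook. */
        kick_all_cpus_sync();
}
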
 722/**
 723 * wake_up_all_idle_cpus - break all cpus out of idle
 724 * wake_up_all_idle_cpus tries to break all cpus which are in idle state,
 725 * including cpus that are idle polling; for cpus that are not idle,
 726 * nothing is done.
 727 */
 728void wake_up_all_idle_cpus(void)
 729{
 730        int cpu;
 731
 732        preempt_disable();
 733        for_each_online_cpu(cpu) {
 734                if (cpu == smp_processor_id())
 735                        continue;
 736
 737                wake_up_if_idle(cpu);
 738        }
 739        preempt_enable();
 740}
 741EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
 742