linux/kernel/smp.c
   1/*
   2 * Generic helpers for smp ipi calls
   3 *
   4 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
   5 */
   6#include <linux/rcupdate.h>
   7#include <linux/rculist.h>
   8#include <linux/kernel.h>
   9#include <linux/export.h>
  10#include <linux/percpu.h>
  11#include <linux/init.h>
  12#include <linux/gfp.h>
  13#include <linux/smp.h>
  14#include <linux/cpu.h>
  15
  16#include "smpboot.h"
  17
  18#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
  19static struct {
  20        struct list_head        queue;
  21        raw_spinlock_t          lock;
  22} call_function __cacheline_aligned_in_smp =
  23        {
  24                .queue          = LIST_HEAD_INIT(call_function.queue),
  25                .lock           = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
  26        };
  27
  28enum {
  29        CSD_FLAG_LOCK           = 0x01,
  30};
  31
  32struct call_function_data {
  33        struct call_single_data csd;
  34        atomic_t                refs;
  35        cpumask_var_t           cpumask;
  36        cpumask_var_t           cpumask_ipi;
  37};
  38
  39static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
  40
  41struct call_single_queue {
  42        struct list_head        list;
  43        raw_spinlock_t          lock;
  44};
  45
  46static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
  47
  48static int
  49hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
  50{
  51        long cpu = (long)hcpu;
  52        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
  53
  54        switch (action) {
  55        case CPU_UP_PREPARE:
  56        case CPU_UP_PREPARE_FROZEN:
  57                if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
  58                                cpu_to_node(cpu)))
  59                        return notifier_from_errno(-ENOMEM);
  60                if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
  61                                cpu_to_node(cpu)))
  62                        return notifier_from_errno(-ENOMEM);
  63                break;
  64
  65#ifdef CONFIG_HOTPLUG_CPU
  66        case CPU_UP_CANCELED:
  67        case CPU_UP_CANCELED_FROZEN:
  68
  69        case CPU_DEAD:
  70        case CPU_DEAD_FROZEN:
  71                free_cpumask_var(cfd->cpumask);
  72                free_cpumask_var(cfd->cpumask_ipi);
  73                break;
  74#endif
  75        }
  76
  77        return NOTIFY_OK;
  78}
  79
  80static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
  81        .notifier_call          = hotplug_cfd,
  82};
  83
  84void __init call_function_init(void)
  85{
  86        void *cpu = (void *)(long)smp_processor_id();
  87        int i;
  88
  89        for_each_possible_cpu(i) {
  90                struct call_single_queue *q = &per_cpu(call_single_queue, i);
  91
  92                raw_spin_lock_init(&q->lock);
  93                INIT_LIST_HEAD(&q->list);
  94        }
  95
  96        hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
  97        register_cpu_notifier(&hotplug_cfd_notifier);
  98}
  99
 100/*
 101 * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources.
 102 *
 103 * For non-synchronous ipi calls the csd can still be in use by the
 104 * previous function call. For multi-cpu calls it's even more interesting,
 105 * as we'll have to ensure no other cpu is observing our csd.
 106 */
 107static void csd_lock_wait(struct call_single_data *data)
 108{
 109        while (data->flags & CSD_FLAG_LOCK)
 110                cpu_relax();
 111}
 112
 113static void csd_lock(struct call_single_data *data)
 114{
 115        csd_lock_wait(data);
 116        data->flags = CSD_FLAG_LOCK;
 117
 118        /*
 119         * prevent CPU from reordering the above assignment
 120         * to ->flags with any subsequent assignments to other
 121         * fields of the specified call_single_data structure:
 122         */
 123        smp_mb();
 124}
 125
 126static void csd_unlock(struct call_single_data *data)
 127{
 128        WARN_ON(!(data->flags & CSD_FLAG_LOCK));
 129
 130        /*
 131         * ensure we're all done before releasing data:
 132         */
 133        smp_mb();
 134
 135        data->flags &= ~CSD_FLAG_LOCK;
 136}
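
    /*
     * Overview of the CSD_FLAG_LOCK hand-off (illustrative walkthrough; "csd"
     * stands for any call_single_data instance, the helpers are the ones
     * defined above):
     *
     *	Sender (e.g. smp_call_function_single() below):
     *		csd_lock(csd);		// wait out any previous user, set LOCK
     *		csd->func = func;	// ordered after the flag write by the
     *		csd->info = info;	// smp_mb() in csd_lock()
     *		generic_exec_single(cpu, csd, wait);
     *
     *	Target CPU (generic_smp_call_function_single_interrupt() below):
     *		csd->func(csd->info);
     *		csd_unlock(csd);	// smp_mb(), then clear CSD_FLAG_LOCK so
     *					// the next csd_lock() may proceed
     */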
 137
 138/*
 139 * Insert a previously allocated call_single_data element
 140 * for execution on the given CPU. data must already have
 141 * ->func, ->info, and ->flags set.
 142 */
 143static
 144void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 145{
 146        struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
 147        unsigned long flags;
 148        int ipi;
 149
 150        raw_spin_lock_irqsave(&dst->lock, flags);
 151        ipi = list_empty(&dst->list);
 152        list_add_tail(&data->list, &dst->list);
 153        raw_spin_unlock_irqrestore(&dst->lock, flags);
 154
 155        /*
 156         * The list addition should be visible before the IPI is sent: the
 157         * handler locks the list to pull the entry off it, so the normal
 158         * cache coherency rules implied by spinlocks make the addition visible.
 159         *
 160         * If IPIs can go out of order with respect to the cache coherency
 161         * protocol on an architecture, sufficient synchronisation should be added
 162         * to arch code to make it appear to obey cache coherency WRT
 163         * locking and barrier primitives. Generic code isn't really
 164         * equipped to do the right thing...
 165         */
 166        if (ipi)
 167                arch_send_call_function_single_ipi(cpu);
 168
 169        if (wait)
 170                csd_lock_wait(data);
 171}
 172
 173/*
 174 * Invoked by arch to handle an IPI for call function. Must be called with
 175 * interrupts disabled.
 176 */
 177void generic_smp_call_function_interrupt(void)
 178{
 179        struct call_function_data *data;
 180        int cpu = smp_processor_id();
 181
 182        /*
 183         * Shouldn't receive this interrupt on a cpu that is not yet online.
 184         */
 185        WARN_ON_ONCE(!cpu_online(cpu));
 186
 187        /*
 188         * Ensure entry is visible on call_function_queue after we have
 189         * entered the IPI. See comment in smp_call_function_many.
 190         * If we don't have this, then we may miss an entry on the list
 191         * and never get another IPI to process it.
 192         */
 193        smp_mb();
 194
 195        /*
 196         * It's ok to use list_for_each_entry_rcu() here even though we may
 197         * delete 'data', since list_del_rcu() doesn't clear ->next
 198         */
 199        list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
 200                int refs;
 201                smp_call_func_t func;
 202
 203                /*
 204                 * Since we walk the list without any locks, we might
 205                 * see an entry that was completed, removed from the
 206                 * list and is in the process of being reused.
 207                 *
 208                 * We must check that the cpu is in the cpumask before
 209                 * checking the refs, and both must be set before
 210                 * executing the callback on this cpu.
 211                 */
 212
 213                if (!cpumask_test_cpu(cpu, data->cpumask))
 214                        continue;
 215
 216                smp_rmb();
 217
 218                if (atomic_read(&data->refs) == 0)
 219                        continue;
 220
 221                func = data->csd.func;          /* save for later warn */
 222                func(data->csd.info);
 223
 224                /*
 225                 * If the cpu mask is not still set then func enabled
 226                 * interrupts (BUG), and this cpu took another smp call
 227                 * function interrupt and executed func(info) twice
 228                 * on this cpu.  That nested execution decremented refs.
 229                 */
 230                if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
 231                        WARN(1, "%pf enabled interrupts and double executed\n", func);
 232                        continue;
 233                }
 234
 235                refs = atomic_dec_return(&data->refs);
 236                WARN_ON(refs < 0);
 237
 238                if (refs)
 239                        continue;
 240
 241                WARN_ON(!cpumask_empty(data->cpumask));
 242
 243                raw_spin_lock(&call_function.lock);
 244                list_del_rcu(&data->csd.list);
 245                raw_spin_unlock(&call_function.lock);
 246
 247                csd_unlock(&data->csd);
 248        }
 249
 250}
 251
 252/*
 253 * Invoked by arch to handle an IPI for call function single. Must be
 254 * called from the arch with interrupts disabled.
 255 */
 256void generic_smp_call_function_single_interrupt(void)
 257{
 258        struct call_single_queue *q = &__get_cpu_var(call_single_queue);
 259        unsigned int data_flags;
 260        LIST_HEAD(list);
 261
 262        /*
 263         * Shouldn't receive this interrupt on a cpu that is not yet online.
 264         */
 265        WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 266
 267        raw_spin_lock(&q->lock);
 268        list_replace_init(&q->list, &list);
 269        raw_spin_unlock(&q->lock);
 270
 271        while (!list_empty(&list)) {
 272                struct call_single_data *data;
 273
 274                data = list_entry(list.next, struct call_single_data, list);
 275                list_del(&data->list);
 276
 277                /*
 278                 * 'data' can be invalid after this call if flags == 0
 279                 * (when called through generic_exec_single()),
 280                 * so save them away before making the call:
 281                 */
 282                data_flags = data->flags;
 283
 284                data->func(data->info);
 285
 286                /*
 287                 * Unlocked CSDs are valid through generic_exec_single():
 288                 */
 289                if (data_flags & CSD_FLAG_LOCK)
 290                        csd_unlock(data);
 291        }
 292}
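
    /*
     * A hypothetical sketch of the arch glue the two handlers above expect
     * (the entry-point names and the irq_enter()/irq_exit() placement are
     * illustrative; only the two generic_*() calls come from this file):
     *
     *	void example_call_function_ipi(void)		// multi-target vector
     *	{
     *		irq_enter();
     *		generic_smp_call_function_interrupt();
     *		irq_exit();
     *	}
     *
     *	void example_call_function_single_ipi(void)	// single-target vector
     *	{
     *		irq_enter();
     *		generic_smp_call_function_single_interrupt();
     *		irq_exit();
     *	}
     *
     * The matching arch_send_call_function_ipi_mask() and
     * arch_send_call_function_single_ipi() implementations raise these
     * vectors on the target CPUs.
     */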
 293
 294static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
 295
 296/**
 297 * smp_call_function_single - Run a function on a specific CPU
     * @cpu: The CPU to run on.
 298 * @func: The function to run. This must be fast and non-blocking.
 299 * @info: An arbitrary pointer to pass to the function.
 300 * @wait: If true, wait until function has completed on the specified CPU.
 301 *
 302 * Returns 0 on success, else a negative status code.
 303 */
 304int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 305                             int wait)
 306{
 307        struct call_single_data d = {
 308                .flags = 0,
 309        };
 310        unsigned long flags;
 311        int this_cpu;
 312        int err = 0;
 313
 314        /*
 315         * prevent preemption and reschedule on another processor,
 316         * as well as CPU removal
 317         */
 318        this_cpu = get_cpu();
 319
 320        /*
 321         * Can deadlock when called with interrupts disabled.
 322         * We allow CPUs that are not yet online though, as no one else can
 323         * send smp call function interrupt to this cpu and as such deadlocks
 324         * can't happen.
 325         */
 326        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
 327                     && !oops_in_progress);
 328
 329        if (cpu == this_cpu) {
 330                local_irq_save(flags);
 331                func(info);
 332                local_irq_restore(flags);
 333        } else {
 334                if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
 335                        struct call_single_data *data = &d;
 336
 337                        if (!wait)
 338                                data = &__get_cpu_var(csd_data);
 339
 340                        csd_lock(data);
 341
 342                        data->func = func;
 343                        data->info = info;
 344                        generic_exec_single(cpu, data, wait);
 345                } else {
 346                        err = -ENXIO;   /* CPU not online */
 347                }
 348        }
 349
 350        put_cpu();
 351
 352        return err;
 353}
 354EXPORT_SYMBOL(smp_call_function_single);
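
    /*
     * Example: a minimal, hypothetical sketch of running a fast, non-blocking
     * callback on one specific CPU and waiting for it.  record_cpu() and
     * probe_cpu() are made-up names, not kernel APIs.
     *
     *	static void record_cpu(void *info)
     *	{
     *		*(int *)info = smp_processor_id();
     *	}
     *
     *	static int probe_cpu(int cpu)
     *	{
     *		int seen = -1;
     *
     *		// wait == 1, so "seen" is stable once the call returns 0
     *		if (smp_call_function_single(cpu, record_cpu, &seen, 1))
     *			return -ENXIO;		// cpu was not online
     *		return seen;
     *	}
     */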
 355
 356/*
 357 * smp_call_function_any - Run a function on any of the given cpus
 358 * @mask: The mask of cpus it can run on.
 359 * @func: The function to run. This must be fast and non-blocking.
 360 * @info: An arbitrary pointer to pass to the function.
 361 * @wait: If true, wait until function has completed.
 362 *
 363 * Returns 0 on success, else a negative status code (if no cpus were online).
 364 * Note that @wait will be implicitly turned on in case of allocation failures,
 365 * since we fall back to on-stack allocation.
 366 *
 367 * Selection preference:
 368 *      1) current cpu if in @mask
 369 *      2) any cpu of current node if in @mask
 370 *      3) any other online cpu in @mask
 371 */
 372int smp_call_function_any(const struct cpumask *mask,
 373                          smp_call_func_t func, void *info, int wait)
 374{
 375        unsigned int cpu;
 376        const struct cpumask *nodemask;
 377        int ret;
 378
 379        /* Try for same CPU (cheapest) */
 380        cpu = get_cpu();
 381        if (cpumask_test_cpu(cpu, mask))
 382                goto call;
 383
 384        /* Try for same node. */
 385        nodemask = cpumask_of_node(cpu_to_node(cpu));
 386        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
 387             cpu = cpumask_next_and(cpu, nodemask, mask)) {
 388                if (cpu_online(cpu))
 389                        goto call;
 390        }
 391
 392        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
 393        cpu = cpumask_any_and(mask, cpu_online_mask);
 394call:
 395        ret = smp_call_function_single(cpu, func, info, wait);
 396        put_cpu();
 397        return ret;
 398}
 399EXPORT_SYMBOL_GPL(smp_call_function_any);
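
    /*
     * Example: a hypothetical sketch for work that only has to run somewhere
     * inside a given mask (e.g. one CPU of a package).  The function names
     * are made up.
     *
     *	static void record_executing_cpu(void *info)
     *	{
     *		*(int *)info = smp_processor_id();
     *	}
     *
     *	static int run_somewhere_in(const struct cpumask *mask)
     *	{
     *		int where = -1;
     *
     *		// Prefers the current CPU, then its node, then any online
     *		// CPU in *mask; wait == 1, so "where" is valid on a 0 return.
     *		if (smp_call_function_any(mask, record_executing_cpu,
     *					  &where, 1))
     *			return -ENXIO;		// no online CPU in *mask
     *		return where;
     *	}
     */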
 400
 401/**
 402 * __smp_call_function_single(): Run a function on a specific CPU
 403 * @cpu: The CPU to run on.
 404 * @data: Pre-allocated and setup data structure
 405 * @wait: If true, wait until function has completed on specified CPU.
 406 *
 407 * Like smp_call_function_single(), but allow caller to pass in a
 408 * pre-allocated data structure. Useful for embedding @data inside
 409 * other structures, for instance.
 410 */
 411void __smp_call_function_single(int cpu, struct call_single_data *data,
 412                                int wait)
 413{
 414        unsigned int this_cpu;
 415        unsigned long flags;
 416
 417        this_cpu = get_cpu();
 418        /*
 419         * Can deadlock when called with interrupts disabled.
 420         * We allow CPUs that are not yet online though, as no one else can
 421         * send smp call function interrupt to this cpu and as such deadlocks
 422         * can't happen.
 423         */
 424        WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
 425                     && !oops_in_progress);
 426
 427        if (cpu == this_cpu) {
 428                local_irq_save(flags);
 429                data->func(data->info);
 430                local_irq_restore(flags);
 431        } else {
 432                csd_lock(data);
 433                generic_exec_single(cpu, data, wait);
 434        }
 435        put_cpu();
 436}
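
    /*
     * Example: a hypothetical sketch of the usual reason to call
     * __smp_call_function_single(): embedding the call_single_data in a
     * longer-lived, zero-initialised object so no allocation or per-cpu slot
     * is needed.  The names are made up; the caller must not reuse the
     * embedded csd while a previous call is still in flight.
     *
     *	struct remote_work {
     *		struct call_single_data	csd;	// starts zeroed (flags == 0)
     *		int			pending;
     *	};
     *
     *	static void remote_work_fn(void *info)
     *	{
     *		struct remote_work *rw = info;
     *
     *		rw->pending = 0;
     *	}
     *
     *	static void kick_remote_work(int cpu, struct remote_work *rw)
     *	{
     *		rw->pending = 1;
     *		rw->csd.func = remote_work_fn;
     *		rw->csd.info = rw;
     *		__smp_call_function_single(cpu, &rw->csd, 0);	// no wait
     *	}
     */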
 437
 438/**
 439 * smp_call_function_many(): Run a function on a set of other CPUs.
 440 * @mask: The set of cpus to run on (only runs on online subset).
 441 * @func: The function to run. This must be fast and non-blocking.
 442 * @info: An arbitrary pointer to pass to the function.
 443 * @wait: If true, wait (atomically) until function has completed
 444 *        on other CPUs.
 445 *
 446 * If @wait is true, then returns once @func has returned.
 447 *
 448 * You must not call this function with disabled interrupts or from a
 449 * hardware interrupt handler or from a bottom half handler. Preemption
 450 * must be disabled when calling this function.
 451 */
 452void smp_call_function_many(const struct cpumask *mask,
 453                            smp_call_func_t func, void *info, bool wait)
 454{
 455        struct call_function_data *data;
 456        unsigned long flags;
 457        int refs, cpu, next_cpu, this_cpu = smp_processor_id();
 458
 459        /*
 460         * Can deadlock when called with interrupts disabled.
 461         * We allow CPUs that are not yet online though, as no one else can
 462         * send smp call function interrupt to this cpu and as such deadlocks
 463         * can't happen.
 464         */
 465        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
 466                     && !oops_in_progress && !early_boot_irqs_disabled);
 467
 468        /* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
 469        cpu = cpumask_first_and(mask, cpu_online_mask);
 470        if (cpu == this_cpu)
 471                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
 472
 473        /* No online cpus?  We're done. */
 474        if (cpu >= nr_cpu_ids)
 475                return;
 476
 477        /* Do we have another CPU which isn't us? */
 478        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
 479        if (next_cpu == this_cpu)
 480                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
 481
 482        /* Fastpath: do that cpu by itself. */
 483        if (next_cpu >= nr_cpu_ids) {
 484                smp_call_function_single(cpu, func, info, wait);
 485                return;
 486        }
 487
 488        data = &__get_cpu_var(cfd_data);
 489        csd_lock(&data->csd);
 490
 491        /* This BUG_ON verifies our reuse assertions and can be removed */
 492        BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
 493
 494        /*
 495         * The global call function queue list add and delete are protected
 496         * by a lock, but the list is traversed without any lock, relying
 497         * on the rcu list add and delete to allow safe concurrent traversal.
 498         * We reuse the call function data without waiting for any grace
 499         * period after some other cpu removes it from the global queue.
 500         * This means a cpu might find our data block as it is being
 501         * filled out.
 502         *
 503         * We hold off the interrupt handler on the other cpu by
 504         * ordering our writes to the cpu mask vs our setting of the
 505         * refs counter.  We assert only the cpu owning the data block
 506         * will set a bit in cpumask, and each bit will only be cleared
 507         * by the subject cpu.  Each cpu must first find its bit is
 508         * set and then check that refs is set indicating the element is
 509         * ready to be processed, otherwise it must skip the entry.
 510         *
 511         * On the previous iteration refs was set to 0 by another cpu.
 512         * To avoid the use of transitivity, set the counter to 0 here
 513         * so the wmb will pair with the rmb in the interrupt handler.
 514         */
 515        atomic_set(&data->refs, 0);     /* convert 3rd to 1st party write */
 516
 517        data->csd.func = func;
 518        data->csd.info = info;
 519
 520        /* Ensure 0 refs is visible before mask.  Also orders func and info */
 521        smp_wmb();
 522
 523        /* We rely on the "and" being processed before the store */
 524        cpumask_and(data->cpumask, mask, cpu_online_mask);
 525        cpumask_clear_cpu(this_cpu, data->cpumask);
 526        refs = cpumask_weight(data->cpumask);
 527
 528        /* Some callers race with other cpus changing the passed mask */
 529        if (unlikely(!refs)) {
 530                csd_unlock(&data->csd);
 531                return;
 532        }
 533
 534        /*
 535         * Once the entry is on the list and refs are set, remote CPUs may
 536         * start clearing their bits in data->cpumask (via an IPI some other
 537         * CPU already sent), so send our IPIs from a stable copy of the mask.
 538         */
 539        cpumask_copy(data->cpumask_ipi, data->cpumask);
 540        raw_spin_lock_irqsave(&call_function.lock, flags);
 541        /*
 542         * Place entry at the _HEAD_ of the list, so that any cpu still
 543         * observing the entry in generic_smp_call_function_interrupt()
 544         * will not miss any other list entries:
 545         */
 546        list_add_rcu(&data->csd.list, &call_function.queue);
 547        /*
 548         * We rely on the wmb() in list_add_rcu to complete our writes
 549         * to the cpumask before this write to refs, which indicates
 550         * data is on the list and is ready to be processed.
 551         */
 552        atomic_set(&data->refs, refs);
 553        raw_spin_unlock_irqrestore(&call_function.lock, flags);
 554
 555        /*
 556         * Make the list addition visible before sending the ipi.
 557         * (IPIs must obey or appear to obey normal Linux cache
 558         * coherency rules -- see comment in generic_exec_single).
 559         */
 560        smp_mb();
 561
 562        /* Send a message to all CPUs in the map */
 563        arch_send_call_function_ipi_mask(data->cpumask_ipi);
 564
 565        /* Optionally wait for the CPUs to complete */
 566        if (wait)
 567                csd_lock_wait(&data->csd);
 568}
 569EXPORT_SYMBOL(smp_call_function_many);
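
    /*
     * Example: a hypothetical sketch of IPI'ing a set of CPUs.  Preemption
     * must already be disabled and the callback must neither block nor
     * re-enable interrupts; the names below are made up.
     *
     *	static void drain_remote_queue(void *info)
     *	{
     *		// runs in IPI context on each targeted remote CPU
     *	}
     *
     *	static void drain_cpus(const struct cpumask *cpus)
     *	{
     *		preempt_disable();
     *		// IPIs the *other* online CPUs in *cpus and waits for them.
     *		smp_call_function_many(cpus, drain_remote_queue, NULL, true);
     *		preempt_enable();
     *	}
     *
     * The current CPU is always skipped; run the callback locally (with
     * interrupts disabled) if it is needed there too, as on_each_cpu_mask()
     * does below.
     */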
 570
 571/**
 572 * smp_call_function(): Run a function on all other CPUs.
 573 * @func: The function to run. This must be fast and non-blocking.
 574 * @info: An arbitrary pointer to pass to the function.
 575 * @wait: If true, wait (atomically) until function has completed
 576 *        on other CPUs.
 577 *
 578 * Returns 0.
 579 *
 580 * If @wait is true, then returns once @func has returned; otherwise
 581 * it returns just before the target cpu calls @func.
 582 *
 583 * You must not call this function with disabled interrupts or from a
 584 * hardware interrupt handler or from a bottom half handler.
 585 */
 586int smp_call_function(smp_call_func_t func, void *info, int wait)
 587{
 588        preempt_disable();
 589        smp_call_function_many(cpu_online_mask, func, info, wait);
 590        preempt_enable();
 591
 592        return 0;
 593}
 594EXPORT_SYMBOL(smp_call_function);
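
    /*
     * Example: a minimal, hypothetical usage sketch; the per-cpu counter is
     * made up.
     *
     *	static DEFINE_PER_CPU(unsigned long, flush_count);
     *
     *	static void bump_flush_count(void *info)
     *	{
     *		__this_cpu_inc(flush_count);
     *	}
     *
     *	static void bump_other_cpus(void)
     *	{
     *		// Runs on every *other* online CPU and waits; the current
     *		// CPU is not included (see on_each_cpu() below for that).
     *		smp_call_function(bump_flush_count, NULL, 1);
     *	}
     */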
 595#endif /* CONFIG_USE_GENERIC_SMP_HELPERS */
 596
 597/* Setup configured maximum number of CPUs to activate */
 598unsigned int setup_max_cpus = NR_CPUS;
 599EXPORT_SYMBOL(setup_max_cpus);
 600
 601
 602/*
 603 * Setup routine for controlling SMP activation
 604 *
 605 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 606 * activation entirely (the MPS table probe still happens, though).
 607 *
 608 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 609 * greater than 0, limits the maximum number of CPUs activated in
 610 * SMP mode to <NUM>.
 611 */
 612
 613void __weak arch_disable_smp_support(void) { }
 614
 615static int __init nosmp(char *str)
 616{
 617        setup_max_cpus = 0;
 618        arch_disable_smp_support();
 619
 620        return 0;
 621}
 622
 623early_param("nosmp", nosmp);
 624
 625/* this is a hard limit */
 626static int __init nrcpus(char *str)
 627{
 628        int nr_cpus;
 629
 630        get_option(&str, &nr_cpus);
 631        if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
 632                nr_cpu_ids = nr_cpus;
 633
 634        return 0;
 635}
 636
 637early_param("nr_cpus", nrcpus);
 638
 639static int __init maxcpus(char *str)
 640{
 641        get_option(&str, &setup_max_cpus);
 642        if (setup_max_cpus == 0)
 643                arch_disable_smp_support();
 644
 645        return 0;
 646}
 647
 648early_param("maxcpus", maxcpus);
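
    /*
     * Example command lines for the boot options handled above (the values
     * are illustrative):
     *
     *	nosmp		boot with a single CPU, SMP support disabled
     *	maxcpus=4	bring up at most 4 CPUs at boot
     *	maxcpus=0	same effect as "nosmp"
     *	nr_cpus=8	hard-cap nr_cpu_ids (possible CPUs) at 8
     */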
 649
 650/* Setup number of possible processor ids */
 651int nr_cpu_ids __read_mostly = NR_CPUS;
 652EXPORT_SYMBOL(nr_cpu_ids);
 653
 654/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
 655void __init setup_nr_cpu_ids(void)
 656{
 657        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
 658}
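
    /*
     * Worked example for the function above: if cpu_possible_mask is 0b1011
     * (CPUs 0, 1 and 3 possible), find_last_bit() returns 3, so nr_cpu_ids
     * becomes 4, one more than the highest possible CPU number.
     */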
 659
 660/* Called by boot processor to activate the rest. */
 661void __init smp_init(void)
 662{
 663        unsigned int cpu;
 664
 665        idle_threads_init();
 666
 667        /* FIXME: This should be done in userspace --RR */
 668        for_each_present_cpu(cpu) {
 669                if (num_online_cpus() >= setup_max_cpus)
 670                        break;
 671                if (!cpu_online(cpu))
 672                        cpu_up(cpu);
 673        }
 674
 675        /* Any cleanup work */
 676        printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
 677        smp_cpus_done(setup_max_cpus);
 678}
 679
 680/*
 681 * Call a function on all processors.  May be used during early boot while
 682 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 683 * of local_irq_disable/enable().
 684 */
 685int on_each_cpu(void (*func) (void *info), void *info, int wait)
 686{
 687        unsigned long flags;
 688        int ret = 0;
 689
 690        preempt_disable();
 691        ret = smp_call_function(func, info, wait);
 692        local_irq_save(flags);
 693        func(info);
 694        local_irq_restore(flags);
 695        preempt_enable();
 696        return ret;
 697}
 698EXPORT_SYMBOL(on_each_cpu);
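
    /*
     * Example: a hypothetical usage sketch; the callback name is made up and
     * the callback must be fast, non-blocking and must not re-enable
     * interrupts.
     *
     *	static void reset_local_stats(void *info)
     *	{
     *		// per-cpu work; runs with interrupts disabled on every CPU,
     *		// including the calling one
     *	}
     *
     *	static void reset_all_stats(void)
     *	{
     *		on_each_cpu(reset_local_stats, NULL, 1);	// 1 == wait
     *	}
     */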
 699
 700/**
 701 * on_each_cpu_mask(): Run a function on processors specified by
 702 * cpumask, which may include the local processor.
 703 * @mask: The set of cpus to run on (only runs on online subset).
 704 * @func: The function to run. This must be fast and non-blocking.
 705 * @info: An arbitrary pointer to pass to the function.
 706 * @wait: If true, wait (atomically) until function has completed
 707 *        on other CPUs.
 708 *
 709 * If @wait is true, then returns once @func has returned.
 710 *
 711 * You must not call this function with disabled interrupts or
 712 * from a hardware interrupt handler or from a bottom half handler.
 713 */
 714void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
 715                        void *info, bool wait)
 716{
 717        int cpu = get_cpu();
 718
 719        smp_call_function_many(mask, func, info, wait);
 720        if (cpumask_test_cpu(cpu, mask)) {
 721                local_irq_disable();
 722                func(info);
 723                local_irq_enable();
 724        }
 725        put_cpu();
 726}
 727EXPORT_SYMBOL(on_each_cpu_mask);
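
    /*
     * Example: a hypothetical sketch assuming an object that tracks which
     * CPUs hold state for it (struct and function names are made up):
     *
     *	struct flush_ctx {
     *		cpumask_var_t	dirty_cpus;
     *	};
     *
     *	static void flush_local_state(void *info)
     *	{
     *		// drop this CPU's cached state for the flush_ctx in "info"
     *	}
     *
     *	static void flush_dirty_cpus(struct flush_ctx *ctx)
     *	{
     *		// IPIs only the online CPUs in ctx->dirty_cpus; if the
     *		// current CPU is in the mask, the callback runs inline
     *		// with interrupts disabled.
     *		on_each_cpu_mask(ctx->dirty_cpus, flush_local_state, ctx, true);
     *	}
     */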
 728
 729/*
 730 * on_each_cpu_cond(): Call a function on each processor for which
 731 * the supplied function cond_func returns true, optionally waiting
 732 * for all the required CPUs to finish. This may include the local
 733 * processor.
 734 * @cond_func:  A callback function that is passed a cpu id and
 735 *              the info parameter. The function is called
 736 *              with preemption disabled. The function should
 737 *              return a boolean value indicating whether to IPI
 738 *              the specified CPU.
 739 * @func:       The function to run on all applicable CPUs.
 740 *              This must be fast and non-blocking.
 741 * @info:       An arbitrary pointer to pass to both functions.
 742 * @wait:       If true, wait (atomically) until function has
 743 *              completed on other CPUs.
 744 * @gfp_flags:  GFP flags to use when allocating the cpumask
 745 *              used internally by the function.
 746 *
 747 * The function might sleep if the GFP flags indicate that a
 748 * non-atomic allocation is allowed.
 749 *
 750 * Preemption is disabled to protect against CPUs going offline, but not against
 751 * CPUs coming online: those will not be seen and will not be sent an IPI.
 752 *
 753 * You must not call this function with disabled interrupts or
 754 * from a hardware interrupt handler or from a bottom half handler.
 755 */
 756void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
 757                        smp_call_func_t func, void *info, bool wait,
 758                        gfp_t gfp_flags)
 759{
 760        cpumask_var_t cpus;
 761        int cpu, ret;
 762
 763        might_sleep_if(gfp_flags & __GFP_WAIT);
 764
 765        if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
 766                preempt_disable();
 767                for_each_online_cpu(cpu)
 768                        if (cond_func(cpu, info))
 769                                cpumask_set_cpu(cpu, cpus);
 770                on_each_cpu_mask(cpus, func, info, wait);
 771                preempt_enable();
 772                free_cpumask_var(cpus);
 773        } else {
 774                /*
 775                 * No free cpumask, bother. No matter, we'll
 776                 * just have to IPI them one by one.
 777                 */
 778                preempt_disable();
 779                for_each_online_cpu(cpu)
 780                        if (cond_func(cpu, info)) {
 781                                ret = smp_call_function_single(cpu, func,
 782                                                                info, wait);
 783                                WARN_ON_ONCE(ret);
 784                        }
 785                preempt_enable();
 786        }
 787}
 788EXPORT_SYMBOL(on_each_cpu_cond);
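
    /*
     * Example: a hypothetical sketch of skipping CPUs that have nothing to
     * do; the per-cpu flag and function names are made up.
     *
     *	static DEFINE_PER_CPU(bool, has_stale_data);
     *
     *	static bool cpu_is_stale(int cpu, void *info)
     *	{
     *		return per_cpu(has_stale_data, cpu);	// preemption is off here
     *	}
     *
     *	static void drop_stale_data(void *info)
     *	{
     *		__this_cpu_write(has_stale_data, false);
     *	}
     *
     *	static void drop_all_stale_data(void)
     *	{
     *		// IPI (and wait for) only the CPUs whose flag is set;
     *		// GFP_KERNEL may sleep while allocating the internal cpumask.
     *		on_each_cpu_cond(cpu_is_stale, drop_stale_data, NULL,
     *				 true, GFP_KERNEL);
     *	}
     */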
 789
 790static void do_nothing(void *unused)
 791{
 792}
 793
 794/**
 795 * kick_all_cpus_sync - Force all cpus out of idle
 796 *
 797 * Used to synchronize the update of the pm_idle function pointer. It's
 798 * called after the pointer is updated and returns after the dummy
 799 * callback function has been executed on all cpus. The execution of
 800 * the function can only happen on the remote cpus after they have
 801 * left the idle function that had been entered via the pm_idle function
 802 * pointer. So it's guaranteed that nothing uses the previous pointer
 803 * anymore.
 804 */
 805void kick_all_cpus_sync(void)
 806{
 807        /* Make sure the change is visible before we kick the cpus */
 808        smp_mb();
 809        smp_call_function(do_nothing, NULL, 1);
 810}
 811EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
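
    /*
     * Example: a hypothetical sketch mirroring the pm_idle scenario described
     * above, with a made-up function pointer:
     *
     *	void (*active_idle_fn)(void);
     *
     *	static void switch_idle_fn(void (*new_fn)(void))
     *	{
     *		active_idle_fn = new_fn;
     *		// Every CPU has taken the IPI, and therefore left the old
     *		// idle routine, by the time this returns.
     *		kick_all_cpus_sync();
     *	}
     */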
 812