linux/lib/cpumask.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/bootmem.h>

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
        /* -1 is a legal arg here. */
        if (n != -1)
                cpumask_check(n);
        return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);

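/*
 * Example (illustrative sketch, not part of the original file): open-coded
 * walk of all online CPUs via cpumask_next(), starting at -1 so CPU 0 is
 * visited too. This is essentially what the for_each_cpu() macro expands to.
 *
 *	unsigned int cpu;
 *
 *	for (cpu = cpumask_next(-1, cpu_online_mask);
 *	     cpu < nr_cpu_ids;
 *	     cpu = cpumask_next(cpu, cpu_online_mask))
 *		pr_info("cpu %u is online\n", cpu);
 */
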
/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (i.e. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
                     const struct cpumask *src2p)
{
        /* -1 is a legal arg here. */
        if (n != -1)
                cpumask_check(n);
        return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
                nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);

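/*
 * Example (illustrative sketch): find the first online CPU in a caller's
 * affinity mask without first computing the intersection into a temporary
 * cpumask; `affinity' is a hypothetical mask owned by the caller.
 *
 *	int cpu = cpumask_next_and(-1, affinity, cpu_online_mask);
 *
 *	if (cpu < nr_cpu_ids)
 *		pr_info("first usable cpu: %d\n", cpu);
 */
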
/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
        unsigned int i;

        cpumask_check(cpu);
        for_each_cpu(i, mask)
                if (i != cpu)
                        break;
        return i;
}
EXPORT_SYMBOL(cpumask_any_but);

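/*
 * Example (illustrative sketch): pick any online CPU other than the one we
 * are currently running on, e.g. as the target of a cross-CPU function call
 * (some_func is a hypothetical handler):
 *
 *	unsigned int target = cpumask_any_but(cpu_online_mask,
 *					      smp_processor_id());
 *
 *	if (target < nr_cpu_ids)
 *		smp_call_function_single(target, some_func, NULL, 1);
 */
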
/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
        int next;

again:
        next = cpumask_next(n, mask);

        if (wrap && n < start && next >= start) {
                return nr_cpumask_bits;
        } else if (next >= nr_cpumask_bits) {
                wrap = true;
                n = -1;
                goto again;
        }

        return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);

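/*
 * Example (illustrative sketch): visit every online CPU exactly once,
 * starting at the current CPU and wrapping past the end of the mask; this
 * mirrors how the for_each_cpu_wrap() macro uses this helper.
 *
 *	int start = smp_processor_id();
 *	int cpu;
 *
 *	for (cpu = cpumask_next_wrap(start - 1, cpu_online_mask, start, false);
 *	     cpu < nr_cpumask_bits;
 *	     cpu = cpumask_next_wrap(cpu, cpu_online_mask, start, true))
 *		pr_info("visiting cpu %d\n", cpu);
 */
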
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate, or NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise it is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so it does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
        *mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (!*mask) {
                printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
                dump_stack();
        }
#endif

        return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
        return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

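/*
 * Example (illustrative sketch): a driver allocating a zeroed cpumask on
 * the memory node closest to its device (`dev' is a hypothetical
 * struct device pointer):
 *
 *	cpumask_var_t mask;
 *
 *	if (!zalloc_cpumask_var_node(&mask, GFP_KERNEL, dev_to_node(dev)))
 *		return -ENOMEM;
 *	...
 *	free_cpumask_var(mask);
 */
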
/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise it is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
        return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
        return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);

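/*
 * Example (illustrative sketch): the usual allocate/use/free pattern.
 * When CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is a mask on the caller's
 * stack, the allocation always "succeeds" and free_cpumask_var() is a nop.
 *
 *	cpumask_var_t tmp;
 *
 *	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	cpumask_and(tmp, cpu_online_mask, cpu_present_mask);
 *	...
 *	free_cpumask_var(tmp);
 */
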
/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise it is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
        *mask = memblock_virt_alloc(cpumask_size(), 0);
}

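/*
 * Example (illustrative sketch): allocating a long-lived mask during early
 * boot, before the slab allocator is available (my_early_setup is a
 * hypothetical __init hook):
 *
 *	static cpumask_var_t boot_mask;
 *
 *	void __init my_early_setup(void)
 *	{
 *		alloc_bootmem_cpumask_var(&boot_mask);
 *		cpumask_copy(boot_mask, cpu_possible_mask);
 *	}
 */
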
/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
        kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
        memblock_free_early(__pa(mask), cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu with local numa cpus first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a numa aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
        int cpu;

        /* Wrap: we always want a cpu. */
        i %= num_online_cpus();

        if (node == NUMA_NO_NODE) {
                for_each_cpu(cpu, cpu_online_mask)
                        if (i-- == 0)
                                return cpu;
        } else {
                /* NUMA first. */
                for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
                        if (i-- == 0)
                                return cpu;

                for_each_cpu(cpu, cpu_online_mask) {
                        /* Skip NUMA nodes, done above. */
                        if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
                                continue;

                        if (i-- == 0)
                                return cpu;
                }
        }
        BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);
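
/*
 * Example (illustrative sketch): spread per-queue interrupts across the
 * online CPUs, preferring CPUs on the device's NUMA node, as network
 * drivers commonly do (nr_queues, queue_irq[] and dev are hypothetical):
 *
 *	unsigned int q;
 *
 *	for (q = 0; q < nr_queues; q++) {
 *		unsigned int cpu = cpumask_local_spread(q, dev_to_node(dev));
 *
 *		irq_set_affinity_hint(queue_irq[q], cpumask_of(cpu));
 *	}
 */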