linux/mm/allocpercpu.c
/*
 * linux/mm/allocpercpu.c
 *
 * Separated from slab.c August 11, 2006 Christoph Lameter
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <asm/sections.h>

#ifndef cache_line_size
#define cache_line_size()       L1_CACHE_BYTES
#endif
/**
 * percpu_depopulate - depopulate per-cpu data for given cpu
 * @__pdata: per-cpu data to depopulate
 * @cpu: depopulate per-cpu data for this cpu
 *
 * Depopulating per-cpu data for a cpu going offline would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 */
static void percpu_depopulate(void *__pdata, int cpu)
{
        struct percpu_data *pdata = __percpu_disguise(__pdata);

        kfree(pdata->ptrs[cpu]);
        pdata->ptrs[cpu] = NULL;
}
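
/*
 * Illustrative sketch (not part of the original file): the kind of cpu
 * hotplug handler the comment above has in mind.  The callback name and
 * the my_pdata pointer are hypothetical; percpu_depopulate() is static,
 * so such a handler would have to live in this file (external users go
 * through the public alloc_percpu()/free_percpu() API instead).
 *
 *      static int my_percpu_callback(struct notifier_block *nfb,
 *                                    unsigned long action, void *hcpu)
 *      {
 *              unsigned int cpu = (unsigned long)hcpu;
 *
 *              if (action == CPU_DEAD)
 *                      percpu_depopulate(my_pdata, cpu);
 *              return NOTIFY_OK;
 *      }
 *
 * registered at init time with register_cpu_notifier().
 */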

/**
 * percpu_depopulate_mask - depopulate per-cpu data for some CPUs
 * @__pdata: per-cpu data to depopulate
 * @mask: depopulate per-cpu data for CPUs selected through mask bits
 */
static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
{
        int cpu;
        for_each_cpu_mask_nr(cpu, *mask)
                percpu_depopulate(__pdata, cpu);
}

#define percpu_depopulate_mask(__pdata, mask) \
        __percpu_depopulate_mask((__pdata), &(mask))

/**
 * percpu_populate - populate per-cpu data for given cpu
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @cpu: populate per-cpu data for this cpu
 *
 * Populating per-cpu data for a cpu coming online would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 * The per-cpu object is populated with a zeroed buffer.
 */
static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
{
        struct percpu_data *pdata = __percpu_disguise(__pdata);
        int node = cpu_to_node(cpu);

        /*
         * We should make sure each CPU gets private memory.
         */
        size = roundup(size, cache_line_size());

        BUG_ON(pdata->ptrs[cpu]);
        if (node_online(node))
                pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
        else
                pdata->ptrs[cpu] = kzalloc(size, gfp);
        return pdata->ptrs[cpu];
}
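
/*
 * Illustrative sketch (not in the original file): the CPU_UP_PREPARE
 * counterpart to the depopulate sketch above, inside the same
 * hypothetical hotplug callback.  my_pdata and my_size are assumed
 * names; failing the populate vetoes the cpu coming online.
 *
 *      switch (action) {
 *      case CPU_UP_PREPARE:
 *              if (!percpu_populate(my_pdata, my_size, GFP_KERNEL, cpu))
 *                      return NOTIFY_BAD;
 *              break;
 *      case CPU_DEAD:
 *              percpu_depopulate(my_pdata, cpu);
 *              break;
 *      }
 */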

/**
 * percpu_populate_mask - populate per-cpu data for more CPUs
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @mask: populate per-cpu data for CPUs selected through mask bits
 *
 * Per-cpu objects are populated with zeroed buffers.
 */
static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
                                  cpumask_t *mask)
{
        cpumask_t populated;
        int cpu;

        cpus_clear(populated);
        for_each_cpu_mask_nr(cpu, *mask)
                if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
                        /* Roll back the cpus populated so far on failure */
                        __percpu_depopulate_mask(__pdata, &populated);
                        return -ENOMEM;
                } else
                        cpu_set(cpu, populated);
        return 0;
}

#define percpu_populate_mask(__pdata, size, gfp, mask) \
        __percpu_populate_mask((__pdata), (size), (gfp), &(mask))

/**
 * alloc_percpu - initial setup of per-cpu data
 * @size: size of per-cpu object
 * @align: alignment
 *
 * Allocate dynamic percpu area.  Percpu objects are populated with
 * zeroed buffers.
 */
void *__alloc_percpu(size_t size, size_t align)
{
        /*
         * We allocate whole cache lines to avoid false sharing
         */
        size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
        void *pdata = kzalloc(sz, GFP_KERNEL);
        void *__pdata = __percpu_disguise(pdata);

        /*
         * Can't easily make larger alignment work with kmalloc.  WARN
         * on it.  Larger alignment should only be used for module
         * percpu sections on SMP for which this path isn't used.
         */
        WARN_ON_ONCE(align > SMP_CACHE_BYTES);

        if (unlikely(!pdata))
                return NULL;
        if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
                                           &cpu_possible_map)))
                return __pdata;
        kfree(pdata);
        return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * free_percpu - final cleanup of per-cpu data
 * @__pdata: object to clean up
 *
 * We simply clean up any per-cpu object left. No need for the client to
 * track and specify through a bitmask which per-cpu objects are to be
 * freed.
 */
void free_percpu(void *__pdata)
{
        if (unlikely(!__pdata))
                return;
        __percpu_depopulate_mask(__pdata, cpu_possible_mask);
        kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(free_percpu);
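
/*
 * Illustrative usage sketch (not part of the original file): callers
 * normally go through the type-safe alloc_percpu() wrapper from
 * <linux/percpu.h> and reach individual objects with per_cpu_ptr().
 * The counters variable is a hypothetical example; the buffers come
 * back zeroed, per the kerneldoc above.
 *
 *      long *counters = alloc_percpu(long);
 *      unsigned long sum = 0;
 *      int cpu;
 *
 *      if (!counters)
 *              return -ENOMEM;
 *      (*per_cpu_ptr(counters, get_cpu()))++;  <- count an event locally
 *      put_cpu();
 *      for_each_possible_cpu(cpu)
 *              sum += *per_cpu_ptr(counters, cpu);
 *      free_percpu(counters);
 */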

/*
 * Generic percpu area setup.
 */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
        unsigned long size, i;
        char *ptr;
        unsigned long nr_possible_cpus = num_possible_cpus();

        /* Copy section for each CPU (we discard the original) */
        size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
        ptr = alloc_bootmem_pages(size * nr_possible_cpus);

        for_each_possible_cpu(i) {
                __per_cpu_offset[i] = ptr - __per_cpu_start;
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
                ptr += size;
        }
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
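
/*
 * Illustrative note (not part of the original file): the offsets
 * recorded above are what the generic per_cpu() accessors add to a
 * static per-cpu variable's link-time address.  Roughly, following
 * asm-generic/percpu.h of this era:
 *
 *      #define per_cpu_offset(cpu)     (__per_cpu_offset[cpu])
 *      per_cpu(var, cpu) ==
 *              *RELOC_HIDE(&per_cpu__##var, per_cpu_offset(cpu))
 *
 * i.e. each cpu dereferences its own copy of the .data.percpu section
 * that setup_per_cpu_areas() duplicated above.
 */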