/*
 * linux/mm/allocpercpu.c
 *
 * Separated from slab.c August 11, 2006 Christoph Lameter
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <asm/sections.h>

#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif

/**
 * percpu_depopulate - depopulate per-cpu data for given cpu
 * @__pdata: per-cpu data to depopulate
 * @cpu: depopulate per-cpu data for this cpu
 *
 * Depopulating per-cpu data for a cpu going offline would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 */
static void percpu_depopulate(void *__pdata, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);

	kfree(pdata->ptrs[cpu]);
	pdata->ptrs[cpu] = NULL;
}

/**
 * percpu_depopulate_mask - depopulate per-cpu data for some cpus
 * @__pdata: per-cpu data to depopulate
 * @mask: depopulate per-cpu data for cpus selected through mask bits
 */
static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
{
	int cpu;
	for_each_cpu_mask_nr(cpu, *mask)
		percpu_depopulate(__pdata, cpu);
}

#define percpu_depopulate_mask(__pdata, mask) \
	__percpu_depopulate_mask((__pdata), &(mask))

/**
 * percpu_populate - populate per-cpu data for given cpu
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @cpu: populate per-cpu data for this cpu
 *
 * Populating per-cpu data for a cpu coming online would be a typical
 * use case. You need to register a cpu hotplug handler for that purpose.
 * Per-cpu object is populated with a zeroed buffer.
 */
static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
{
	struct percpu_data *pdata = __percpu_disguise(__pdata);
	int node = cpu_to_node(cpu);

	/*
	 * We should make sure each CPU gets private memory.
	 */
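	/*
	 * Illustrative arithmetic, not from the original source: with a
	 * 64-byte cache line, an 80-byte object is padded below to
	 * roundup(80, 64) == 128 bytes, so no two CPUs' objects can end
	 * up sharing (and bouncing) a cache line.
	 */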
	size = roundup(size, cache_line_size());

	BUG_ON(pdata->ptrs[cpu]);
	if (node_online(node))
		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
	else
		pdata->ptrs[cpu] = kzalloc(size, gfp);
	return pdata->ptrs[cpu];
}

/**
 * percpu_populate_mask - populate per-cpu data for more cpus
 * @__pdata: per-cpu data to populate further
 * @size: size of per-cpu object
 * @gfp: may sleep or not etc.
 * @mask: populate per-cpu data for cpus selected through mask bits
 *
 * Per-cpu objects are populated with zeroed buffers.
 */
static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
				  cpumask_t *mask)
{
	cpumask_t populated;
	int cpu;

	cpus_clear(populated);
	for_each_cpu_mask_nr(cpu, *mask)
		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
			__percpu_depopulate_mask(__pdata, &populated);
			return -ENOMEM;
		} else
			cpu_set(cpu, populated);
	return 0;
}

#define percpu_populate_mask(__pdata, size, gfp, mask) \
	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
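
/*
 * Hotplug sketch, not part of the original file: the populate/depopulate
 * helpers above are static here, but their kernel-doc names CPU hotplug
 * as the typical use case.  A notifier of this era would look roughly
 * like the following (my_cpu_callback, my_pdata and my_size are
 * hypothetical names):
 *
 *	static int my_cpu_callback(struct notifier_block *nfb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_UP_PREPARE:
 *			if (!percpu_populate(my_pdata, my_size,
 *					     GFP_KERNEL, cpu))
 *				return NOTIFY_BAD;
 *			break;
 *		case CPU_DEAD:
 *			percpu_depopulate(my_pdata, cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 */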

/**
 * __alloc_percpu - initial setup of per-cpu data
 * @size: size of per-cpu object
 * @align: alignment
 *
 * Allocate a dynamic percpu area.  Percpu objects are populated with
 * zeroed buffers.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * We allocate whole cache lines to avoid false sharing.
	 */
	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
	void *pdata = kzalloc(sz, GFP_KERNEL);
	void *__pdata = __percpu_disguise(pdata);

	/*
	 * Can't easily make larger alignment work with kmalloc.  WARN
	 * on it.  Larger alignment should only be used for module
	 * percpu sections on SMP, for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);

	if (unlikely(!pdata))
		return NULL;
	if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
					   &cpu_possible_map)))
		return __pdata;
	kfree(pdata);
	return NULL;
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
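
/*
 * Usage sketch, not part of the original file: a typical caller pairs
 * the alloc_percpu() wrapper macro with per_cpu_ptr() for access and
 * free_percpu() for teardown (struct my_stats is a hypothetical type):
 *
 *	struct my_stats { unsigned long hits; };
 *	struct my_stats *stats = alloc_percpu(struct my_stats);
 *	int cpu;
 *
 *	if (stats) {
 *		for_each_possible_cpu(cpu)
 *			per_cpu_ptr(stats, cpu)->hits = 0;
 *		free_percpu(stats);
 *	}
 */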

/**
 * free_percpu - final cleanup of per-cpu data
 * @__pdata: object to clean up
 *
 * We simply clean up any per-cpu object left. No need for the client to
 * track and specify through a bit mask which per-cpu objects are to free.
 */
void free_percpu(void *__pdata)
{
	if (unlikely(!__pdata))
		return;
	__percpu_depopulate_mask(__pdata, cpu_possible_mask);
	kfree(__percpu_disguise(__pdata));
}
EXPORT_SYMBOL_GPL(free_percpu);

/*
 * Generic percpu area setup.
 */
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	unsigned long size, i;
	char *ptr;
	unsigned long nr_possible_cpus = num_possible_cpus();

	/* Copy section for each CPU (we discard the original) */
	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
	ptr = alloc_bootmem_pages(size * nr_possible_cpus);

	for_each_possible_cpu(i) {
		__per_cpu_offset[i] = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
		ptr += size;
	}
}
#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
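
/*
 * Illustrative note, not part of the original file: the offsets filled
 * in above are what the per_cpu() accessor adds to a per-cpu symbol's
 * link-time address, roughly (simplified from asm-generic/percpu.h of
 * this era, which routes through SHIFT_PERCPU_PTR()/per_cpu_offset()):
 *
 *	#define per_cpu(var, cpu) \
 *		(*RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]))
 */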