/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempool backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempool backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempool backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempool backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */

static void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_poison_kfree(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_unpoison_slab(element);
	if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}

static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool, gfp_t flags)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element, flags);
	check_element(pool, element);
	return element;
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	while (pool->curr_nr) {
		void *element = remove_element(pool, GFP_KERNEL);

		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-supplied element-allocation function.
 * @free_fn:   user-supplied element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * this function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both the alloc_fn()
 * and the free_fn() functions might sleep - as long as the mempool_alloc()
 * function is not called from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
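
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * must be able to allocate a request structure even under memory pressure
 * can keep a small reserve backed by its own slab cache.  The names
 * "foo_req" and "foo_req_cache" are hypothetical; the convenience wrapper
 * mempool_create_slab_pool() in <linux/mempool.h> expands to the same call.
 *
 *	static struct kmem_cache *foo_req_cache;
 *	static mempool_t *foo_req_pool;
 *
 *	foo_req_cache = KMEM_CACHE(foo_req, 0);
 *	if (!foo_req_cache)
 *		return -ENOMEM;
 *	foo_req_pool = mempool_create(16, mempool_alloc_slab,
 *				      mempool_free_slab, foo_req_cache);
 *	if (!foo_req_pool)
 *		return -ENOMEM;
 */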

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
					    gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown in time
 * or that newly allocated elements will be available in the pool.
 * On the other hand if the pool shrinks, no element will be freed
 * if it is in use (i.e. the pool only owns elements that are not in use).
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool, GFP_KERNEL);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
	       pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			/* Raced, another task refilled the pool */
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
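
/*
 * Usage sketch (illustrative only): grow the reserve when a device is
 * reconfigured for a deeper request queue.  "foo_req_pool" and "new_depth"
 * are hypothetical names continuing the earlier example.
 *
 *	if (mempool_resize(foo_req_pool, new_depth) < 0)
 *		pr_warn("foo: could not grow request pool\n");
 */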

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 *
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_entry_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool, gfp_temp);
		spin_unlock_irqrestore(&pool->lock, flags);

		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();

		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o direct reclaim or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
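
/*
 * Usage sketch (illustrative only): with a gfp_mask that allows direct
 * reclaim (e.g. GFP_NOIO), mempool_alloc() never fails in process context,
 * so no error path is needed.  "foo_req_pool" and "struct foo_req" are
 * hypothetical names continuing the earlier example.
 *
 *	struct foo_req *req;
 *
 *	req = mempool_alloc(foo_req_pool, GFP_NOIO);
 *	... fill in and submit req ...
 *	mempool_free(req, foo_req_pool);
 */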

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * is doing "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;

	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;

	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
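
/*
 * Usage sketch (illustrative only): a reserve of fixed-size kmalloc
 * buffers.  The buffer size travels in pool_data, cast to a pointer;
 * the mempool_create_kmalloc_pool() wrapper in <linux/mempool.h> hides
 * the cast.  "foo_buf_pool" is a hypothetical name.
 *
 *	mempool_t *foo_buf_pool;
 *
 *	foo_buf_pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
 *				      (void *)(unsigned long)512);
 */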

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;

	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;

	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
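
/*
 * Usage sketch (illustrative only): a reserve of single pages (order 0).
 * The allocation order travels in pool_data, cast to a pointer; the
 * mempool_create_page_pool() wrapper in <linux/mempool.h> hides the cast.
 * "foo_page_pool" is a hypothetical name.
 *
 *	mempool_t *foo_page_pool;
 *
 *	foo_page_pool = mempool_create(4, mempool_alloc_pages,
 *				       mempool_free_pages, (void *)0);
 */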