/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
		__check_element(pool, element, ksize(element));
	} else if (pool->free == mempool_free_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
		__poison_element(element, ksize(element));
	} else if (pool->alloc == mempool_alloc_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif

static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_slab_free_mempool(element, _RET_IP_);
	else if (pool->alloc == mempool_alloc_pages)
		kasan_free_pages(element, (unsigned long)pool->pool_data);
}

static void kasan_unpoison_element(mempool_t *pool, void *element)
{
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		kasan_unpoison_range(element, __ksize(element));
	else if (pool->alloc == mempool_alloc_pages)
		kasan_alloc_pages(element, (unsigned long)pool->pool_data);
}

static __always_inline void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	kasan_poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	kasan_unpoison_element(pool, element);
	check_element(pool, element);
	return element;
}

/**
 * mempool_exit - exit a mempool initialized with mempool_init()
 * @pool:      pointer to the memory pool which was initialized with
 *             mempool_init().
 *
 * Free all reserved elements in @pool and @pool->elements as well.  This
 * function only sleeps if the free_fn() function sleeps.
 *
 * May be called on a zeroed but uninitialized mempool (i.e. allocated with
 * kzalloc()).
 */
void mempool_exit(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	pool->elements = NULL;
}
EXPORT_SYMBOL(mempool_exit);

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	if (unlikely(!pool))
		return;

	mempool_exit(pool);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		      mempool_free_t *free_fn, void *pool_data,
		      gfp_t gfp_mask, int node_id)
{
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	pool->alloc = alloc_fn;
	pool->free = free_fn;
	init_waitqueue_head(&pool->wait);

	pool->elements = kmalloc_array_node(min_nr, sizeof(void *),
					    gfp_mask, node_id);
	if (!pool->elements)
		return -ENOMEM;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_exit(pool);
			return -ENOMEM;
		}
		add_element(pool, element);
	}

	return 0;
}
EXPORT_SYMBOL(mempool_init_node);

/**
 * mempool_init - initialize a memory pool
 * @pool:      pointer to the memory pool that should be initialized
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * Like mempool_create(), but initializes the pool in place (i.e. embedded in
 * another structure).
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn,
		 mempool_free_t *free_fn, void *pool_data)
{
	return mempool_init_node(pool, min_nr, alloc_fn, free_fn,
				 pool_data, GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_init);

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * this function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both the alloc_fn()
 * and the free_fn() functions might sleep - as long as the mempool_alloc()
 * function is not called from IRQ contexts.
 *
 * Return: pointer to the created memory pool object or %NULL on error.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);
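
/*
 * Illustrative usage sketch, not part of the original file: a driver that
 * must always be able to allocate its per-request structure, even under
 * memory pressure, can keep a small reserve on top of its own kmem_cache.
 * The cache name (foo_cachep) and the reserve size are hypothetical.
 *
 *	pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
 *			      foo_cachep);
 *	if (!pool)
 *		return -ENOMEM;
 */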

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;

	if (mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data,
			      gfp_mask, node_id)) {
		kfree(pool);
		return NULL;
	}

	return pool;
}
EXPORT_SYMBOL(mempool_create_node);

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
	       pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			/* Raced, another thread refilled the pool meanwhile */
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 *
 * Return: pointer to the allocated element or %NULL on error.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_entry_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with the rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * The first round used a gfp mask without direct reclaim or IO.  If
	 * that allocation failed and @pool was empty, retry immediately with
	 * the caller's full mask before considering sleeping.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_DIRECT_RECLAIM */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * Wait for an element to be returned to the pool, but don't rely on
	 * the wakeup alone: time out after 5*HZ and retry the allocation.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
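
/*
 * Illustrative usage sketch, not part of the original file: callers on a
 * forward-progress path (e.g. block I/O submission) typically allocate with
 * a reclaim-capable mask such as GFP_NOIO, so mempool_alloc() may sleep for
 * the reserve but never returns NULL, and hand the element back with
 * mempool_free() once the work completes.  The names foo_pool and struct foo
 * are hypothetical.
 *
 *	struct foo *f = mempool_alloc(foo_pool, GFP_NOIO);
 *
 *	... submit the work described by f; on completion: ...
 *
 *	mempool_free(f, foo_pool);
 */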

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  Without the
	 * following rmb, this function may end up using a curr_nr value
	 * which is from before the allocation of @p.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without taking the lock achieves that, and refilling the reserve
	 * as soon as possible is desirable.
	 *
	 * Because the curr_nr visible here is always a value from after the
	 * allocation of @element, a depleted reserve is guaranteed to be
	 * noticed: either this call takes the lock and puts @element back,
	 * or another free has refilled the pool in the meantime and the
	 * element is simply handed to free_fn() below.
	 */
	if (unlikely(READ_ONCE(pool->curr_nr) < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	VM_BUG_ON(mem->ctor);
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
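
/*
 * Note, as an aside: for the common "fixed-size kmalloc reserve" case,
 * a wrapper such as mempool_create_kmalloc_pool() in include/linux/mempool.h
 * pairs these two helpers and passes the element size as pool_data, so
 * callers rarely name mempool_kmalloc/mempool_kfree directly.  struct foo
 * below is hypothetical.
 *
 *	pool = mempool_create_kmalloc_pool(8, sizeof(struct foo));
 */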

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);