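/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R-style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, however typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a set of linked lists of pages from alloc_pages(),
 * and within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand. To reduce fragmentation,
 * heaps are segregated into three lists, with objects less than
 * 256 bytes, objects less than 1024 bytes, and all other objects.
 *
 * Allocation from a heap involves first searching for a page with
 * sufficient free blocks (using a next-fit-like approach) followed by
 * a first-fit scan of the page. Deallocation inserts objects back
 * into the free list in address order, so this is effectively an
 * address-ordered first fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a small header carrying the kmalloc
 * size. If kmalloc is asked for objects of PAGE_SIZE or larger, it
 * calls alloc_pages() directly, allocating compound pages so the page
 * order does not have to be separately tracked, and stores the size in
 * page->private for ksize(). These objects are detected in kfree()
 * because PageSlab() is false for them.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors for
 * every SLAB allocation. Objects are returned with the minimum
 * alignment unless the SLAB_HWCACHE_ALIGN flag is set, in which case
 * the low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. When a node id is explicitly provided,
 * alloc_pages_exact_node() with the specified node id is used instead.
 *
 * Node-aware pages are still inserted in to the global freelist, and
 * these are scanned for by matching against the node id encoded in the
 * page flags. As a result, block allocations that can be satisfied from
 * the freelist will only be done so on pages residing on the same node,
 * in order to prevent random node placement.
 */
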
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/kmemleak.h>

#include <trace/events/kmem.h>

#include <asm/atomic.h>

/*
 * slob_block has a single field, 'units', which encodes either the size
 * of a free block or the offset of the next free block (both counted in
 * SLOB_UNITs, relative to the start of the page).
 *
 * Free blocks of size 1 unit store only the negated offset of the next
 * free block. Larger free blocks use two slob_t's: s[0].units holds the
 * (positive) size and s[1].units the offset of the next free block.
 *
 * Since SLOB_UNIT is at least 2 bytes, a page contains at most
 * PAGE_SIZE / 2 units, so a 16-bit index suffices whenever
 * PAGE_SIZE <= 32767 * 2.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mapped by struct page */
			atomic_t _count;	/* mapped by struct page */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob page is returned to the page
 * allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All partially free slob pages go on these lists, segregated by the
 * request size they serve: under SLOB_BREAK1 bytes, under SLOB_BREAK2
 * bytes, and everything larger.
 */
#define SLOB_BREAK1 256
#define SLOB_BREAK2 1024
static LIST_HEAD(free_slob_small);
static LIST_HEAD(free_slob_medium);
static LIST_HEAD(free_slob_large);

/*
 * is_slob_page: True for all slob pages (false for bigblock pages).
 */
static inline int is_slob_page(struct slob_page *sp)
{
	return PageSlab((struct page *)sp);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__SetPageSlab((struct page *)sp);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__ClearPageSlab((struct page *)sp);
}

static inline struct slob_page *slob_page(const void *addr)
{
	return (struct slob_page *)virt_to_page(addr);
}

/*
 * slob_page_free: true for pages on one of the free_slob_* lists.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return PageSlobFree((struct page *)sp);
}

static void set_slob_page_free(struct slob_page *sp, struct list_head *list)
{
	list_add(&sp->list, list);
	__SetPageSlobFree((struct page *)sp);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__ClearPageSlobFree((struct page *)sp);
}

#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES
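
/*
 * For example, with a 2-byte slob_t, SLOB_UNITS(1) == 1, SLOB_UNITS(100)
 * == 50, and a 4096-byte page holds SLOB_UNITS(PAGE_SIZE) == 2048 units.
 */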

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);

/*
 * Encode the given size and next pointer into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}
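
/*
 * Example: on a page at 0x1000 with 2-byte units, encoding a 3-unit free
 * block at 0x1010 whose next free block sits at 0x1020 stores
 * s[0].units = 3 and s[1].units = (0x1020 - 0x1000) / 2 = 16. A 1-unit
 * free block at the same spot would store only s[0].units = -16.
 */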

/*
 * Return the size of a slob block, in SLOB_UNITs.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base+next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
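
/*
 * The in-page free list is kept in address order and only ever walked
 * forward from sp->free, so no block's 'next' can legitimately land on a
 * page boundary; a page-aligned 'next' therefore doubles as the
 * end-of-list marker (stored as base + SLOB_UNITS(PAGE_SIZE) when the
 * page is first set up in slob_alloc() below).
 */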

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_exact_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}

static void slob_free_pages(void *b, int order)
{
	/* Credit freed slab pages to the caller's reclaim accounting. */
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	free_pages((unsigned long)b, order);
}

/*
 * Allocate a slob block within a given slob_page sp.
 */
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
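
/*
 * Note that when alignment forces a split, both the head fragment of
 * 'delta' units and any tail fragment of 'avail - units' units stay on
 * the free list, so alignment wastes no memory beyond the rounding
 * itself.
 */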

/*
 * slob_alloc: entry point into the slob allocator.
 */
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	struct list_head *prev;
	struct list_head *slob_list;
	slob_t *b = NULL;
	unsigned long flags;

	if (size < SLOB_BREAK1)
		slob_list = &free_slob_small;
	else if (size < SLOB_BREAK2)
		slob_list = &free_slob_medium;
	else
		slob_list = &free_slob_large;

	spin_lock_irqsave(&slob_lock, flags);
	/* Iterate through each partially free page, try to find room */
	list_for_each_entry(sp, slob_list, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != slob_list->prev &&
				slob_list->next != prev->next)
			list_move_tail(slob_list, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = slob_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp, slob_list);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}
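
/*
 * __GFP_ZERO is masked off for the page allocation above: whether the
 * block came from an existing partial page or a fresh one, the single
 * memset() at the end zeroes exactly the 'size' bytes handed back.
 */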

/*
 * slob_free: entry point into the slob allocator.
 */
static void slob_free(void *block, int size)
{
	struct slob_page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;
	struct list_head *slob_list;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = slob_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		spin_unlock_irqrestore(&slob_lock, flags);
		clear_slob_page(sp);
		free_slob_page(sp);
		slob_free_pages(b, 0);
		return;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->free = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		if (size < SLOB_BREAK1)
			slob_list = &free_slob_small;
		else if (size < SLOB_BREAK2)
			slob_list = &free_slob_medium;
		else
			slob_list = &free_slob_large;
		set_slob_page_free(sp, slob_list);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find
	 * reinsertion point.
	 */
	sp->units += units;

	if (b < sp->free) {
		if (b + units == sp->free) {
			units += slob_units(sp->free);
			sp->free = slob_next(sp->free);
		}
		set_slob(b, units, sp->free);
		sp->free = b;
	} else {
		prev = sp->free;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
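
/*
 * The reinsertion above keeps each page's free list sorted by address
 * and eagerly merges the freed block with an adjacent predecessor and/or
 * successor, so two neighbouring free blocks never coexist unmerged.
 */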

/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc
 * frontend.
 */

void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
	void *ret;

	lockdep_trace_alloc(gfp);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);

		if (!m)
			return NULL;
		*m = size;
		ret = (void *)m + align;

		trace_kmalloc_node(_RET_IP_, ret,
				   size, size + align, gfp, node);
	} else {
		unsigned int order = get_order(size);

		if (likely(order))
			gfp |= __GFP_COMP;
		ret = slob_new_pages(gfp, order, node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}

		trace_kmalloc_node(_RET_IP_, ret,
				   size, PAGE_SIZE << order, gfp, node);
	}

	kmemleak_alloc(ret, size, 1, gfp);
	return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
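
/*
 * Layout of a small kmalloc() block, where align is
 * max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN):
 *
 *	+------------------+---------------------+
 *	| size (unsigned)  |       payload       |
 *	+------------------+---------------------+
 *	^                  ^
 *	m                  ret = (void *)m + align
 *
 * kfree() and ksize() step back 'align' bytes from the pointer they are
 * given to recover the stored size.
 */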

void kfree(const void *block)
{
	struct slob_page *sp;

	trace_kfree(_RET_IP_, block);

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	kmemleak_free(block);

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		/* Small allocation: recover the size header and free it. */
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		/* Big allocation: drop the (compound) page reference. */
		put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);

/* can't use ksize for kmem_cache_alloc memory, only kmalloc */
size_t ksize(const void *block)
{
	struct slob_page *sp;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = slob_page(block);
	if (is_slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		return SLOB_UNITS(*m) * SLOB_UNIT;
	} else
		return sp->page.private;
}
EXPORT_SYMBOL(ksize);

struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(void *);
};
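
/*
 * SLOB emulates the slab cache API with this minimal descriptor. Objects
 * carry no per-cache metadata: kmem_cache_alloc_node() below simply picks
 * the slob heap or the page allocator based on c->size, and the caller's
 * kmem_cache supplies the size again on free.
 */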

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *c;

	c = slob_alloc(sizeof(struct kmem_cache),
		GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < ARCH_SLAB_MINALIGN)
			c->align = ARCH_SLAB_MINALIGN;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
	return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
	kmemleak_free(c);
	if (c->flags & SLAB_DESTROY_BY_RCU)
		rcu_barrier();
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);

void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	if (c->size < PAGE_SIZE) {
		b = slob_alloc(c->size, flags, c->align, node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    SLOB_UNITS(c->size) * SLOB_UNIT,
					    flags, node);
	} else {
		b = slob_new_pages(flags, get_order(c->size), node);
		trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
					    PAGE_SIZE << get_order(c->size),
					    flags, node);
	}

	if (c->ctor)
		c->ctor(b);

	kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags);
	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);

static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		slob_free_pages(b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}
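
/*
 * For SLAB_DESTROY_BY_RCU caches, kmem_cache_create() grew c->size by
 * sizeof(struct slob_rcu), so every object ends with an RCU footer:
 *
 *	|<---------------- c->size ---------------->|
 *	|      object      |    struct slob_rcu     |
 *	b                  b + c->size - sizeof(struct slob_rcu)
 *
 * kmem_cache_free() fills in the footer and kmem_rcu_free() above walks
 * back from it to recover b.
 */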

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	kmemleak_free_recursive(b, c->flags);
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}

	trace_kmem_cache_free(_RET_IP_, b);
}
EXPORT_SYMBOL(kmem_cache_free);

unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
	return slob_ready;
}

void __init kmem_cache_init(void)
{
	slob_ready = 1;
}

void __init kmem_cache_init_late(void)
{
	/* Nothing to do */
}