/*
 * SLOB Allocator: Simple List Of Blocks
 *
 * Matt Mackall <mpm@selenic.com> 12/30/03
 *
 * NUMA support by Paul Mundt, 2007.
 *
 * How SLOB works:
 *
 * The core of SLOB is a traditional K&R style heap allocator, with
 * support for returning aligned objects. The granularity of this
 * allocator is as little as 2 bytes, however typically most architectures
 * will require 4 bytes on 32-bit and 8 bytes on 64-bit.
 *
 * The slob heap is a linked list of pages from alloc_pages(), and
 * within each page, there is a singly-linked list of free blocks
 * (slob_t). The heap is grown on demand and allocation from the heap
 * is currently first-fit.
 *
 * Above this is an implementation of kmalloc/kfree. Blocks returned
 * from kmalloc are prepended with a small header recording the
 * requested size. If kmalloc is asked for objects of PAGE_SIZE or
 * larger, it calls alloc_pages() directly, allocating compound pages
 * so the page order does not have to be separately tracked, and also
 * stores the exact allocation size in page->private so that it can be
 * used to accurately provide ksize(). These objects are detected in
 * kfree() because PG_active is not set on them as it is on slob pages.
 *
 * SLAB is emulated on top of SLOB by simply calling constructors for
 * every allocation. Objects are returned with the minimum alignment
 * unless the SLAB_HWCACHE_ALIGN flag is set, in which case the
 * low-level allocator will fragment blocks to create the proper
 * alignment. Again, objects of page-size or greater are allocated by
 * calling alloc_pages(). As SLAB objects know their size, no separate
 * size bookkeeping is necessary and there is essentially no allocation
 * space overhead.
 *
 * NUMA support in SLOB is fairly simplistic, pushing most of the real
 * logic down to the page allocator, and simply doing the node accounting
 * on the upper levels. In the event that a node id is explicitly
 * provided, alloc_pages_node() with the specified node id is used
 * instead. Node aware pages are still inserted in to the global
 * freelist, and these are scanned for by matching against the node id
 * encoded in the page flags. As a result, block allocations that can be
 * satisfied from the freelist will only be done so on pages residing
 * on the same node, in order to prevent random node placement.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <asm/atomic.h>
/*
 * slob_block has a field 'units', which indicates size of block if +ve,
 * or offset of next block if -ve (in SLOB_UNITS).
 *
 * Free blocks of size 1 unit simply contain the offset of the next block.
 * Those with larger size contain their size and the offset of the next
 * block.
 */
#if PAGE_SIZE <= (32767 * 2)
typedef s16 slobidx_t;
#else
typedef s32 slobidx_t;
#endif

struct slob_block {
	slobidx_t units;
};
typedef struct slob_block slob_t;

/*
 * We use struct page fields to manage some slob allocation aspects,
 * however to avoid the horrible mess in include/linux/mm_types.h, we'll
 * just define our own struct slob_page type variant here.
 */
struct slob_page {
	union {
		struct {
			unsigned long flags;	/* mandatory */
			atomic_t _count;	/* mandatory */
			slobidx_t units;	/* free units left in page */
			unsigned long pad[2];
			slob_t *free;		/* first free slob_t in page */
			struct list_head list;	/* linked list of free pages */
		};
		struct page page;
	};
};
static inline void struct_slob_page_wrong_size(void)
{ BUILD_BUG_ON(sizeof(struct slob_page) != sizeof(struct page)); }

/*
 * free_slob_page: call before a slob_page is returned to the page allocator.
 */
static inline void free_slob_page(struct slob_page *sp)
{
	reset_page_mapcount(&sp->page);
	sp->page.mapping = NULL;
}

/*
 * All (partially) free slob pages go on this list.
 */
static LIST_HEAD(free_slob_pages);
/*
 * slob_page: True for all slob pages (false for bigblock pages)
 */
static inline int slob_page(struct slob_page *sp)
{
	return test_bit(PG_active, &sp->flags);
}

static inline void set_slob_page(struct slob_page *sp)
{
	__set_bit(PG_active, &sp->flags);
}

static inline void clear_slob_page(struct slob_page *sp)
{
	__clear_bit(PG_active, &sp->flags);
}

/*
 * slob_page_free: true for pages on free_slob_pages list.
 */
static inline int slob_page_free(struct slob_page *sp)
{
	return test_bit(PG_private, &sp->flags);
}

static inline void set_slob_page_free(struct slob_page *sp)
{
	list_add(&sp->list, &free_slob_pages);
	__set_bit(PG_private, &sp->flags);
}

static inline void clear_slob_page_free(struct slob_page *sp)
{
	list_del(&sp->list);
	__clear_bit(PG_private, &sp->flags);
}
#define SLOB_UNIT sizeof(slob_t)
#define SLOB_UNITS(size) (((size) + SLOB_UNIT - 1)/SLOB_UNIT)
#define SLOB_ALIGN L1_CACHE_BYTES

/*
 * struct slob_rcu is inserted at the tail of allocated slob blocks, which
 * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
 * the block using call_rcu.
 */
struct slob_rcu {
	struct rcu_head head;
	int size;
};

/*
 * slob_lock protects all slob allocator structures.
 */
static DEFINE_SPINLOCK(slob_lock);
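/*
 * Free-block encoding, as used by set_slob()/slob_units()/slob_next()
 * below: a free block of more than one unit stores its size (in units)
 * in s[0].units and the offset (in units, from the page base) of the
 * next free block in s[1].units. A one-unit free block has no room for
 * both, so it stores the negated next-offset in s[0].units; the sign
 * distinguishes the two forms. For example, a 3-unit free block whose
 * successor lives at offset 40 within the page is encoded as
 * s[0].units = 3, s[1].units = 40.
 */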
/*
 * Encode the given size and next info into a free slob block s.
 */
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t offset = next - base;

	if (size > 1) {
		s[0].units = size;
		s[1].units = offset;
	} else
		s[0].units = -offset;
}

/*
 * Return the size of a slob block, in units.
 */
static slobidx_t slob_units(slob_t *s)
{
	if (s->units > 0)
		return s->units;
	return 1;
}

/*
 * Return the next free slob block pointer after this one.
 */
static slob_t *slob_next(slob_t *s)
{
	slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
	slobidx_t next;

	if (s[0].units < 0)
		next = -s[0].units;
	else
		next = s[1].units;
	return base + next;
}

/*
 * Returns true if s is the last free block in its page.
 */
static int slob_last(slob_t *s)
{
	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
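/*
 * Allocate a fresh page from the page allocator, honouring an explicit
 * NUMA node if one was requested (node == -1 means no preference).
 */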
static void *slob_new_page(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;

	return page_address(page);
}
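/*
 * Allocate a slob block within a given slob_page sp. If an alignment is
 * requested, the head of the chosen free block is split off and left on
 * the freelist so that the returned pointer is aligned.
 */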
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
	slob_t *prev, *cur, *aligned = NULL;
	int delta = 0, units = SLOB_UNITS(size);

	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
		slobidx_t avail = slob_units(cur);

		if (align) {
			aligned = (slob_t *)ALIGN((unsigned long)cur, align);
			delta = aligned - cur;
		}
		if (avail >= units + delta) { /* room enough? */
			slob_t *next;

			if (delta) { /* need to fragment head to align? */
				next = slob_next(cur);
				set_slob(aligned, avail - delta, next);
				set_slob(cur, delta, aligned);
				prev = cur;
				cur = aligned;
				avail = slob_units(cur);
			}

			next = slob_next(cur);
			if (avail == units) { /* exact fit? unlink. */
				if (prev)
					set_slob(prev, slob_units(prev), next);
				else
					sp->free = next;
			} else { /* fragment */
				if (prev)
					set_slob(prev, slob_units(prev), cur + units);
				else
					sp->free = cur + units;
				set_slob(cur + units, avail - units, next);
			}

			sp->units -= units;
			if (!sp->units)
				clear_slob_page_free(sp);
			return cur;
		}
		if (slob_last(cur))
			return NULL;
	}
}
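/*
 * slob_alloc: entry point into the slob allocator. Walks the global
 * freelist first-fit; if no partially free page can satisfy the
 * request, a new page is obtained from the page allocator.
 */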
static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
{
	struct slob_page *sp;
	struct list_head *prev;
	slob_t *b = NULL;
	unsigned long flags;

	spin_lock_irqsave(&slob_lock, flags);

	list_for_each_entry(sp, &free_slob_pages, list) {
#ifdef CONFIG_NUMA
		/*
		 * If there's a node specification, search for a partial
		 * page with a matching node id in the freelist.
		 */
		if (node != -1 && page_to_nid(&sp->page) != node)
			continue;
#endif
		/* Enough room on this page? */
		if (sp->units < SLOB_UNITS(size))
			continue;

		/* Attempt to alloc */
		prev = sp->list.prev;
		b = slob_page_alloc(sp, size, align);
		if (!b)
			continue;

		/* Improve fragment distribution and reduce our average
		 * search time by starting our next search here. (see
		 * Knuth vol 1, sec 2.5, pg 449) */
		if (prev != free_slob_pages.prev &&
				free_slob_pages.next != prev->next)
			list_move_tail(&free_slob_pages, prev->next);
		break;
	}
	spin_unlock_irqrestore(&slob_lock, flags);

	/* Not enough space: must allocate a new page */
	if (!b) {
		b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
		if (!b)
			return NULL;
		sp = (struct slob_page *)virt_to_page(b);
		set_slob_page(sp);

		spin_lock_irqsave(&slob_lock, flags);
		sp->units = SLOB_UNITS(PAGE_SIZE);
		sp->free = b;
		INIT_LIST_HEAD(&sp->list);
		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
		set_slob_page_free(sp);
		b = slob_page_alloc(sp, size, align);
		BUG_ON(!b);
		spin_unlock_irqrestore(&slob_lock, flags);
	}
	if (unlikely((gfp & __GFP_ZERO) && b))
		memset(b, 0, size);
	return b;
}
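/*
 * slob_free: entry point into the slob allocator. Freed blocks are
 * coalesced with both neighbours where possible, and a page whose
 * blocks are all free again is handed back to the page allocator.
 */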
static void slob_free(void *block, int size)
{
	struct slob_page *sp;
	slob_t *prev, *next, *b = (slob_t *)block;
	slobidx_t units;
	unsigned long flags;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;
	BUG_ON(!size);

	sp = (struct slob_page *)virt_to_page(block);
	units = SLOB_UNITS(size);

	spin_lock_irqsave(&slob_lock, flags);

	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {
		/* Go directly to page allocator. Do not pass slob allocator */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);
		clear_slob_page(sp);
		free_slob_page(sp);
		free_page((unsigned long)b);
		goto out;
	}

	if (!slob_page_free(sp)) {
		/* This slob page is about to become partially free. Easy! */
		sp->units = units;
		sp->free = b;
		set_slob(b, units,
			(void *)((unsigned long)(b +
					SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
		set_slob_page_free(sp);
		goto out;
	}

	/*
	 * Otherwise the page is already partially free, so find reinsertion
	 * point.
	 */
	sp->units += units;

	if (b < sp->free) {
		set_slob(b, units, sp->free);
		sp->free = b;
	} else {
		prev = sp->free;
		next = slob_next(prev);
		while (b > next) {
			prev = next;
			next = slob_next(prev);
		}

		if (!slob_last(prev) && b + units == next) {
			/* coalesce with the following block */
			units += slob_units(next);
			set_slob(b, units, slob_next(next));
		} else
			set_slob(b, units, next);

		if (prev + slob_units(prev) == b) {
			/* coalesce with the preceding block */
			units = slob_units(b) + slob_units(prev);
			set_slob(prev, units, slob_next(b));
		} else
			set_slob(prev, slob_units(prev), b);
	}
out:
	spin_unlock_irqrestore(&slob_lock, flags);
}
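/*
 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
 */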
#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
#endif
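/*
 * kmalloc layer: small allocations get an 'align'-sized header that
 * records the requested size for kfree()/ksize(); requests that would
 * not fit in a page (less the header) go straight to the page allocator
 * as compound pages, with the size kept in page->private.
 */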
void *__kmalloc_node(size_t size, gfp_t gfp, int node)
{
	unsigned int *m;
	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);

	if (size < PAGE_SIZE - align) {
		if (!size)
			return ZERO_SIZE_PTR;

		m = slob_alloc(size + align, gfp, align, node);
		if (!m)
			return NULL;
		*m = size;
		return (void *)m + align;
	} else {
		void *ret;

		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
		if (ret) {
			struct page *page;
			page = virt_to_page(ret);
			page->private = size;
		}
		return ret;
	}
}
EXPORT_SYMBOL(__kmalloc_node);
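/*
 * kfree must distinguish the two kmalloc cases: slob pages carry the
 * size header in front of the block, while bigblock (compound) pages
 * are simply released back to the page allocator via put_page().
 */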
void kfree(const void *block)
{
	struct slob_page *sp;

	if (unlikely(ZERO_OR_NULL_PTR(block)))
		return;

	sp = (struct slob_page *)virt_to_page(block);
	if (slob_page(sp)) {
		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
		unsigned int *m = (unsigned int *)(block - align);
		slob_free(m, *m + align);
	} else
		put_page(&sp->page);
}
EXPORT_SYMBOL(kfree);
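/* can't use ksize for kmem_cache_alloc memory, only kmalloc */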
size_t ksize(const void *block)
{
	struct slob_page *sp;

	BUG_ON(!block);
	if (unlikely(block == ZERO_SIZE_PTR))
		return 0;

	sp = (struct slob_page *)virt_to_page(block);
	if (slob_page(sp))
		return ((slob_t *)block - 1)->units + SLOB_UNIT;
	else
		return sp->page.private;
}
EXPORT_SYMBOL(ksize);
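/*
 * SLAB emulation: a kmem_cache is just a size, an alignment, flags and
 * an optional constructor; every allocation goes through slob_alloc()
 * or the page allocator directly.
 */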
struct kmem_cache {
	unsigned int size, align;
	unsigned long flags;
	const char *name;
	void (*ctor)(struct kmem_cache *, void *);
};

struct kmem_cache *kmem_cache_create(const char *name, size_t size,
	size_t align, unsigned long flags,
	void (*ctor)(struct kmem_cache *, void *))
{
	struct kmem_cache *c;

	/* slab flags are not GFP flags; allocate the cache with GFP_KERNEL */
	c = slob_alloc(sizeof(struct kmem_cache), GFP_KERNEL, 0, -1);

	if (c) {
		c->name = name;
		c->size = size;
		if (flags & SLAB_DESTROY_BY_RCU) {
			/* leave room for rcu footer at the end of object */
			c->size += sizeof(struct slob_rcu);
		}
		c->flags = flags;
		c->ctor = ctor;
		/* ignore alignment unless it's forced */
		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
		if (c->align < ARCH_SLAB_MINALIGN)
			c->align = ARCH_SLAB_MINALIGN;
		if (c->align < align)
			c->align = align;
	} else if (flags & SLAB_PANIC)
		panic("Cannot create slab cache %s\n", name);

	return c;
}
EXPORT_SYMBOL(kmem_cache_create);

void kmem_cache_destroy(struct kmem_cache *c)
{
	slob_free(c, sizeof(struct kmem_cache));
}
EXPORT_SYMBOL(kmem_cache_destroy);
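/*
 * Objects smaller than a page come from the slob heap; larger objects
 * are whole pages, so their size never needs separate tracking.
 */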
void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
	void *b;

	if (c->size < PAGE_SIZE)
		b = slob_alloc(c->size, flags, c->align, node);
	else
		b = slob_new_page(flags, get_order(c->size), node);

	/* don't run the constructor on a failed allocation */
	if (b && c->ctor)
		c->ctor(c, b);

	return b;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
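/*
 * For SLAB_DESTROY_BY_RCU caches, the object is reused only after an
 * RCU grace period: the struct slob_rcu footer reserved at the end of
 * the object carries the rcu_head and the size needed to free it later.
 */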
static void __kmem_cache_free(void *b, int size)
{
	if (size < PAGE_SIZE)
		slob_free(b, size);
	else
		free_pages((unsigned long)b, get_order(size));
}

static void kmem_rcu_free(struct rcu_head *head)
{
	struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
	void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));

	__kmem_cache_free(b, slob_rcu->size);
}

void kmem_cache_free(struct kmem_cache *c, void *b)
{
	if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
		struct slob_rcu *slob_rcu;
		slob_rcu = b + (c->size - sizeof(struct slob_rcu));
		INIT_RCU_HEAD(&slob_rcu->head);
		slob_rcu->size = c->size;
		call_rcu(&slob_rcu->head, kmem_rcu_free);
	} else {
		__kmem_cache_free(b, c->size);
	}
}
EXPORT_SYMBOL(kmem_cache_free);
unsigned int kmem_cache_size(struct kmem_cache *c)
{
	return c->size;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *c)
{
	return c->name;
}
EXPORT_SYMBOL(kmem_cache_name);

int kmem_cache_shrink(struct kmem_cache *d)
{
	/* nothing to shrink: slob keeps no per-cache object pools */
	return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);

int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{
	return 0;
}
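/*
 * Bootstrap: slob needs no setup beyond flagging itself ready, since
 * all of its state is statically initialised above.
 */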
static unsigned int slob_ready __read_mostly;

int slab_is_available(void)
{
	return slob_ready;
}

void __init kmem_cache_init(void)
{
	slob_ready = 1;
}