/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
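
/*
 * Example (a minimal sketch, not taken from this header): a zero-byte
 * request yields ZERO_SIZE_PTR rather than NULL, and both are safe to
 * hand back to kfree(), which treats them as no-ops:
 *
 *	void *p = kmalloc(0, GFP_KERNEL);
 *
 *	BUG_ON(!ZERO_OR_NULL_PTR(p));	// p is ZERO_SIZE_PTR here
 *	kfree(p);			// no-op for ZERO_SIZE_PTR
 */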

#include <linux/kmemleak.h>

struct mem_cgroup;

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
#ifdef CONFIG_MEMCG_KMEM
void kmem_cache_create_memcg(struct mem_cgroup *, struct kmem_cache *);
#endif
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
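
/*
 * Example (a minimal sketch; "struct foo" and "foo_cache" are
 * hypothetical names): a typical cache lifecycle built on KMEM_CACHE:
 *
 *	struct foo {
 *		int bar;
 *	};
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = KMEM_CACHE(foo, SLAB_PANIC);	// panics on failure
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);			// all objects must be freed first
 */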

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
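
/*
 * Example (a minimal sketch): growing a buffer with krealloc(). On
 * failure the original buffer is left intact, so keep the old pointer
 * until the call succeeds:
 *
 *	char *buf = kmalloc(64, GFP_KERNEL);
 *	char *tmp;
 *
 *	tmp = krealloc(buf, 128, GFP_KERNEL);
 *	if (!tmp) {
 *		kfree(buf);		// old buffer is still valid; free it
 *		return -ENOMEM;
 *	}
 *	buf = tmp;
 */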

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabyte (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It is not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	30
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the object
 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up using a byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */
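
/*
 * Example (a minimal sketch, assuming KMALLOC_MIN_SIZE == 8 and
 * KMALLOC_SHIFT_LOW == 3): how sizes map to cache indices:
 *
 *	kmalloc_index(7)   == 3		// the 8-byte cache
 *	kmalloc_index(80)  == 1		// the special 96-byte cache
 *	kmalloc_index(100) == 7		// the 128-byte cache
 */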

void *__kmalloc(size_t size, gfp_t flags);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	return kmem_cache_alloc(s, flags);
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif /* CONFIG_TRACING */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;

	flags |= (__GFP_COMP | __GFP_KMEMCG);
	ret = (void *) __get_free_pages(flags, order);
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
					flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}
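
/*
 * Example (a minimal sketch; "struct foo" is a hypothetical name):
 * the usual allocate/check/free pattern:
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 */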

/*
 * Determine the size used for the nth kmalloc cache.
 * Returns the size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
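
/*
 * Example (a minimal sketch; "dev" is a hypothetical struct device *):
 * allocate from the NUMA node backing a given device, so the buffer is
 * local to the hardware that will touch it:
 *
 *	int nid = dev_to_node(dev);	// may be NUMA_NO_NODE
 *	void *p = kmalloc_node(sizeof(struct foo), GFP_KERNEL, nid);
 */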

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * struct kmem_cache will allocate a variable sized structure like this
 * depending on how many memcgs exist.
 *
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @list: list_head for the list of all caches in this memcg
 * @root_cache: pointer to the global, root cache, this cache was derived from
 * @dead: set to true after the memcg dies; the cache may still be around.
 * @nr_pages: number of pages that belong to this cache.
 * @destroy: worker to be called whenever we are ready, or believe we may be
 *           ready, to destroy the cache.
 */
struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct {
			struct rcu_head rcu_head;
			struct kmem_cache *memcg_caches[0];
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			bool dead;
			atomic_t nr_pages;
			struct work_struct destroy;
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

struct seq_file;
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags);
}
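
/*
 * Example (a minimal sketch; "nr_entries" is a hypothetical count):
 * the n * size multiplication is overflow-checked above, so a huge
 * element count fails cleanly instead of allocating a truncated buffer:
 *
 *	u32 *tbl = kmalloc_array(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 */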

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
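
/*
 * Example (a minimal sketch; "slots" is a hypothetical array): kcalloc()
 * is kmalloc_array() plus __GFP_ZERO, so the result needs no memset():
 *
 *	struct foo *slots = kcalloc(16, sizeof(*slots), GFP_KERNEL);
 */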

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, yes?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif
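
/*
 * Example (a minimal sketch; "my_strdup" is a hypothetical helper in the
 * style of kstrdup()): a widely-used wrapper allocates via the
 * _track_caller variant so debug tooling blames the wrapper's caller,
 * not the wrapper itself:
 *
 *	char *my_strdup(const char *s, gfp_t gfp)
 *	{
 *		size_t len = strlen(s) + 1;
 *		char *p = kmalloc_track_caller(len, gfp);
 *
 *		if (p)
 *			memcpy(p, s, len);
 *		return p;
 *	}
 */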

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the calling function of the routine calling it for slab leak
 * tracking instead of just the calling function (confusing, yes?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}
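
/*
 * Example (a minimal sketch; "foo_cache" is a hypothetical cache):
 *
 *	struct foo *f = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 *	// f is fully zeroed; no explicit memset() needed
 */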

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}
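
/*
 * Example (a minimal sketch): kzalloc() is the usual way to get a
 * zero-initialized structure, e.g. so pointer members start out NULL:
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 */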

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */