/*
 * Unified interface for all slab allocators (SLAB, SLUB and SLOB).
 */
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */

/*
 * SLAB_DESTROY_BY_RCU: the pages backing a slab are freed only after an
 * RCU grace period, but individual objects may be reused immediately
 * after kmem_cache_free(). Lockless readers relying on this flag must
 * therefore revalidate the object (e.g. recheck its key) after taking
 * a reference to it.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
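
/*
 * Example (illustrative, not part of the original header): a zero sized
 * request yields ZERO_SIZE_PTR rather than NULL, so "allocation failed"
 * and "nothing was allocated" are distinct conditions:
 *
 *	p = kmalloc(0, GFP_KERNEL);	// returns ZERO_SIZE_PTR
 *	if (ZERO_OR_NULL_PTR(p))
 *		;			// no usable memory behind "p"
 *	kfree(p);			// safe: kfree of ZERO_SIZE_PTR is a no-op
 */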

#include <linux/kmemleak.h>
#include <linux/kasan.h>

struct mem_cgroup;

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);
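
/*
 * Example (illustrative sketch, not part of the original header): the
 * typical lifecycle of a dedicated cache. "struct foo" and "foo_cachep"
 * are hypothetical names.
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 *	kmem_cache_destroy(foo_cachep);
 */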

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the struct and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
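
/*
 * Example (illustrative): the cache from the sketch above could instead
 * be created with KMEM_CACHE(), which derives the name, size and
 * alignment from the struct definition:
 *
 *	foo_cachep = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
 */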

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
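
/*
 * Example (illustrative): growing a buffer with krealloc(). On failure
 * the old buffer is left untouched, so it must not be overwritten until
 * the call succeeds:
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;	// "buf" is still valid here
 *	buf = new;
 */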

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that larger alignment.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabyte (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	30
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in that case, a byte sized
 * index can represent 2^8 entries, so objects must be at least
 * 2^12 / 2^8 = 2^4 = 16 bytes. If the minimum kmalloc size is less
 * than 16, use it as the minimum object size and give up on the byte
 * sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
void kmem_cache_free(struct kmem_cache *, void *);

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	kasan_kmalloc(s, ret, size);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	kasan_kmalloc(s, ret, size);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches.
 *
 * Additional modifier flags such as %__GFP_ZERO, %__GFP_NOWARN and
 * %__GFP_NORETRY may be OR'ed in. For a full list of potential flags,
 * always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
						      flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}
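
/*
 * Example (illustrative): the common kmalloc() pattern. With a constant
 * size the call compiles down to a direct allocation from the matching
 * kmalloc cache:
 *
 *	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 */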

/*
 * Determine the size used for the nth kmalloc cache.
 * Return size or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}
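
/*
 * Example (illustrative): kmalloc_index() and kmalloc_size() are
 * inverse mappings. A 100 byte request falls in the 128 byte cache:
 * kmalloc_index(100) == 7 and kmalloc_size(7) == 128.
 */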

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						   flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
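
/*
 * Example (illustrative sketch): allocating memory local to a device's
 * NUMA node; "pdev" is a hypothetical struct pci_dev pointer:
 *
 *	ring = kmalloc_node(sizeof(*ring), GFP_KERNEL,
 *			    dev_to_node(&pdev->dev));
 */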

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @root_cache: pointer to the global, root cache, this cache was derived from
 *
 * Both root and child caches of the same kind are linked into a list chained
 * through @list.
 */
struct memcg_cache_params {
	bool is_root_cache;
	struct list_head list;
	union {
		struct memcg_cache_array __rcu *memcg_caches;
		struct {
			struct mem_cgroup *memcg;
			struct kmem_cache *root_cache;
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags);
}
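
/*
 * Example (illustrative): the n > SIZE_MAX / size check above rejects
 * element counts whose product would wrap around, so an attacker
 * controlled count cannot turn a huge request into a tiny allocation:
 *
 *	tbl = kmalloc_array(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 */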

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
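
/*
 * Example (illustrative): kcalloc() is kmalloc_array() plus __GFP_ZERO,
 * i.e. an overflow checked, zeroed array allocation:
 *
 *	slots = kcalloc(nr_slots, sizeof(*slots), GFP_KERNEL);
 */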

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, yes).
 *
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
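
/*
 * Example (illustrative sketch): a wrapper allocator passes its own
 * return address along, so leak reports blame the wrapper's caller
 * rather than the wrapper itself. "my_strdup" is a hypothetical name:
 *
 *	char *my_strdup(const char *s, gfp_t gfp)
 *	{
 *		size_t len = strlen(s) + 1;
 *		char *p = kmalloc_track_caller(len, gfp);
 *
 *		if (p)
 *			memcpy(p, s, len);
 *		return p;
 *	}
 */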

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}
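
/*
 * Example (illustrative): kzalloc() replaces the open-coded
 * kmalloc() + memset() pair:
 *
 *	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *	if (!ctx)
 *		return -ENOMEM;
 */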

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */