1
2
3
4
5
6
7
8
9
10
11#ifndef _LINUX_SLAB_H
12#define _LINUX_SLAB_H
13
14#include <linux/gfp.h>
15#include <linux/types.h>
16#include <linux/workqueue.h>
17
18
19
20
21
22
/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE 0x00000100UL	/* DEBUG: perform (expensive) checks on free */
#define SLAB_RED_ZONE 0x00000400UL	/* DEBUG: red-zone objects in a cache */
#define SLAB_POISON 0x00000800UL	/* DEBUG: poison objects */
#define SLAB_HWCACHE_ALIGN 0x00002000UL	/* align objects on hardware cache lines */
#define SLAB_CACHE_DMA 0x00004000UL	/* use GFP_DMA memory */
#define SLAB_STORE_USER 0x00010000UL	/* DEBUG: store the last owner for bug hunting */
#define SLAB_PANIC 0x00040000UL	/* panic if kmem_cache_create() fails */
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
/*
 * SLAB_DESTROY_BY_RCU - defer freeing of the underlying slab *pages* until
 * an RCU grace period has elapsed.  NOTE(review): per the kernel's own
 * documentation this does not delay reuse of individual objects — an object
 * may be recycled within the same cache immediately after kmem_cache_free(),
 * so lockless readers must revalidate what they found.
 */
#define SLAB_DESTROY_BY_RCU 0x00080000UL	/* defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD 0x00100000UL	/* spread some memory over cpuset */
#define SLAB_TRACE 0x00200000UL	/* trace allocations and frees */

/* Flag to prevent checks on free (only active with CONFIG_DEBUG_OBJECTS) */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS 0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS 0x00000000UL
#endif

#define SLAB_NOLEAKTRACE 0x00800000UL	/* avoid kmemleak tracing */

/* Don't track use of uninitialized memory (kmemcheck) */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK 0x01000000UL
#else
# define SLAB_NOTRACK 0x00000000UL
#endif
/* Fault injection mark (failslab) */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB 0x02000000UL
#else
# define SLAB_FAILSLAB 0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL	/* objects are reclaimable */
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT	/* objects are short-lived */
93
94
95
96
97
98
99
100
/*
 * ZERO_SIZE_PTR is returned for zero-sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR leads to a distinct access fault (it is a
 * small non-NULL value, so it is distinguishable from a NULL dereference).
 *
 * ZERO_SIZE_PTR can be passed to kfree() in the same way that NULL can:
 * both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

/* True for NULL, ZERO_SIZE_PTR, and anything below it */
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
	(unsigned long)ZERO_SIZE_PTR)
105
106#include <linux/kmemleak.h>
107#include <linux/kasan.h>
108
struct mem_cgroup;

/*
 * struct kmem_cache related prototypes (implemented by whichever slab
 * allocator — SLAB/SLUB/SLOB — the kernel is built with).
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

/* Create a cache of fixed-size objects: name, size, align, flags, ctor */
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

/* Per-memcg child-cache management hooks */
void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);
125
126
127
128
129
130
131
132
133
/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment: if you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
137
138
139
140
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);	/* zero the object's memory before freeing */
size_t ksize(const void *);	/* actual usable size of an allocation */
146
147
148
149
150
151
/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.  Setting
 * ARCH_DMA_MINALIGN in arch headers raises the kmalloc minimum alignment
 * (and hence the minimum kmalloc size) to that value.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
159
160
161
162
163
#ifdef CONFIG_SLAB
/*
 * SLAB: the largest kmalloc size supported is 32 MB (2^25), or the maximum
 * allocatable page order if that is less than 32 MB.
 *
 * WARNING: it is not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
		(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB: caches serve requests up to an order-1 page (2 * PAGE_SIZE);
 * anything larger is passed straight to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB: requests larger than one page go to the page allocator; no
 * kmalloc cache array is needed since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
#define KMALLOC_SHIFT_MAX 30
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which a dedicated kmalloc slab cache exists */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Smallest kmalloc allocation, derived from KMALLOC_SHIFT_LOW unless an
 * arch already pinned it via ARCH_DMA_MINALIGN above.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * Lower bound on slab object size.  NOTE(review): presumably this comes
 * from the byte-sized freelist index optimisation — with a 2^12 page and
 * a one-byte index covering 2^8 entries, objects must be at least
 * 2^12 / 2^8 = 16 bytes; if KMALLOC_MIN_SIZE is smaller, that smaller
 * value is used and the byte-sized index given up.  Confirm against the
 * allocator implementation.
 */
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
		(KMALLOC_MIN_SIZE) : 16)
231
#ifndef CONFIG_SLOB
/* Statically-created kmalloc caches, indexed by kmalloc_index(size) */
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * kmalloc_index - figure out which kmalloc slab cache an allocation of a
 * certain size maps to.
 *
 * Index mapping:
 *   0 = zero-size allocation (caller must return ZERO_SIZE_PTR)
 *   1 =  65 ..  96 bytes (special cache, only if KMALLOC_MIN_SIZE <= 32)
 *   2 = 129 .. 192 bytes (special cache, only if KMALLOC_MIN_SIZE <= 64)
 *   n = 2^(n-1)+1 .. 2^n bytes otherwise
 *
 * Intended to be fully constant-folded when @size is a compile-time
 * constant; BUG()s on sizes beyond 64 MB.  The special-case checks MUST
 * precede the power-of-two ladder so 65..96 and 129..192 do not fall
 * into the 128/256 caches.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	/* Everything up to the minimum size shares the smallest cache */
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
	if (size <= 4 * 1024) return 12;
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	if (size <= 4 * 1024 * 1024) return 22;
	if (size <= 8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Unreachable; keeps the compiler happy about a missing return */
	return -1;
}
#endif /* !CONFIG_SLOB */
288
void *__kmalloc(size_t size, gfp_t flags);
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations.  These may be accelerated in an
 * allocator-specific way to avoid taking locks repeatedly.
 * kmem_cache_alloc_bulk() returns a bool success flag rather than a
 * pointer; on failure the caller owns no objects from this call.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
bool kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
302
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#else
/*
 * !CONFIG_NUMA: there is only one node, so the node-aware variants
 * simply discard @node and call the node-agnostic allocators.
 */
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif
317
#ifdef CONFIG_TRACING
/* With tracing, the _trace variants are real (out-of-line) functions */
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					gfp_t gfpflags,
					int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			gfp_t gfpflags,
			int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
/*
 * Without tracing, the _trace variants collapse to plain allocation plus
 * a KASAN annotation of the originally requested size (so KASAN can
 * detect out-of-bounds accesses within the slab object's padding).
 */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	kasan_kmalloc(s, ret, size);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			gfp_t gfpflags,
			int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	kasan_kmalloc(s, ret, size);
	return ret;
}
#endif /* CONFIG_TRACING */
356
/* Page-allocator path for allocations too big for the kmalloc caches */
extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
/* No tracing: forward straight to kmalloc_order() */
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif
368
369static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
370{
371 unsigned int order = get_order(size);
372 return kmalloc_order_trace(size, flags, order);
373}
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (GFP_* mask).
 *
 * For compile-time-constant sizes the target cache is selected statically
 * here so the whole lookup constant-folds away: oversized requests go to
 * the page allocator via kmalloc_large(), zero-size requests yield
 * ZERO_SIZE_PTR, and everything else indexes kmalloc_caches[] directly
 * (except GFP_DMA, which only __kmalloc() knows how to route).
 * Non-constant sizes always take the generic __kmalloc() path.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			/* index 0 means a zero-byte request */
			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
						flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}
447
448
449
450
451
452
453static __always_inline int kmalloc_size(int n)
454{
455#ifndef CONFIG_SLOB
456 if (n > 2)
457 return 1 << n;
458
459 if (n == 1 && KMALLOC_MIN_SIZE <= 32)
460 return 96;
461
462 if (n == 2 && KMALLOC_MIN_SIZE <= 64)
463 return 192;
464#endif
465 return 0;
466}
467
/*
 * kmalloc_node - allocate memory from a specific NUMA node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: NUMA node to allocate from.
 *
 * Mirrors kmalloc(): constant, cache-sized, non-DMA requests resolve
 * their cache statically via kmalloc_index(); everything else goes
 * through __kmalloc_node().
 */
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int i = kmalloc_index(size);

		/* index 0 means a zero-byte request */
		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
484
485
486
487
488
489
/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different minimum
 * alignment for slab objects than the 64-bit-integer default.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * RCU-replaceable array of per-memcg child caches; freed via @rcu after a
 * grace period so lockless readers can walk @entries safely.
 */
struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];	/* zero-length array: entries follow inline */
};
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
/*
 * Placeholder for memcg-related information in kmem caches.  Both the root
 * cache and its per-memcg child caches carry one; @is_root_cache selects
 * which arm of the union is live:
 *
 * root cache:  @memcg_caches - RCU-protected array of child caches
 * child cache: @memcg       - the memcg this cache belongs to
 *              @root_cache  - the global root cache it was derived from
 *
 * @list links the params into the allocator's cache list.
 */
struct memcg_cache_params {
	bool is_root_cache;
	struct list_head list;
	union {
		struct memcg_cache_array __rcu *memcg_caches;
		struct {
			struct mem_cgroup *memcg;
			struct kmem_cache *root_cache;
		};
	};
};

int memcg_update_all_caches(int num_memcgs);
528
529
530
531
532
533
534
535static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
536{
537 if (size != 0 && n > SIZE_MAX / size)
538 return NULL;
539 return __kmalloc(n * size, flags);
540}
541
542
543
544
545
546
547
548static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
549{
550 return kmalloc_array(n, size, flags | __GFP_ZERO);
551}
552
553
554
555
556
557
558
559
560
/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking
 * instead of just the calling function (confusing, eh?).  Useful when the
 * kmalloc call sits inside a widely-used wrapper and we care about the
 * real origin of the allocation request.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

/* Single node: caller tracking ignores @node */
#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */
577
578
579
580
581static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
582{
583 return kmem_cache_alloc(k, flags | __GFP_ZERO);
584}
585
586
587
588
589
590
591static inline void *kzalloc(size_t size, gfp_t flags)
592{
593 return kmalloc(size, flags | __GFP_ZERO);
594}
595
596
597
598
599
600
601
602static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
603{
604 return kmalloc_node(size, flags | __GFP_ZERO, node);
605}
606
/* NOTE(review): presumably returns the cache's object size — confirm in the allocator */
unsigned int kmem_cache_size(struct kmem_cache *s);
/* Second stage of slab bootstrap, run late in kernel init */
void __init kmem_cache_init_late(void);
609
610#endif
611