/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */
#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif
#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
# define SLAB_ACCOUNT		0x04000000UL	/* Account allocations to memcg */
#else
# define SLAB_ACCOUNT		0x00000000UL
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		0x08000000UL
#else
#define SLAB_KASAN		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kmemleak.h>
#include <linux/kasan.h>

struct mem_cgroup;

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
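
/*
 * A minimal usage sketch, assuming a hypothetical subsystem with a
 * "struct foo" object type; foo_cache and foo_init() are invented
 * names, not kernel API:
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *		return 0;	// SLAB_PANIC: create either succeeds or panics
 *	}
 *
 * Objects are then obtained with kmem_cache_alloc(foo_cache, GFP_KERNEL)
 * and released with kmem_cache_free(foo_cache, obj).
 */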

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabyte (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	30
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * byte sized index which can represent 2^8 entries, the size of the object
 * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
 * If minimum size of kmalloc is less than 16, we use it as minimum object
 * size and give up to use byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
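
/*
 * Worked example (assuming KMALLOC_MIN_SIZE <= 32): kmalloc_index(100)
 * falls through to the power-of-two ladder and returns 7, selecting the
 * 128-byte cache, while kmalloc_index(96) hits the special case above it
 * and returns 1, the dedicated 96-byte cache.
 */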
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
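
/*
 * Usage sketch (foo_cache is a hypothetical cache): allocate a batch of
 * objects with one call and return them the same way; on success
 * kmem_cache_alloc_bulk() fills the whole array and returns non-zero:
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, 16, objs)) {
 *		// ... use all 16 objects ...
 *		kmem_cache_free_bulk(foo_cache, 16, objs);
 *	}
 */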

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node, size_t size) __assume_slab_alignment;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
					flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}
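
/*
 * Usage sketch ("struct foo" is hypothetical): the sizeof() below is a
 * compile-time constant, so the call resolves directly to the matching
 * kmalloc cache via kmalloc_index():
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	// ... use p ...
 *	kfree(p);
 */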

/*
 * Determine size used for the nth kmalloc cache.
 * return size or 0 if a kmalloc cache for that
 * size does not exist
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking a lock, on relocation we free the old
 * version only after a grace period.
 *
 * Child caches will hold extra metadata needed for their operation. Fields are:
 *
 * @memcg: pointer to the memcg this cache belongs to
 * @root_cache: pointer to the global, root cache, this cache was derived from
 *
 * Both root and child caches of the same kind are bidirectionally linked into
 * a list by the slab_mutex protected @list.
 */
struct memcg_cache_params {
	bool is_root_cache;
	struct list_head list;
	union {
		struct memcg_cache_array __rcu *memcg_caches;
		struct {
			struct mem_cgroup *memcg;
			struct kmem_cache *root_cache;
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
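
/*
 * Usage sketch (nr_entries and "struct foo" are hypothetical): kcalloc()
 * is the overflow-safe way to size a zeroed array; an open-coded
 * kmalloc(n * size, ...) can silently wrap for large n:
 *
 *	struct foo *tbl = kcalloc(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;	// also taken when n * size would overflow
 */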

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}
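
/*
 * Usage sketch (ctx and pdev are hypothetical): kzalloc_node() places a
 * zeroed allocation on a chosen NUMA node, e.g. the node closest to the
 * device that will touch the memory:
 *
 *	ctx = kzalloc_node(sizeof(*ctx), GFP_KERNEL,
 *			   dev_to_node(&pdev->dev));
 */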

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */