/*
 * Unified interface for all slab allocators (SLAB, SLUB, SLOB).
 */
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */

/*
 * SLAB_DESTROY_BY_RCU: delay the freeing of a cache's *slab pages* (not
 * the individual objects) by an RCU grace period.  An object may be
 * freed and reused immediately, but its underlying memory remains valid
 * slab memory until the grace period ends.  Lockless readers under
 * rcu_read_lock() must therefore revalidate the object after obtaining
 * a reference to it, since it may have been recycled for a new object
 * of the same type in the meantime.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
# define SLAB_ACCOUNT		0x04000000UL	/* Account to memcg */
#else
# define SLAB_ACCOUNT		0x00000000UL
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		0x08000000UL
#else
#define SLAB_KASAN		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
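
/*
 * Illustrative example (a sketch, not part of this header; "buf" and
 * "len" are hypothetical names).  ZERO_OR_NULL_PTR() lets callers that
 * may pass a zero size tell "no usable allocation" apart from a real
 * failure:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	if (!buf && len)
 *		return -ENOMEM;		(real allocation failure)
 *	if (ZERO_OR_NULL_PTR(buf))
 *		return 0;		(nothing to do for len == 0)
 */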

#include <linux/kmemleak.h>
#include <linux/kasan.h>

struct mem_cgroup;

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
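
/*
 * Illustrative cache lifetime (a sketch, not part of this header;
 * "foo" and "foo_cache" are hypothetical names):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *				      0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */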

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
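
/*
 * Illustrative KMEM_CACHE() usage (hypothetical struct name):
 *
 *	struct foo_req {
 *		...
 *	} ____cacheline_aligned_in_smp;
 *
 *	static struct kmem_cache *foo_req_cache;
 *
 *	foo_req_cache = KMEM_CACHE(foo_req, SLAB_PANIC);
 *
 * This expands to kmem_cache_create("foo_req", sizeof(struct foo_req),
 * __alignof__(struct foo_req), SLAB_PANIC, NULL).
 */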

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
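
/*
 * Illustrative krealloc() usage (a sketch; "p" and "new_len" are
 * hypothetical names).  krealloc() returns NULL on failure and leaves
 * the original buffer intact, so keep the old pointer until the resize
 * is known to have succeeded:
 *
 *	void *tmp = krealloc(p, new_len, GFP_KERNEL);
 *
 *	if (!tmp)
 *		return -ENOMEM;		(p is still valid here)
 *	p = tmp;
 */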

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
const char *__check_heap_object(const void *ptr, unsigned long n,
				struct page *page);
#else
static inline const char *__check_heap_object(const void *ptr,
					      unsigned long n,
					      struct page *page)
{
	return NULL;
}
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions:
 *
 * KMALLOC_SHIFT_HIGH - the largest size (as a power-of-two shift) for
 *	which a kmalloc slab cache exists.
 * KMALLOC_SHIFT_MAX  - the largest size that can be obtained from
 *	kmalloc() at all.
 * KMALLOC_SHIFT_LOW  - the size of the smallest kmalloc cache.
 */
#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabyte (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the
 * object should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the
 * minimum object size and give up using a byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif
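
/*
 * Illustrative mapping (assuming KMALLOC_MIN_SIZE == 8):
 *
 *	kmalloc_index(8)   == 3    ->   8 byte cache
 *	kmalloc_index(96)  == 1    ->  96 byte cache (non-power-of-two)
 *	kmalloc_index(100) == 7    -> rounded up to the 128 byte cache
 */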

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(); use kmem_cache_free_bulk() instead.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
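
/*
 * Illustrative bulk usage (a sketch; "cache", "objs" and "nr" are
 * hypothetical names).  kmem_cache_alloc_bulk() returns the number of
 * objects actually allocated; treating anything short of the full
 * request as failure and freeing what was returned is a conservative
 * pattern:
 *
 *	void *objs[16];
 *	int nr;
 *
 *	nr = kmem_cache_alloc_bulk(cache, GFP_KERNEL, 16, objs);
 *	if (nr < 16) {
 *		kmem_cache_free_bulk(cache, nr, objs);
 *		return -ENOMEM;
 *	}
 *	...
 *	kmem_cache_free_bulk(cache, 16, objs);
 */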

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_DMA.
 *
 * It is also possible to OR in one or more of the following
 * additional @flags:
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
						      flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}
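
/*
 * Illustrative kmalloc() usage (a sketch; "buf" is a hypothetical
 * name):
 *
 *	char *buf = kmalloc(128, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 *
 * In atomic context (e.g. an interrupt handler) use GFP_ATOMIC rather
 * than GFP_KERNEL, since GFP_KERNEL may sleep.
 */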

/*
 * Determine the size used for the nth kmalloc cache.
 * Return the size, or 0 if a kmalloc cache for that
 * size does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						   flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
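
/*
 * Illustrative kmalloc_node() usage (a sketch; "p" and "cpu" are
 * hypothetical names): allocate on the node backing a given CPU so the
 * data stays node-local:
 *
 *	p = kmalloc_node(sizeof(*p), GFP_KERNEL, cpu_to_node(cpu));
 *	if (!p)
 *		return -ENOMEM;
 */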

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem
 * caches.  Both the root cache and the child caches have it; some
 * fields are used in both cases, others are specific to root caches.
 *
 * @root_cache:	Common to root and child caches.  NULL for root, pointer
 *		to the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches.  This table
 *		is used to index child caches during allocation and is
 *		cleared early during shutdown.
 *
 * @__root_caches_node: List node for the list of all root caches.
 *
 * @children:	List of all child caches.  While the child caches are
 *		also reachable through @memcg_caches, a child cache
 *		remains on this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 *
 * @deact_fn:	Deactivation function, invoked via @deact_rcu_head /
 *		@deact_work after an RCU grace period.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;

			void (*deact_fn)(struct kmem_cache *);
			union {
				struct rcu_head deact_rcu_head;
				struct work_struct deact_work;
			};
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 *
 * Returns NULL if n * size would overflow.
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(n * size, flags);
	return __kmalloc(n * size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
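
/*
 * Illustrative array allocation (a sketch; "tbl" and "nents" are
 * hypothetical names).  kmalloc_array()/kcalloc() guard against
 * n * size overflowing, so prefer them to open-coded
 * kmalloc(n * size, ...):
 *
 *	struct entry *tbl = kcalloc(nents, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 */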

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking
 * instead of just the immediate caller.  It is useful when wrappers
 * around kmalloc() (e.g. kstrdup()-style helpers) should not themselves
 * show up as the allocation site.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}
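
/*
 * Illustrative kzalloc() usage (a sketch; "struct foo_dev" is a
 * hypothetical type): zeroing at allocation time avoids leaking stale
 * slab contents through uninitialized fields:
 *
 *	struct foo_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *	if (!dev)
 *		return -ENOMEM;
 */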

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */