1
2
3
4
5
6
7
8
9
10
11
12#ifndef _LINUX_SLAB_H
13#define _LINUX_SLAB_H
14
15#include <linux/gfp.h>
16#include <linux/overflow.h>
17#include <linux/types.h>
18#include <linux/workqueue.h>
19#include <linux/percpu-refcount.h>
20
21
22
23
24
25
26
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
/* Align objects on cache line boundaries */
#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC ((slab_flags_t __force)0x00040000U)
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * Defers freeing of the underlying slab pages by an RCU grace period, so a
 * lockless reader may still dereference an object from such a cache after
 * it was freed.  The object itself may be reused immediately, however, so
 * readers must revalidate it (e.g. via a reference count initialized only
 * by the cache's constructor) after obtaining a pointer.
 *
 * NOTE(review): condensed summary — consult the upstream kernel
 * documentation for this flag before relying on the exact guarantees.
 */
#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE ((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free (debugobjects integration) */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS 0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)

/* Fault injection mark (only meaningful with CONFIG_FAILSLAB) */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB 0
#endif
/* Account objects to kmemcg (only with CONFIG_MEMCG_KMEM) */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT 0
#endif

/* KASAN instrumentation of this cache (only with CONFIG_KASAN) */
#ifdef CONFIG_KASAN
#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN 0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */

/* Slab deactivation flag */
#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U)
122
123
124
125
126
127
128
129
130
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

/* True for NULL and for ZERO_SIZE_PTR (both are "nothing to free"). */
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
135
136#include <linux/kasan.h>
137
138struct mem_cgroup;
139
140
141
/* Initialize the slab allocator during early boot. */
void __init kmem_cache_init(void);
/* True once the slab allocator is usable. */
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
/*
 * Like kmem_cache_create(), but additionally declares the region
 * [useroffset, useroffset + usersize) of each object as allowed for
 * copies to/from userspace (see KMEM_CACHE_USERCOPY below).
 */
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

/* memcg integration: create/deactivate per-memcg child caches. */
void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
160
161
162
163
164
165
166
167
168
/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) \
		kmem_cache_create(#__struct, sizeof(struct __struct), \
			__alignof__(struct __struct), (__flags), NULL)

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead for KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
		kmem_cache_create_usercopy(#__struct, \
			sizeof(struct __struct), \
			__alignof__(struct __struct), (__flags), \
			offsetof(struct __struct, __field), \
			sizeof_field(struct __struct, __field), NULL)
183
184
185
186
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
/* Zeroing variant of kfree() — NOTE(review): confirm semantics in mm/. */
void kzfree(const void *);
size_t __ksize(const void *);
/* Report the actual usable size of an allocated object. */
size_t ksize(const void *);

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
/* Without hardened usercopy, heap object bounds checking is a no-op. */
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif
201
202
203
204
205
206
/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers raises the kmalloc alignment
 * (and minimum object size) accordingly.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows for a larger minimum
 * slab object alignment than the 64-bit-integer default below.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Tell the compiler what alignments the allocator entry points guarantee,
 * so it can generate better code at the call sites.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
232
233
234
235
236
/*
 * Kmalloc array related definitions:
 *
 * KMALLOC_SHIFT_HIGH: log2 of the largest size served from a kmalloc cache;
 *	larger requests are passed to the page allocator.
 * KMALLOC_SHIFT_MAX:  log2 of the largest size kmalloc() supports at all.
 * KMALLOC_SHIFT_LOW:  log2 of the smallest kmalloc cache size.
 */
#ifdef CONFIG_SLAB
/*
 * SLAB serves all sizes from its caches, limited only by MAX_ORDER and
 * capped at 2^25 = 32 MB.
 */
#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests larger than two pages from the page
 * allocator.
 */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests of a page or larger to the page allocator and
 * therefore keeps no kmalloc cache array above PAGE_SHIFT.
 */
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Minimum kmalloc object size, unless an arch override (e.g. for DMA
 * alignment) already defined it above.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from a byte-sized index implementation: with a
 * normal 2^12-byte page and a byte index covering 2^8 entries, objects
 * must be at least 2^12 / 2^8 = 16 bytes.  If the minimum kmalloc size is
 * smaller than 16, use it as the minimum object size and give up on the
 * byte-sized index.
 */
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)
304
305
306
307
308
/*
 * Types of kmalloc caches; kmalloc_type() below maps gfp flags onto these.
 * Whenever changing this, keep kmalloc_type() in sync.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,	/* default kmalloc caches */
	KMALLOC_RECLAIM,	/* caches for __GFP_RECLAIMABLE allocations */
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,		/* caches for __GFP_DMA allocations */
#endif
	NR_KMALLOC_TYPES	/* number of types above, not a real type */
};
317
#ifndef CONFIG_SLOB
/* kmalloc caches, indexed by [cache type][size class from kmalloc_index()] */
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

/* Map gfp flags to the kmalloc cache type that must serve the request. */
static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch covering both relevant flags.
	 */
	if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the two flags is set here.  If both are,
	 * __GFP_DMA takes precedence over __GFP_RECLAIMABLE.
	 */
	return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
#else
	/* Without CONFIG_ZONE_DMA only the reclaimable distinction exists. */
	return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
}
341
342
343
344
345
346
347
348
349
/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Intended to be fully resolved at compile time for constant sizes.
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	/* The 96- and 192-byte caches only exist for small minimum sizes. */
	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
	if (size <= 4 * 1024) return 12;
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	if (size <= 4 * 1024 * 1024) return 22;
	if (size <= 8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
391#endif
392
void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations.  These may be accelerated in an
 * allocator-specific way compared to calling the single-object functions
 * in a loop.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * kfree_bulk - free an array of kmalloc'ed objects; a NULL cache pointer
 * tells kmem_cache_free_bulk() to look up each object's cache itself.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
415
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
/* !NUMA: node-aware variants ignore @node and use the generic paths. */
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif
430
#ifdef CONFIG_TRACING
/* _trace variants additionally carry the originally requested size. */
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					gfp_t gfpflags,
					int node, size_t size) __assume_slab_alignment __malloc;
#else
/* !NUMA: drop @node and defer to the node-agnostic trace variant. */
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif

#else
/*
 * !CONFIG_TRACING: the _trace variants degenerate to a plain allocation
 * followed by a kasan_kmalloc() annotation of the requested size.
 */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif
469
/* Page-allocator-backed allocation for sizes above KMALLOC_MAX_CACHE_SIZE. */
extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
/* !CONFIG_TRACING: the trace variant is just kmalloc_order(). */
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif
481
482static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
483{
484 unsigned int order = get_order(size);
485 return kmalloc_order_trace(size, flags, order);
486}
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
/*
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: describe the allocation context (gfp flags).
 *
 * For sizes known at compile time the kmalloc cache lookup is resolved
 * right here; runtime-sized requests go through __kmalloc().
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		/* Too big for any kmalloc cache: back it by whole pages. */
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		/* Index 0 means a zero-sized request. */
		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
559
560
561
562
563
564
565static __always_inline unsigned int kmalloc_size(unsigned int n)
566{
567#ifndef CONFIG_SLOB
568 if (n > 2)
569 return 1U << n;
570
571 if (n == 1 && KMALLOC_MIN_SIZE <= 32)
572 return 96;
573
574 if (n == 2 && KMALLOC_MIN_SIZE <= 64)
575 return 192;
576#endif
577 return 0;
578}
579
/*
 * kmalloc_node - allocate memory from a specific NUMA node.
 * Compile-time constant sizes that fit a kmalloc cache are resolved to a
 * direct cache allocation here; everything else goes to __kmalloc_node().
 */
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		/* Index 0 means a zero-sized request. */
		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
597
598struct memcg_cache_array {
599 struct rcu_head rcu;
600 struct kmem_cache *entries[0];
601};
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
/*
 * Per-cache memcg state.  A cache is either a root cache or a per-memcg
 * child cache; the two anonymous structs in the union describe those two
 * roles respectively.
 */
struct memcg_cache_params {
	/* Back-pointer to the root cache. */
	struct kmem_cache *root_cache;
	union {
		struct {	/* root cache */
			/* RCU-protected array of per-memcg child caches. */
			struct memcg_cache_array __rcu *memcg_caches;
			/* Node in the global list of root caches. */
			struct list_head __root_caches_node;
			/* This root's per-memcg child caches. */
			struct list_head children;
			/* Set once destruction of the root cache has begun. */
			bool dying;
		};
		struct {	/* per-memcg child cache */
			/* Owning memory cgroup. */
			struct mem_cgroup *memcg;
			/* Node in the root cache's 'children' list. */
			struct list_head children_node;
			/* Node in the memcg's list of kmem caches. */
			struct list_head kmem_caches_node;
			struct percpu_ref refcnt;

			/* Deferred callback, run via rcu_head or work below. */
			void (*work_fn)(struct kmem_cache *);
			union {
				struct rcu_head rcu_head;
				struct work_struct work;
			};
		};
	};
};
659
int memcg_update_all_caches(int num_memcgs);

/*
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 *
 * Returns NULL if n * size would overflow, instead of allocating a
 * too-small buffer.
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	/* Constant operands let kmalloc() resolve the cache at compile time. */
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}
678
679
680
681
682
683
684
/*
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
689
690
691
692
693
694
695
696
697
/*
 * kmalloc_track_caller records the return address of its own caller
 * (_RET_IP_) for slab debugging/leak tracking — useful when the immediate
 * caller is itself a thin allocation wrapper.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

/* Node-aware kmalloc_array(); overflow-checked the same way. */
static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

/* Node-aware kcalloc(). */
static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* !CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */
732
733
734
735
/* Shortcut: allocate a zeroed object from cache @k. */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/*
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/*
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate.
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}
761
/* Object size of cache @s. */
unsigned int kmem_cache_size(struct kmem_cache *s);
/* Late (post-smp) stage of slab initialization. */
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
/* CPU hotplug callbacks (SLAB only). */
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu NULL
#define slab_dead_cpu NULL
#endif
772
773#endif
774