#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>


/*
 * Flags to pass to kmem_cache_create().
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC ((slab_flags_t __force)0x00040000U)

/*
 * SLAB_TYPESAFE_BY_RCU: defer freeing of the underlying slab pages by an RCU
 * grace period so that the memory stays "type safe" for lockless readers.
 * Note that this does NOT delay the freeing of individual objects: after
 * kmem_cache_free() an object's memory may be reused immediately, possibly
 * by a new object of the same type, so readers must revalidate whatever
 * they find there.
 */
#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE ((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS 0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB 0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT 0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN 0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct mem_cgroup;

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
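
/*
 * Illustrative sketch (not part of the original header): a typical
 * kmem_cache lifecycle.  "struct foo" and "foo_cachep" are hypothetical.
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
 *				       0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 */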

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);

/*
 * Convenience macro for creating a slab cache for a given struct: the cache
 * name, object size and alignment are all derived from the struct itself.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
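
/*
 * Example (illustrative only; "struct foo" is hypothetical):
 *
 *	foo_cachep = KMEM_CACHE(foo, SLAB_PANIC | SLAB_ACCOUNT);
 *
 * which expands to kmem_cache_create("foo", sizeof(struct foo),
 * __alignof__(struct foo), SLAB_PANIC | SLAB_ACCOUNT, NULL).
 */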

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
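
/*
 * Illustrative usage (hypothetical struct and field names):
 *
 *	foo_cachep = KMEM_CACHE_USERCOPY(foo, SLAB_ACCOUNT, user_buf);
 *
 * With hardened usercopy enabled, only the whitelisted "user_buf" region of
 * each object may be copied to/from userspace.
 */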

/*
 * Common kmallocs for SLAB and SLUB include:
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
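
/*
 * Illustrative sketch (not from the original header): growing a buffer with
 * krealloc().  On failure the old buffer is left untouched, so keep the old
 * pointer until the call succeeds.
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;		// "buf" is still valid here
 *	buf = new;
 */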

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned pointers.
 * kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocator is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 */
#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, if we want to use a byte sized
 * index that can represent 2^8 entries, the size of the object should be
 * equal to or greater than 2^12 / 2^8 = 2^4 = 16 bytes.
 * If the minimum size of kmalloc is less than 16, we use that smaller value
 * as the minimum object size and give up using the byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever this enum is changed, keep kmalloc_type() and
 * create_kmalloc_caches() in sync with it.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
	int is_dma = 0;
	int type_dma = 0;
	int is_reclaimable;

#ifdef CONFIG_ZONE_DMA
	is_dma = !!(flags & __GFP_DMA);
	type_dma = is_dma * KMALLOC_DMA;
#endif

	is_reclaimable = !!(flags & __GFP_RECLAIMABLE);

	/*
	 * If an allocation is both __GFP_DMA and __GFP_RECLAIMABLE, return
	 * KMALLOC_DMA and effectively ignore __GFP_RECLAIMABLE.
	 */
	return type_dma + (is_reclaimable & !is_dma) * KMALLOC_RECLAIM;
}
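
/*
 * Illustrative mapping (a sketch, assuming CONFIG_ZONE_DMA=y):
 *
 *	kmalloc_type(GFP_KERNEL)			 == KMALLOC_NORMAL
 *	kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE)	 == KMALLOC_RECLAIM
 *	kmalloc_type(GFP_DMA)				 == KMALLOC_DMA
 *	kmalloc_type(GFP_DMA | __GFP_RECLAIMABLE)	 == KMALLOC_DMA
 */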

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
	if (size <= 4 * 1024) return 12;
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	if (size <= 4 * 1024 * 1024) return 22;
	if (size <= 8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain. */
	return -1;
}
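
/*
 * Worked examples (illustrative, assuming the default KMALLOC_MIN_SIZE of 8):
 * kmalloc_index(8) == 3, kmalloc_index(96) == 1, kmalloc_index(100) == 7,
 * kmalloc_index(4096) == 12, and kmalloc_index(0) == 0, which callers map
 * to ZERO_SIZE_PTR.
 */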
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
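
/*
 * Illustrative sketch (not from the original header): bulk-allocating and
 * bulk-freeing objects from a cache.  "foo_cachep" and "objs" are
 * hypothetical; a return value of 0 means nothing was allocated.
 *
 *	void *objs[16];
 *	int n = kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL,
 *				      ARRAY_SIZE(objs), objs);
 *	if (!n)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cachep, n, objs);
 */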

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see the GFP flags in linux/gfp.h,
 *         e.g. %GFP_KERNEL for normal, possibly sleeping, allocations and
 *         %GFP_ATOMIC for allocations that must not sleep).
 *
 * kmalloc is the normal method of allocating memory for objects smaller
 * than page size in the kernel.  Compile-time constant sizes up to
 * KMALLOC_MAX_CACHE_SIZE are served from the kmalloc slab caches; larger
 * constant sizes are passed straight to kmalloc_large(), and non-constant
 * sizes go through __kmalloc().
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
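
/*
 * Illustrative sketch (not part of the original header): the usual
 * allocate/check/free pattern.  "struct foo" is hypothetical.
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 */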

/*
 * Determine the size used for the nth kmalloc cache.
 * Return the size, or 0 if a kmalloc cache for that size does not exist.
 */
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1U << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
				flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
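
/*
 * Illustrative sketch (not from the original header): allocating close to a
 * given NUMA node; "nid" is a hypothetical node id.
 *
 *	p = kmalloc_node(sizeof(*p), GFP_KERNEL, nid);
 *	if (!p)
 *		return -ENOMEM;
 */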

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and its per-memcg child caches carry it.  For a root
 * cache the first variant of the union tracks the per-memcg clones; for a
 * child cache the second variant points back to the owning memcg and to the
 * root cache it was created for, and holds the deferred-deactivation state.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;

			void (*deact_fn)(struct kmem_cache *);
			union {
				struct rcu_head deact_rcu_head;
				struct work_struct deact_work;
			};
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
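
/*
 * Illustrative note (not from the original header): kmalloc_array() and
 * kcalloc() check n * size for multiplication overflow and return NULL in
 * that case, so prefer them over an open-coded kmalloc(n * size, ...).
 *
 *	struct foo *tbl = kcalloc(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 */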

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, yes).  It is useful when the call
 * to kmalloc comes from a widely-used standard allocator where we care
 * about the real place the memory allocation request came from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */