/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account objects to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);
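
/*
 * A minimal sketch of the cache lifecycle, assuming a hypothetical
 * "struct foo" and with error handling elided (kmem_cache_create()
 * returns NULL on failure):
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *				      0, SLAB_HWCACHE_ALIGN, NULL);
 *	...
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */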

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the struct and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
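
/*
 * Example (hypothetical struct; equivalent to the kmem_cache_create()
 * sketch above, with the struct name reused as the cache name):
 *
 *	foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
 */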

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead for KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
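
/*
 * Example (hypothetical struct; only the "data" field is whitelisted
 * for the hardened usercopy checks, so copy_to_user()/copy_from_user()
 * on other parts of the object will be flagged):
 *
 *	struct foo {
 *		int refcount;
 *		char data[64];
 *	};
 *	foo_cache = KMEM_CACHE_USERCOPY(foo, 0, data);
 */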

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
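
/*
 * A short krealloc() sketch. As with userspace realloc(), a failed
 * krealloc() leaves the original buffer allocated, so do not assign
 * the result directly to the only pointer to it ("buf" and "new_size"
 * are hypothetical):
 *
 *	char *tmp = krealloc(buf, new_size, GFP_KERNEL);
 *
 *	if (!tmp)
 *		return -ENOMEM;	// buf is still valid here
 *	buf = tmp;
 */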

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabyte (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It is not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * The smallest object size that the allocators must support:
 * 16 bytes, or KMALLOC_MIN_SIZE when that is smaller.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care of that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for both flags.
	 */
	if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. If both are, __GFP_DMA
	 * is more important.
	 */
	return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
#else
	return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */
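
/*
 * For instance, on a configuration with a KMALLOC_MIN_SIZE of 8,
 * kmalloc_index(24) returns 5 (the 32 byte cache), kmalloc_index(96)
 * returns 1 (the dedicated 96 byte cache), and kmalloc_index(97)
 * returns 7 (the 128 byte cache).
 */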

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
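
/*
 * A bulk allocation sketch ("objs" and "foo_cache" are hypothetical;
 * kmem_cache_alloc_bulk() returns the number of objects allocated,
 * or zero on failure):
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, 16, objs)) {
 *		...
 *		kmem_cache_free_bulk(foo_cache, 16, objs);
 *	}
 */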

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif
425
426#ifdef CONFIG_TRACING
427extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
428
429#ifdef CONFIG_NUMA
430extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
431 gfp_t gfpflags,
432 int node, size_t size) __assume_slab_alignment __malloc;
433#else
434static __always_inline void *
435kmem_cache_alloc_node_trace(struct kmem_cache *s,
436 gfp_t gfpflags,
437 int node, size_t size)
438{
439 return kmem_cache_alloc_trace(s, gfpflags, size);
440}
441#endif
442
443#else
444static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
445 gfp_t flags, size_t size)
446{
447 void *ret = kmem_cache_alloc(s, flags);
448
449 ret = kasan_kmalloc(s, ret, size, flags);
450 return ret;
451}
452
453static __always_inline void *
454kmem_cache_alloc_node_trace(struct kmem_cache *s,
455 gfp_t gfpflags,
456 int node, size_t size)
457{
458 void *ret = kmem_cache_alloc_node(s, gfpflags, node);
459
460 ret = kasan_kmalloc(s, ret, size, gfpflags);
461 return ret;
462}
463#endif

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL - Try really hard to succeed the allocation but fail
 *   eventually.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
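
/*
 * Typical usage, assuming a hypothetical "struct foo"; kmalloc()
 * returns NULL on failure, which must always be checked:
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 */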

/*
 * Determine size used for the nth kmalloc cache.
 * return size or 0 if a kmalloc cache for that
 * size does not exist
 */
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1U << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
				flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. Some fields are
 * common to both; others are specific to one or the other.
 *
 * @root_cache:	Common to root and child caches.  NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches.  While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * @dying:	Set when the root cache is being destroyed, so that no new
 *		child caches are created for it.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 *
 * @deact_fn:	Deactivation callback, invoked from RCU or workqueue
 *		context via @deact_rcu_head or @deact_work.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;

			void (*deact_fn)(struct kmem_cache *);
			union {
				struct rcu_head deact_rcu_head;
				struct work_struct deact_work;
			};
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
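
/*
 * Prefer these helpers over an open-coded kmalloc(n * size, ...): the
 * multiplication is overflow-checked, and NULL is returned on overflow
 * instead of allocating a truncated buffer. For example ("count" is
 * hypothetical):
 *
 *	struct foo *arr = kcalloc(count, sizeof(*arr), GFP_KERNEL);
 *
 *	if (!arr)
 *		return -ENOMEM;
 */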

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the caller of kmalloc is a generic helper allocating
 * on behalf of its own callers, so that the real caller shows up in slab
 * debugging output.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */