/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Unified interface for all slab allocators (SLAB, SLUB, SLOB).
 */
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)

/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using it is relying on an independent
 * object validation pass. Something like:
 *
 *	rcu_read_lock();
 * again:
 *	obj = lockless_lookup(key);
 *	if (obj) {
 *		if (!try_get_ref(obj))	// might fail for free'd objects
 *			goto again;
 *
 *		if (obj->key != key) {	// not the object we expected
 *			put_ref(obj);
 *			goto again;
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking: we can lock
 * the structure to stabilize it and check it is still at the given
 * address, but only if we can be sure the memory has not meanwhile been
 * reused for some other kind of object.
 */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account objects to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab deactivation flag */
#define SLAB_DEACTIVATED	((slab_flags_t __force)0x10000000U)

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
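
/*
 * For illustration, a minimal sketch of how ZERO_OR_NULL_PTR() is used by
 * callers whose size may legitimately be zero (the variable names below
 * are hypothetical, not part of this API):
 *
 *	buf = kmalloc(len, GFP_KERNEL);		// ZERO_SIZE_PTR if len == 0
 *	if (ZERO_OR_NULL_PTR(buf))
 *		goto no_buffer;			// covers both NULL and len == 0
 *	...
 *	kfree(buf);				// no-op for NULL and ZERO_SIZE_PTR
 */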

#include <linux/kasan.h>

struct mem_cgroup;

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);
struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
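
/*
 * A minimal usage sketch (the "foo" structure, cache variable and error
 * handling below are hypothetical, for illustration only):
 *
 *	struct foo {
 *		int id;
 *		struct list_head list;
 *	};
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */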

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead for kmem_cache_create() (and similar).
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
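
/*
 * A hedged sketch of whitelisting a single field under hardened usercopy
 * (the "msg" struct, its field and the cache variable are hypothetical):
 * only msg->payload may then cross the user/kernel boundary; copies
 * touching any other part of the object are flagged.
 *
 *	struct msg {
 *		u32 seq;
 *		char payload[128];
 *	};
 *
 *	msg_cache = KMEM_CACHE_USERCOPY(msg, SLAB_ACCOUNT, payload);
 */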

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabyte (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It is not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the object
 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up using a byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#else
	KMALLOC_CGROUP,
#endif
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}
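
/*
 * For illustration, how some common gfp combinations map through
 * kmalloc_type() on a configuration with CONFIG_ZONE_DMA and
 * CONFIG_MEMCG_KMEM both enabled (an assumed config, shown as a sketch):
 *
 *	kmalloc_type(GFP_KERNEL)			== KMALLOC_NORMAL
 *	kmalloc_type(GFP_KERNEL | __GFP_DMA)		== KMALLOC_DMA
 *	kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE)	== KMALLOC_RECLAIM
 *	kmalloc_type(GFP_KERNEL_ACCOUNT)		== KMALLOC_CGROUP
 */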

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at
 * compile-time. Callers where !size_is_constant should only be test
 * modules, where the runtime overhead of __kmalloc_index() can be
 * tolerated.
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;

	if ((IS_ENABLED(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 110000)
	    && !IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
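
/*
 * A minimal bulk-allocation sketch (the array size, cache variable and
 * error path are hypothetical, for illustration only).
 * kmem_cache_alloc_bulk() returns the number of objects allocated or 0
 * on failure; on success all "nr" pointers in "objs" are valid.
 *
 *	void *objs[16];
 *	size_t nr = ARRAY_SIZE(objs);
 *
 *	if (!kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, nr, objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cache, nr, objs);
 */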

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
							 __alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
									 __malloc;
#else
static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
				    __assume_slab_alignment __alloc_size(3);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment
								__alloc_size(4);
#else
static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
						gfp_t gfpflags, int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
								    gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
							  int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
									 __alloc_size(1);

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
				__assume_page_alignment __alloc_size(1);
#else
static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
								 unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size of power of two bytes, the alignment is also guaranteed
 * to be at least to the size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel ram. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep.  May use emergency pools.
 *
 * %GFP_HIGHUSER
 *	Allocate memory from high memory on behalf of user.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to succeed the allocation but fail
 *	eventually.
 */
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
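
/*
 * A minimal usage sketch ("hdr" is a hypothetical structure): a
 * compile-time-constant request up to KMALLOC_MAX_CACHE_SIZE is routed to
 * a kmalloc cache, larger constant requests go through kmalloc_large(),
 * and the result must always be checked for NULL.
 *
 *	struct hdr *h = kmalloc(sizeof(*h), GFP_KERNEL);
 *
 *	if (!h)
 *		return -ENOMEM;
 *	...
 *	kfree(h);
 */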

static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}
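
/*
 * A sketch of why kmalloc_array() is preferred over an open-coded
 * n * size (the names below are hypothetical): check_mul_overflow()
 * turns a multiplication that would wrap into a NULL return instead of
 * a too-small allocation.
 *
 *	ptrs = kmalloc_array(nr_entries, sizeof(*ptrs), GFP_KERNEL);
 *	if (!ptrs)
 *		return -ENOMEM;	// also covers the nr_entries overflow case
 */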

/**
 * krealloc_array - reallocate memory for an array of elements
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 */
static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
								    size_t new_n,
								    size_t new_size,
								    gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc(p, bytes, flags);
}
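
/*
 * A hedged growth sketch (the doubling policy and the names are
 * hypothetical). Note the temporary: assigning krealloc_array() straight
 * back to the only pointer would leak the old buffer on failure.
 *
 *	new = krealloc_array(tbl, cap * 2, sizeof(*tbl), GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;	// tbl is still valid here
 *	tbl = new;
 *	cap *= 2;
 */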

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the caller of kmalloc is an internal helper, such as a
 * generic list manager, so the real caller gets tracked instead.
 */
extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
				    __alloc_size(1);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
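
/*
 * A hedged sketch of when to use it (the helper below is hypothetical):
 * a generic duplication helper allocates via kmalloc_track_caller() so
 * that leak reports point at the helper's caller, not at the helper.
 *
 *	void *my_memdup(const void *src, size_t len, gfp_t gfp)
 *	{
 *		void *p = kmalloc_track_caller(len, gfp);
 *
 *		if (p)
 *			memcpy(p, src, len);
 *		return p;
 *	}
 */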

static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
							  int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
					 unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}
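
/*
 * A hedged sketch of the kvmalloc() pattern (the buffer name and size are
 * hypothetical): kvmalloc tries kmalloc first and falls back to vmalloc
 * for larger or fragmentation-prone requests, so the result must be
 * released with kvfree(), never plain kfree().
 *
 *	buf = kvzalloc(nr_pages * PAGE_SIZE, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kvfree(buf);
 */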

static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
		      __alloc_size(3);
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */