/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *      Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)

/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given
 * address, only if we can be sure that the memory has not been
 * meanwhile reused for some other kind of object (which our subsystem's
 * lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
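
/*
 * Example (illustrative, not from this header): a caller that may be
 * handed a zero length can use ZERO_OR_NULL_PTR() to tell "no usable
 * object" apart from a real allocation:
 *
 *	buf = kmalloc(len, GFP_KERNEL);	// len == 0 yields ZERO_SIZE_PTR
 *	if (ZERO_OR_NULL_PTR(buf))
 *		return -ENOMEM;
 */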

#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
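
/*
 * Example (illustrative; "foo", "foo_cache" and foo_init() are
 * assumptions for the sketch, not part of this header):
 *
 *	struct foo {
 *		int bar;
 *	};
 *	static struct kmem_cache *foo_cache;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
 *		if (!foo_cache)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 * Objects then come from kmem_cache_alloc(foo_cache, GFP_KERNEL) and go
 * back with kmem_cache_free(foo_cache, obj).
 */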

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead for KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
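
/*
 * Example (illustrative; "msg" and "msg_cache" are assumptions): expose
 * only the "data" field of each object to hardened usercopy, so that
 * copy_to_user()/copy_from_user() may touch just that region:
 *
 *	struct msg {
 *		spinlock_t lock;
 *		char data[64];
 *	};
 *
 *	msg_cache = KMEM_CACHE_USERCOPY(msg, 0, data);
 */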

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabyte (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: Its not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * byte sized index which can represent 2^8 entries, the size of the object
 * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
 * If minimum size of kmalloc is less than 16, we use it as minimum object
 * size and give up to use byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care of that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#else
	KMALLOC_CGROUP,
#endif
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated.  Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#define kmalloc_index(s) __kmalloc_index(s, true)
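
/*
 * Example (illustrative): with KMALLOC_MIN_SIZE == 8, kmalloc_index(24)
 * folds at compile time to 5 (the 32-byte cache), and kmalloc_index(96)
 * to 1 (the dedicated 96-byte cache).
 */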
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
			   gfp_t gfpflags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
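
/*
 * Example (illustrative; "my_cache" is an assumption): allocating a
 * batch of objects in one call and releasing them the same way.
 * kmem_cache_alloc_bulk() returns the number of objects allocated,
 * or 0 on failure, in which case no slots are populated:
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
 *		// ... use objs[0..15] ...
 *		kmem_cache_free_bulk(my_cache, ARRAY_SIZE(objs), objs);
 *	}
 */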

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
							 __alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
									 __malloc;
#else
static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
				   __assume_slab_alignment __alloc_size(3);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment
								__alloc_size(4);
#else
static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
						 gfp_t gfpflags, int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
								    gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
							  int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
									 __alloc_size(1);

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
				__assume_page_alignment __alloc_size(1);
#else
static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
								 unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size of power of two bytes, the alignment is also guaranteed
 * to be at least to the size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel ram. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep.  May use emergency pools.
 *
 * %GFP_HIGHUSER
 *	Allocate memory from high memory on behalf of user.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to succeed the allocation but fail
 *	eventually.
 */
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
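
/*
 * Example (illustrative): the usual allocate/check/free pattern.
 * GFP_KERNEL may sleep and so must not be used in atomic context;
 * use GFP_ATOMIC there instead.
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... use buf ...
 *	kfree(buf);
 */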

static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}
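
/*
 * Example (illustrative): prefer kmalloc_array(n, sizeof(*tbl), ...) to
 * an open-coded kmalloc(n * sizeof(*tbl), ...): the multiplication is
 * overflow-checked and the call returns NULL rather than silently
 * allocating a too-short buffer.
 *
 *	tbl = kmalloc_array(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 */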

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 */
static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
								    size_t new_n,
								    size_t new_size,
								    gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc(p, bytes, flags);
}
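
/*
 * Example (illustrative): growing an array while preserving its
 * contents. As with krealloc(), the old buffer is left intact on
 * failure, so assign through a temporary to avoid leaking it:
 *
 *	new = krealloc_array(tbl, new_nr, sizeof(*tbl), GFP_KERNEL);
 *	if (!new)
 *		goto err;	// tbl is still valid and must be freed
 *	tbl = new;
 */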

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the caller of kmalloc is not known, e.g., because it is
 * hooked into generic functions.
 */
extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
							  int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
					 unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}
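
/*
 * Example (illustrative): kzalloc() is the idiomatic way to allocate a
 * structure whose fields must start out zeroed, avoiding a separate
 * memset():
 *
 *	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *	if (!ctx)
 *		return -ENOMEM;
 */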

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
		      __alloc_size(3);
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);
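
/*
 * Example (illustrative): kvmalloc() and friends try kmalloc() first and
 * fall back to vmalloc() when the request is large or physically
 * contiguous memory is unavailable. The result therefore may not be
 * physically contiguous and must be freed with kvfree(), never plain
 * kfree():
 *
 *	table = kvcalloc(nr_buckets, sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	// ... use table ...
 *	kvfree(table);
 */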

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */