/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Unified interface for all slab allocators (SLAB, SLUB, SLOB).
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC ((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *	rcu_read_lock();
 * again:
 *	obj = lockless_lookup(key);
 *	if (obj) {
 *		if (!try_get_ref(obj)) // might fail for free objects
 *			goto again;
 *
 *		if (obj->key != key) { // not the object we expected
 *			put_ref(obj);
 *			goto again;
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE ((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS 0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB 0
#endif

/* Account allocations to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT 0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN 0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab deactivation flag */
#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U)

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct mem_cgroup;

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
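
/*
 * Example usage of KMEM_CACHE() (an illustrative sketch only; 'struct foo',
 * 'foo_cache' and the surrounding error handling are hypothetical, not part
 * of this header):
 *
 *	struct foo {
 *		int bar;
 *	};
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */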

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead for KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
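
/*
 * Example: whitelisting one field for user copies (a sketch; 'struct sess'
 * and its 'key' field are hypothetical):
 *
 *	struct sess {
 *		u32 id;
 *		u8 key[64];	// the only field copied to/from userspace
 *	};
 *
 *	sess_cache = KMEM_CACHE_USERCOPY(sess, SLAB_PANIC, key);
 *
 * With hardened usercopy enabled, copies to/from objects of this cache may
 * then only touch the whitelisted 'key' region.
 */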

void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kfree_sensitive(const void *);
size_t __ksize(const void *);
size_t ksize(const void *);
#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that larger alignment.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN aligned pointers;
 * kmalloc and friends are aligned to at least ARCH_KMALLOC_MINALIGN.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabyte (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: Its not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * Smallest actual slab object size. Object sizes below 16 bytes are only
 * used when KMALLOC_MIN_SIZE itself is smaller than 16 bytes.
 */
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care of that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#else
	KMALLOC_CGROUP,
#endif
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated.  Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;

	if ((IS_ENABLED(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 110000)
	    && !IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
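
/*
 * Example of the bulk API (an illustrative sketch; 'cachep', the array size
 * and the error handling are hypothetical):
 *
 *	void *objs[16];
 *	int allocated;
 *
 *	allocated = kmem_cache_alloc_bulk(cachep, GFP_KERNEL, 16, objs);
 *	if (!allocated)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(cachep, allocated, objs);
 *
 * kmem_cache_alloc_bulk() returns the number of objects allocated, or 0 on
 * failure, so only that many entries of 'objs' are valid.
 */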

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
						    gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size of power of two bytes, the alignment is also guaranteed
 * to be at least to the size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel ram. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep.  May use emergency pools.
 *
 * %GFP_HIGHUSER
 *	Allocate memory from high memory on behalf of user.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to succeed the allocation but fail
 *	eventually.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
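
/*
 * Typical kmalloc() usage (a minimal sketch; 'buf' and 'len' are
 * hypothetical):
 *
 *	char *buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 *
 * In atomic context (e.g. under a spinlock or in an interrupt handler) use
 * GFP_ATOMIC instead of GFP_KERNEL, since GFP_KERNEL may sleep.
 */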

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
				flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 */
static __must_check inline void *
krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc(p, bytes, flags);
}
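
/*
 * Example of growing an array with krealloc_array() (a sketch; all names
 * are hypothetical). krealloc() leaves the original buffer intact when it
 * fails, so the old pointer must not be overwritten blindly:
 *
 *	struct item *tmp;
 *
 *	tmp = krealloc_array(items, new_count, sizeof(*items), GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;	// 'items' is still valid here
 *	items = tmp;
 */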

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a wrapper function; by
 * using this instead of kmalloc, the wrapper's caller is tracked as the
 * allocation site.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
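
/*
 * Example: a wrapper that stays transparent for leak tracking (a sketch;
 * 'my_zalloc' is hypothetical). Allocations are attributed to the caller
 * of my_zalloc() rather than to my_zalloc() itself:
 *
 *	static void *my_zalloc(size_t len)
 *	{
 *		return kmalloc_track_caller(len, GFP_KERNEL | __GFP_ZERO);
 *	}
 */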

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu NULL
#define slab_dead_cpu NULL
#endif

#endif	/* _LINUX_SLAB_H */