/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Unified interface for all slab allocators.
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>


/*
 * Flags to pass to kmem_cache_create().
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)

/*
 * SLAB_TYPESAFE_BY_RCU - defer freeing of the cache's slab pages by RCU.
 *
 * This delays freeing of the slab *page* by a grace period, but it does
 * NOT delay object freeing: an object may be freed and reused within the
 * same slab before the grace period expires.  Lockless readers must
 * therefore revalidate the object they looked up (for example by taking
 * a reference count or a per-object lock and re-checking its identity)
 * before relying on its contents.
 */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab deactivation flag */
#define SLAB_DEACTIVATED	((slab_flags_t __force)0x10000000U)
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree() in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct mem_cgroup;

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
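
/*
 * Illustrative sketch (not part of this header's API surface): a typical
 * cache lifecycle using the declarations above.  Names such as "foo_cache"
 * and "struct foo" are hypothetical.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	static int __init foo_init(void)
 *	{
 *		foo_cache = kmem_cache_create("foo", sizeof(struct foo),
 *					      0, SLAB_HWCACHE_ALIGN, NULL);
 *		if (!foo_cache)
 *			return -ENOMEM;
 *		return 0;
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		kmem_cache_destroy(foo_cache);
 *	}
 */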

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The cache is named after the struct and uses the struct's natural
 * alignment for its objects.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
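
/*
 * Illustrative use of KMEM_CACHE(); "struct task_ctx" and "task_ctx_cache"
 * are hypothetical names, not defined in this header:
 *
 *	static struct kmem_cache *task_ctx_cache;
 *
 *	task_ctx_cache = KMEM_CACHE(task_ctx, SLAB_PANIC | SLAB_ACCOUNT);
 */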

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kfree_sensitive(const void *);
size_t __ksize(const void *);
size_t ksize(const void *);
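
/*
 * Illustrative krealloc() pattern (a sketch; "buf" and "new_len" are
 * hypothetical): keep the old buffer on failure, since krealloc()
 * returns NULL without freeing the original allocation.
 *
 *	void *tmp = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;
 *	buf = tmp;
 */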

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned pointers.
 * kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN aligned
 * pointers. Page-level allocations return page aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */
#ifdef CONFIG_SLAB
/*
 * SLAB serves all kmalloc requests from kmalloc caches; the largest
 * supported size is 32MB, or less if MAX_ORDER limits the page
 * allocator to smaller blocks.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * byte sized index which can represent 2^8 entries, the size of the object
 * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
 * If minimum size of kmalloc is less than 16, we use it as minimum object
 * size and give up to use byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care of that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for both flags.
	 */
	if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. If both are, __GFP_DMA
	 * is more important.
	 */
	return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
#else
	return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
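
/*
 * Illustrative bulk allocation sketch (names such as "foo_cache" and
 * "objs" are hypothetical): kmem_cache_alloc_bulk() returns the number
 * of objects it allocated, or 0 on failure, so callers check the return
 * value and later free the same count.
 *
 *	void *objs[16];
 *	int n;
 *
 *	n = kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *	if (!n)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cache, n, objs);
 */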

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif

#else
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
						    gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);

	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described in Documentation/core-api/mm-api.rst.
 * The most common ones are %GFP_KERNEL (may sleep), %GFP_NOWAIT and
 * %GFP_ATOMIC (never sleep), and %GFP_DMA / %GFP_DMA32 (memory suitable
 * for DMA).  Modifiers such as %__GFP_ZERO, %__GFP_NOWARN or
 * %__GFP_RETRY_MAYFAIL may be OR'ed in to adjust the behaviour of the
 * page allocator.
 *
 * Constant-size requests larger than KMALLOC_MAX_CACHE_SIZE bypass the
 * kmalloc caches and go straight to the page allocator via kmalloc_large().
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
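
/*
 * Illustrative sketch of the common kmalloc()/kfree() pattern ("buf" and
 * "len" are hypothetical, not defined in this header):
 *
 *	char *buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */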

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
				flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
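
/*
 * Illustrative kcalloc() sketch ("items" and "nr_items" are hypothetical):
 * a zeroed array allocation that lets the helper catch multiplication
 * overflow instead of open-coding nr_items * sizeof(*items).
 *
 *	struct item *items = kcalloc(nr_items, sizeof(*items), GFP_KERNEL);
 *	if (!items)
 *		return -ENOMEM;
 */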

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}
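
/*
 * Illustrative kzalloc() sketch ("struct foo_dev" is hypothetical): a
 * zeroed allocation avoids a separate memset() of the new object.
 *
 *	struct foo_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *	if (!dev)
 *		return -ENOMEM;
 */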

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */