/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Unified interface for all slab allocators (SLAB, SLUB, SLOB):
 * cache-creation flags, the kmalloc size-class machinery and the
 * kmalloc()/kmem_cache_*() API declarations shared by all of them.
 */
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)

/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)

/* DEBUG: Poison objects */
#define SLAB_POISON ((slab_flags_t __force)0x00000800U)

/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)

/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)

/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)

/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)

/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC ((slab_flags_t __force)0x00040000U)

/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock();
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj))	// might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) {	// not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U)

/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000U)

/* Trace allocations and frees */
#define SLAB_TRACE ((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS 0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB 0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT 0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN 0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */

/* Slab deactivation flag */
#define SLAB_DEACTIVATED ((slab_flags_t __force)0x10000000U)

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
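
/*
 * Illustrative note (not from the original header): a zero-byte request
 * returns ZERO_SIZE_PTR rather than NULL, and kfree()/krealloc() accept
 * it just like NULL, so callers need not special-case a length of zero:
 *
 *	void *p = kmalloc(0, GFP_KERNEL);	// p == ZERO_SIZE_PTR
 *	BUG_ON(!ZERO_OR_NULL_PTR(p));
 *	kfree(p);				// no-op, perfectly legal
 */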

#include <linux/kasan.h>

struct mem_cgroup;

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) \
		kmem_cache_create(#__struct, sizeof(struct __struct), \
			__alignof__(struct __struct), (__flags), NULL)
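
/*
 * Illustrative sketch (hypothetical struct and cache, not part of this
 * header): a typical KMEM_CACHE() lifecycle.
 *
 *	struct my_obj {
 *		int id;
 *		struct list_head list;
 *	};
 *	static struct kmem_cache *my_obj_cache;
 *
 *	my_obj_cache = KMEM_CACHE(my_obj, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *	struct my_obj *obj = kmem_cache_zalloc(my_obj_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(my_obj_cache, obj);
 *	kmem_cache_destroy(my_obj_cache);
 */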

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead for KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
		kmem_cache_create_usercopy(#__struct, \
			sizeof(struct __struct), \
			__alignof__(struct __struct), (__flags), \
			offsetof(struct __struct, __field), \
			sizeof_field(struct __struct, __field), NULL)
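
/*
 * Illustrative sketch (hypothetical names): whitelist a single field for
 * copy_to_user()/copy_from_user() under CONFIG_HARDENED_USERCOPY, while
 * the rest of the object stays off-limits:
 *
 *	struct my_req {
 *		spinlock_t lock;
 *		char name[32];		// the only user-visible part
 *	};
 *
 *	req_cache = KMEM_CACHE_USERCOPY(my_req, SLAB_ACCOUNT, name);
 */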

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kfree_sensitive(const void *);
size_t __ksize(const void *);
size_t ksize(const void *);

#define kzfree(x) kfree_sensitive(x) /* For backward compatibility */
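
/*
 * Illustrative note: kfree_sensitive() clears the buffer before freeing
 * it, which is the right call for key material and other secrets
 * (key_len below is a hypothetical caller-supplied length):
 *
 *	u8 *key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kfree_sensitive(key);	// zeroed, then freed
 */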

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 *
 * KMALLOC_SHIFT_HIGH: high end of the kmalloc caches, i.e. the largest
 *	allocation that is still served from a slab cache.
 * KMALLOC_SHIFT_LOW: low end of the kmalloc caches.
 * KMALLOC_SHIFT_MAX: largest allocation possible via kmalloc().
 */
#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabyte (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: Its not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * Lower bound on the size of a slab object, as required by the byte-sized
 * freelist index implementation: with a one-byte index, a 2^12 byte page
 * can address at most 2^8 objects, so objects must be at least
 * 2^12 / 2^8 = 16 bytes (or KMALLOC_MIN_SIZE, if that is smaller).
 */
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care of that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for both flags.
	 */
	if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. If both are, __GFP_DMA
	 * is more important.
	 */
	return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
#else
	return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
}
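
/*
 * Illustrative mapping (follows directly from kmalloc_type() above,
 * assuming CONFIG_ZONE_DMA is enabled):
 *
 *	kmalloc_type(GFP_KERNEL)			== KMALLOC_NORMAL
 *	kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE)	== KMALLOC_RECLAIM
 *	kmalloc_type(GFP_KERNEL | __GFP_DMA)		== KMALLOC_DMA
 */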

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
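
/*
 * Worked example (follows from the ladder above, assuming
 * KMALLOC_MIN_SIZE <= 32): kmalloc_index(100) returns 7, i.e. the
 * 128-byte cache, because 100 falls past the special 65..96 slab;
 * kmalloc_index(96) returns 1, the 96-byte slab.
 */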
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
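
/*
 * Illustrative sketch (hypothetical cache): allocate and free a batch of
 * objects with one call each. kmem_cache_alloc_bulk() returns the number
 * of objects allocated, or 0 on failure:
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs),
 *				   objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(my_cache, ARRAY_SIZE(objs), objs);
 */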

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}
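
/*
 * Illustrative note: constant sizes above KMALLOC_MAX_CACHE_SIZE bypass
 * the kmalloc caches and land here. E.g. with 4K pages under SLUB,
 * kmalloc(16384, GFP_KERNEL) becomes kmalloc_order_trace(16384,
 * GFP_KERNEL, 2), since get_order(16384) == 2.
 */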

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size of power of two bytes, the alignment is also guaranteed
 * to be at least to the size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel ram. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep. May use emergency pools.
 *
 * %GFP_HIGHUSER
 *	Allocate memory from high memory on behalf of user.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to succeed the allocation but fail
 *	eventually.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}
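
/*
 * Illustrative sketch (hypothetical struct): the common allocate-and-check
 * pattern. Power-of-two sizes also get size-aligned objects, per the
 * kernel-doc above:
 *
 *	struct my_hdr *hdr = kmalloc(sizeof(*hdr), GFP_KERNEL);
 *	if (!hdr)
 *		return -ENOMEM;
 */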

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
				flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}
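
/*
 * Illustrative sketch: kmalloc_array() is the overflow-safe form of
 * kmalloc(n * size, ...). If a caller-controlled nr_entries (hypothetical)
 * would overflow the multiplication, the call fails with NULL instead of
 * silently allocating a short buffer:
 *
 *	u64 *tbl = kmalloc_array(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 */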

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}
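
/*
 * Illustrative note: kzalloc() is kmalloc() plus __GFP_ZERO, so the two
 * calls below are equivalent (ctx is a hypothetical pointer):
 *
 *	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL | __GFP_ZERO);
 */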

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu NULL
#define slab_dead_cpu NULL
#endif

#endif /* _LINUX_SLAB_H */