/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)

/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account allocations to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct mem_cgroup;

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
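
/*
 * Example (illustrative sketch, not part of this header; "struct foo" and
 * "foo_cache" are hypothetical names): typical cache lifecycle using the
 * macro above together with the alloc/free helpers declared below:
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *	struct foo *f = kmem_cache_zalloc(foo_cache, GFP_KERNEL);
 *	if (f) {
 *		...
 *		kmem_cache_free(foo_cache, f);
 *	}
 *	kmem_cache_destroy(foo_cache);
 */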

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead for the whole struct.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
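
/*
 * Example (illustrative sketch; "struct foo" and its "payload" field are
 * hypothetical): whitelist only the part of the object that is legitimately
 * copied to/from userspace, so hardened usercopy can reject copies that
 * would touch the rest of the object:
 *
 *	foo_cache = KMEM_CACHE_USERCOPY(foo, SLAB_ACCOUNT, payload);
 */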

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
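
/*
 * Example (illustrative sketch; "buf" and "len" are hypothetical): krealloc
 * returns NULL on failure but leaves the original buffer untouched, so keep
 * the old pointer until the call succeeds:
 *
 *	char *tmp = krealloc(buf, len * 2, GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;	// buf is still valid and still owned by us
 *	buf = tmp;
 */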

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that larger alignment.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * ARCH_SLAB_MINALIGN is the minimum alignment guaranteed for objects
 * allocated from any kmem_cache; arch headers may raise it.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabyte (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * byte sized index which can represent 2^8 entries, the size of the object
 * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
 * If minimum size of kmalloc is less than 16, we use it as minimum object
 * size and give up to use byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */
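
/*
 * Worked example (assuming KMALLOC_MIN_SIZE <= 32): kmalloc_index(100)
 * returns 7, i.e. the 128-byte cache, while kmalloc_index(96) returns 1,
 * the special 96-byte cache that fills the gap between 64 and 128 bytes.
 */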

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * kfree_bulk - Free a bulk of kmalloc'ed memory
 * @size: Size of pointer array.
 * @p: Pointer with array of objects to free.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
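
/*
 * Example (illustrative sketch; "foo_cache", "objs" and N are hypothetical):
 * kmem_cache_alloc_bulk() either fills all N slots and returns N, or frees
 * any partial progress and returns 0, so no cleanup is needed on failure:
 *
 *	void *objs[N];
 *
 *	if (!kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, N, objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cache, N, objs);
 */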

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}
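
/*
 * Worked example (assuming 4K pages): a 70000-byte request exceeds an
 * order-4 block (16 pages = 65536 bytes), so get_order() rounds up to
 * order 5 and the page allocator hands back a 131072-byte block.
 */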

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL - Try really hard to succeed the allocation but fail
 *   eventually.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			unsigned int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
						      flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}
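
/*
 * Example (illustrative sketch; "struct foo" is hypothetical): always check
 * the result, and pair every kmalloc() with a kfree():
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 */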

/*
 * Determine size used for the nth kmalloc cache.
 * return size or 0 if a kmalloc cache for that
 * size does not exist
 */
static __always_inline unsigned int kmalloc_size(unsigned int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1U << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
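
/*
 * Example (illustrative sketch; "dev" is a hypothetical struct device * and
 * RING_BYTES a hypothetical size): allocate near the device's NUMA node to
 * keep frequently-accessed DMA descriptors local:
 *
 *	void *ring = kmalloc_node(RING_BYTES, GFP_KERNEL, dev_to_node(dev));
 */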

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches.  NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches.  While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;

			void (*deact_fn)(struct kmem_cache *);
			union {
				struct rcu_head deact_rcu_head;
				struct work_struct deact_work;
			};
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
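
/*
 * Example (illustrative sketch; "count" is hypothetical): prefer these over
 * open-coded kmalloc(count * size, ...) because the multiplication is
 * checked for overflow and NULL is returned instead of a short buffer:
 *
 *	u32 *tbl = kcalloc(count, sizeof(*tbl), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 */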

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}
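
/*
 * Example (illustrative sketch; "struct foo" is hypothetical): kzalloc() is
 * kmalloc() plus __GFP_ZERO, avoiding a separate memset() and accidental
 * use of uninitialized fields:
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 */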

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */