/*
 * Unified interface for all slab allocators.
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_CONSISTENCY_CHECKS	0x00000100UL	/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
#define SLAB_TYPESAFE_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
# define SLAB_ACCOUNT		0x04000000UL	/* Account allocations to memcg */
#else
# define SLAB_ACCOUNT		0x00000000UL
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		0x08000000UL
#else
#define SLAB_KASAN		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
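
/*
 * Illustrative sketch (not part of this header's API): combining the
 * flags above when creating a cache with kmem_cache_create(), declared
 * further down. The struct and cache names are hypothetical.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN | SLAB_PANIC,
 *				      NULL);
 */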

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
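
/*
 * Illustrative sketch: ZERO_OR_NULL_PTR() catches both allocation
 * failure (NULL) and the zero-size sentinel in one test, e.g. before
 * touching buffer contents. The names are hypothetical.
 *
 *	buf = kmalloc(len, GFP_KERNEL);	// returns ZERO_SIZE_PTR if len == 0
 *	if (ZERO_OR_NULL_PTR(buf))
 *		return NULL;		// nothing dereferenceable was allocated
 */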

#include <linux/kmemleak.h>
#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *);
void memcg_destroy_kmem_caches(struct mem_cgroup *);
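
/*
 * Illustrative sketch of the cache lifecycle declared above. The struct
 * name is hypothetical and error handling is elided; kmem_cache_alloc()
 * and kmem_cache_free() are declared later in this header.
 *
 *	static struct kmem_cache *obj_cache;
 *
 *	obj_cache = kmem_cache_create("obj", sizeof(struct obj), 0, 0, NULL);
 *	...
 *	p = kmem_cache_alloc(obj_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(obj_cache, p);
 *	kmem_cache_destroy(obj_cache);
 */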

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * f.e. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
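
/*
 * Illustrative sketch: KMEM_CACHE() expands to the kmem_cache_create()
 * call above, deriving the cache name, object size, and alignment from
 * the struct itself. The struct name "widget" is hypothetical.
 *
 *	static struct kmem_cache *widget_cache;
 *
 *	widget_cache = KMEM_CACHE(widget, SLAB_PANIC);
 */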

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
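
/*
 * Illustrative sketch: krealloc() returns NULL on failure and leaves
 * the original buffer intact, so assign through a temporary. The names
 * are hypothetical.
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new)
 *		kfree(buf);	// old buffer is still valid and still owned
 *	else
 *		buf = new;
 */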

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
const char *__check_heap_object(const void *ptr, unsigned long n,
				struct page *page);
#else
static inline const char *__check_heap_object(const void *ptr,
					      unsigned long n,
					      struct page *page)
{
	return NULL;
}
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabyte (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It is not easy to increase this value since the allocators
 * have to do various tricks to work around compiler limitations in order
 * to ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the
 * object should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the
 * minimum object size and give up using a byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

#ifndef CONFIG_SLOB
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */
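
/*
 * Illustrative worked example: for a 100-byte request, neither special
 * case matches (100 > 96), so the ladder above falls through to
 * "size <= 128" and returns 7, i.e. the 128-byte (2^7) cache. A 96-byte
 * request instead returns 1, the off-power-of-two 96-byte cache, when
 * KMALLOC_MIN_SIZE <= 32.
 */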

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
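
/*
 * Illustrative sketch of the bulk API above: kmem_cache_alloc_bulk()
 * returns the number of objects allocated (0 on failure), and the whole
 * array can then be released in a single call. The cache pointer and
 * array size are hypothetical.
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(obj_cache, GFP_KERNEL, 16, objs)) {
 *		...
 *		kmem_cache_free_bulk(obj_cache, 16, objs);
 *	}
 */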

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user. May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %__GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Further modifier flags (e.g. %__GFP_NOWARN, %__GFP_NORETRY) can be
 * OR'ed in; for the full list, always refer to linux/gfp.h.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
					flags, size);
		}
#endif
	}
	return __kmalloc(size, flags);
}
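
/*
 * Illustrative sketch: the typical kmalloc()/kfree() pairing. The
 * struct and pointer names are hypothetical.
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	kfree(p);
 */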

/*
 * Determine size used for the nth kmalloc cache.
 * return size or 0 if a kmalloc cache for that
 * size does not exist
 */
static __always_inline int kmalloc_size(int n)
{
#ifndef CONFIG_SLOB
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;
#endif
	return 0;
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}
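
/*
 * Illustrative sketch: allocating near a specific NUMA node. A common
 * pattern is to derive the node from a device (dev_to_node() from
 * linux/device.h); NUMA_NO_NODE is the usual "don't care" hint. The
 * device pointer is hypothetical.
 *
 *	buf = kmalloc_node(size, GFP_KERNEL, dev_to_node(dev));
 */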

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem
 * caches. Both the root cache and the child caches have it. For the root
 * cache, this holds a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow
 * the array to be accessed without taking any locks, on relocation the
 * old version is freed only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches. NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches. This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @__root_caches_node: List node for the list of all root caches.
 *
 * @children:	List of all child caches. While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;

			void (*deact_fn)(struct kmem_cache *);
			union {
				struct rcu_head deact_rcu_head;
				struct work_struct deact_work;
			};
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(n * size, flags);
	return __kmalloc(n * size, flags);
}
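
/*
 * Illustrative sketch: kmalloc_array() is the overflow-safe way to size
 * an array allocation; an oversized element count fails cleanly with
 * NULL instead of wrapping n * size. The names are hypothetical.
 *
 *	entries = kmalloc_array(nr_entries, sizeof(*entries), GFP_KERNEL);
 *	if (!entries)
 *		return -ENOMEM;
 */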

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking
 * instead of just the calling function (confusing, eh?).
 * It's useful when the caller of kmalloc_track_caller is another function,
 * for example, copy_from_user, that would otherwise be attributed to the
 * caller of kmalloc_track_caller.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}
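
/*
 * Illustrative sketch: kzalloc() replaces a kmalloc()+memset() pair, so
 * every field of the hypothetical context struct starts out zeroed.
 *
 *	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *	if (!ctx)
 *		return -ENOMEM;
 */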

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */