1
2
3
4
5
6
7
8
9#ifndef _LINUX_SLAB_H
10#define _LINUX_SLAB_H
11
12#include <linux/gfp.h>
13#include <linux/overflow.h>
14#include <linux/types.h>
15#include <linux/workqueue.h>
16
17
18
19
20
21
/*
 * Flags to pass to kmem_cache_create().
 * The DEBUG flags only take effect when a debug-capable allocator
 * configuration is built.
 */
#define SLAB_CONSISTENCY_CHECKS 0x00000100UL	/* DEBUG: perform (expensive) checks on alloc/free */
#define SLAB_RED_ZONE 0x00000400UL	/* DEBUG: red-zone objects in a cache */
#define SLAB_POISON 0x00000800UL	/* DEBUG: poison objects */
#define SLAB_HWCACHE_ALIGN 0x00002000UL	/* align objects on hardware cache lines */
#define SLAB_CACHE_DMA 0x00004000UL	/* use GFP_DMA memory */
#define SLAB_STORE_USER 0x00010000UL	/* DEBUG: store the last owner for bug hunting */
#define SLAB_PANIC 0x00040000UL	/* panic if kmem_cache_create() fails */
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
/*
 * SLAB_DESTROY_BY_RCU: defer freeing of the underlying slab *pages*
 * until after an RCU grace period.  This does NOT delay reuse of
 * individual objects: a freed object's memory may be recycled for a
 * new object of the same cache before the grace period ends, so
 * lockless readers must re-validate the object after referencing it.
 */
#define SLAB_DESTROY_BY_RCU 0x00080000UL	/* defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD 0x00100000UL	/* spread some memory over cpuset */
#define SLAB_TRACE 0x00200000UL	/* trace allocations and frees */

/* Objects are tracked by the debugobjects facility, when configured. */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS 0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS 0x00000000UL
#endif

#define SLAB_NOLEAKTRACE 0x00800000UL	/* avoid kmemleak tracing */

/* Don't track use of uninitialized memory (CONFIG_KMEMCHECK only). */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK 0x01000000UL
#else
# define SLAB_NOTRACK 0x00000000UL
#endif
/* Fault-injection mark for this cache (CONFIG_FAILSLAB only). */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB 0x02000000UL
#else
# define SLAB_FAILSLAB 0x00000000UL
#endif

/* The following flags affect page-allocator grouping by mobility. */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL	/* objects are reclaimable */
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT	/* objects are short-lived */
85
86
87
88
89
90
91
92
/*
 * ZERO_SIZE_PTR is returned for zero-byte allocation requests.  It is
 * a distinct non-NULL value, so callers can tell it apart from a
 * failed allocation, yet dereferencing it faults.  kfree() accepts it
 * just like NULL (both are no-ops), which is exactly what
 * ZERO_OR_NULL_PTR() tests for.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
		(unsigned long)ZERO_SIZE_PTR)
97
98
struct mem_cgroup;	/* opaque here; only pointers are passed around */

/* Early-boot initialisation of the slab allocator core. */
void __init kmem_cache_init(void);
/* Non-zero once the slab allocator is usable. */
int slab_is_available(void);

/*
 * Create a cache of equally-sized objects: name, object size,
 * alignment, SLAB_* flags, and an optional constructor invoked on each
 * newly created object.
 */
struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
		unsigned long,
		void (*)(void *));
#ifdef CONFIG_MEMCG_KMEM
/* Create the per-memcg clone of a root cache. */
struct kmem_cache *kmem_cache_create_memcg(struct mem_cgroup *,
		struct kmem_cache *);
#endif
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);	/* release cached free objects */
void kmem_cache_free(struct kmem_cache *, void *);
116
117
118
119
120
121
122
123
124
/*
 * KMEM_CACHE - convenience wrapper around kmem_cache_create().
 * Names the cache after the struct and uses the struct's natural
 * alignment, which keeps debugging output readable and lets allocators
 * merge compatible caches.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
128
129
130
131
/* Common kmalloc-family helpers, implemented by the active allocator. */
void * __must_check __krealloc(const void *, size_t, gfp_t);	/* like krealloc(), but never frees the old block */
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);	/* NOTE(review): presumably zeroes before freeing (sensitive data) — confirm in mm/ */
size_t ksize(const void *);	/* actual usable size of an allocation */
137
/*
 * Hardened usercopy support: __check_heap_object() validates that the
 * span [ptr, ptr + n) lies within a single heap object on @page.
 * Returns NULL when the access is acceptable, otherwise a string
 * describing the offending region.  Without allocator support the stub
 * below approves everything.
 */
#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
const char *__check_heap_object(const void *ptr, unsigned long n,
		struct page *page);
#else
static inline const char *__check_heap_object(const void *ptr,
		unsigned long n,
		struct page *page)
{
	return NULL;	/* no allocator support: nothing to check */
}
#endif
149
150
151
152
153
154
/*
 * Some architectures require DMA-able memory to be aligned to
 * ARCH_DMA_MINALIGN (e.g. non-coherent cache-line granularity).  In
 * that case the minimum kmalloc alignment — and hence the smallest
 * kmalloc cache — is raised to match, so every kmalloc() result is
 * safely DMA-able.  Otherwise the minimum alignment is that of the
 * largest standard scalar type.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
162
163#ifdef CONFIG_SLOB
164
165
166
167
168
169
170
171
172
173
174
/*
 * SLOB has no allocator-specific header of its own, so its (minimal)
 * cache descriptor lives here.
 */
struct kmem_cache {
	unsigned int object_size;	/* original size of the object */
	unsigned int size;		/* size after alignment/padding */
	unsigned int align;		/* alignment as calculated */
	unsigned long flags;		/* active SLAB_* flags */
	const char *name;		/* cache name (e.g. for slabinfo) */
	int refcount;			/* use counter */
	void (*ctor)(void *);		/* constructor run on new objects */
	struct list_head list;		/*链 — NOTE(review): list of all caches, presumably slab_caches */
};

/* Maximum size kmalloc() supports under SLOB (1 GB). */
#define KMALLOC_MAX_SIZE (1UL << 30)

#include <linux/slob_def.h>
189
190#else
191
192
193
194
195
196#ifdef CONFIG_SLAB
197
198
199
200
201
202
203
204
205
/*
 * SLAB: the largest kmalloc cache is 32 megabytes (2^25), or the
 * maximum allocatable page order if that is smaller.
 */
#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
		(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5	/* smallest cache: 32 bytes */
#endif
#else
/*
 * SLUB serves requests fitting in an order-1 page from its caches;
 * larger requests go straight to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3	/* smallest cache: 8 bytes */
#endif
#endif

/* Largest allocation size kmalloc() supports at all. */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Largest allocation served from a fixed kmalloc slab cache. */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum page order usable by kmalloc. */
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Smallest allocation kmalloc hands out, unless already raised by the
 * ARCH_DMA_MINALIGN logic above.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/* The fixed-size kmalloc caches, indexed by kmalloc_index(). */
extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif
242
243
244
245
246
247
248
249
250
/*
 * kmalloc_index - figure out which kmalloc slab cache an allocation of
 * a given size belongs to.
 *
 * Returned index:
 *   0 = zero-byte allocation (handled specially by callers)
 *   1 =  65 ..  96 bytes (only when KMALLOC_MIN_SIZE <= 32)
 *   2 = 129 .. 192 bytes (only when KMALLOC_MIN_SIZE <= 64)
 *   n = 2^(n-1)+1 .. 2^n bytes otherwise
 *
 * __always_inline so a compile-time-constant size folds to a constant
 * index with no runtime cost.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	/* Everything up to the minimum size shares the smallest cache. */
	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	/* The non-power-of-two 96/192-byte caches, when they exist. */
	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
	if (size <= 4 * 1024) return 12;
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	if (size <= 4 * 1024 * 1024) return 22;
	if (size <= 8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();	/* request larger than any kmalloc cache */

	/* Unreachable; keeps the compiler happy about the return path. */
	return -1;
}
292
293#ifdef CONFIG_SLAB
294#include <linux/slab_def.h>
295#elif defined(CONFIG_SLUB)
296#include <linux/slub_def.h>
297#else
298#error "Unknown slab allocator"
299#endif
300
301
302
303
304
305
306static __always_inline int kmalloc_size(int n)
307{
308 if (n > 2)
309 return 1 << n;
310
311 if (n == 1 && KMALLOC_MIN_SIZE <= 32)
312 return 96;
313
314 if (n == 2 && KMALLOC_MIN_SIZE <= 64)
315 return 192;
316
317 return 0;
318}
319#endif
320
321
322
323
324
325
326#ifndef ARCH_SLAB_MINALIGN
327#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
328#endif
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
/*
 * Per-memcg bookkeeping attached to a kmem_cache.
 *
 * is_root_cache selects which arm of the union is live:
 *  - root caches carry the array of per-memcg child caches;
 *  - child caches carry back-pointers to their memcg and root cache.
 */
struct memcg_cache_params {
	bool is_root_cache;
	union {
#ifdef __GENKSYMS__
		/*
		 * NOTE(review): this arm exists so genksyms sees the
		 * original layout for kABI checksums while real builds
		 * also get the rcu_head — confirm against kABI policy.
		 */
		struct kmem_cache *memcg_caches[0];
#else
		struct {
			struct kmem_cache *memcg_caches[0];
			struct rcu_head rcu_head;	/* for deferred freeing of this params block */
		};
#endif
		struct {
			struct mem_cgroup *memcg;	/* owning memory cgroup */
			struct list_head list;
			struct kmem_cache *root_cache;	/* cache this one was cloned from */
			RH_KABI_DEPRECATE(bool, dead)
			atomic_t nr_pages;
			RH_KABI_DEPRECATE(struct work_struct, destroy)
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

struct seq_file;
/* /proc/slabinfo helpers shared between the allocators. */
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
430{
431 size_t bytes;
432
433 if (unlikely(check_mul_overflow(n, size, &bytes)))
434 return NULL;
435 if (__builtin_constant_p(n) && __builtin_constant_p(size))
436 return kmalloc(bytes, flags);
437 return __kmalloc(bytes, flags);
438}
439
440
441
442
443
444
445
446static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
447{
448 return kmalloc_array(n, size, flags | __GFP_ZERO);
449}
450
451
452
453
454
455
456
457
/*
 * Bulk alloc/free: amortise per-call overhead when moving many objects
 * at once.  NOTE(review): kmem_cache_alloc_bulk() appears to return a
 * count/status via int — confirm the convention in mm/slab_common.c.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
460
461#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
462
463
464
465
466
467
468
469
470
471
472static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
473{
474 return kmalloc(size, flags);
475}
476
477static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
478{
479 return __kmalloc(size, flags);
480}
481
482void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
483
484static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
485 gfp_t flags, int node)
486{
487 return kmem_cache_alloc(cachep, flags);
488}
489#endif
490
491
492
493
494
495
496
497
498
/*
 * kmalloc_track_caller - like kmalloc(), but records the call site of
 * the *caller* (rather than this wrapper) for slab debugging output.
 * Only wired up when the configured allocator can store the address;
 * otherwise it degrades to a plain __kmalloc().
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif
509
510static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
511 int node)
512{
513 size_t bytes;
514
515 if (unlikely(check_mul_overflow(n, size, &bytes)))
516 return NULL;
517 if (__builtin_constant_p(n) && __builtin_constant_p(size))
518 return kmalloc_node(bytes, flags, node);
519 return __kmalloc_node(bytes, flags, node);
520}
521
522static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
523{
524 return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
525}
526
527
#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller - NUMA-aware variant of
 * kmalloc_track_caller(): allocates on @node and records the original
 * call site when the configured allocator supports it.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else

/* !NUMA: the node argument is meaningless, so just drop it. */
#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif
555
556
557
558
559static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
560{
561 return kmem_cache_alloc(k, flags | __GFP_ZERO);
562}
563
564
565
566
567
568
569static inline void *kzalloc(size_t size, gfp_t flags)
570{
571 return kmalloc(size, flags | __GFP_ZERO);
572}
573
574
575
576
577
578
579
580static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
581{
582 return kmalloc_node(size, flags | __GFP_ZERO, node);
583}
584
585
586
587
/*
 * kmem_cache_size - size of the objects managed by the cache.
 * Returns object_size, the original object size — as opposed to the
 * descriptor's `size` field, which (per the struct above) also covers
 * alignment/padding.
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
592
593void __init kmem_cache_init_late(void);
594
595#endif
596