/*
 * Common interface to the kernel slab allocators (SLAB, SLUB and SLOB):
 * object caches (struct kmem_cache) and the kmalloc() family.
 */
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Flags to pass to kmem_cache_create().
 * The flags marked DEBUG only take effect when slab debugging is enabled.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */

/*
 * SLAB_DESTROY_BY_RCU only delays freeing of a cache's backing pages by an
 * RCU grace period; it does NOT delay the reuse of individual objects.
 * After kmem_cache_free() the memory may be reallocated at any time, so
 * RCU readers must revalidate any object reached through a stale pointer
 * before using it.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

#define SLAB_NOLEAKTRACE	0x00800000UL	/* Avoid kmemleak tracing */

/* Don't track use of uninitialized memory */
#ifdef CONFIG_KMEMCHECK
# define SLAB_NOTRACK		0x01000000UL
#else
# define SLAB_NOTRACK		0x00000000UL
#endif
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		0x02000000UL	/* Fault injection mark */
#else
# define SLAB_FAILSLAB		0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree in the same way that NULL can:
 * both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

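/*
 * Illustrative sketch (not part of the original header): how a caller can
 * use ZERO_OR_NULL_PTR() to treat a failed allocation and a zero-sized
 * allocation uniformly.  'buf' and 'len' are made-up names for the example.
 *
 *	char *buf = kmalloc(len, GFP_KERNEL);	// ZERO_SIZE_PTR if len == 0
 *	if (ZERO_OR_NULL_PTR(buf))
 *		return -ENOMEM;			// covers NULL and ZERO_SIZE_PTR
 *	...
 *	kfree(buf);		// safe even if buf were NULL or ZERO_SIZE_PTR
 */
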
struct mem_cgroup;

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
struct kmem_cache *
kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
			unsigned long, void (*)(void *), struct kmem_cache *);
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);

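/*
 * Illustrative sketch (not part of the original header): the usual lifecycle
 * of a cache created with the functions declared above.  The cache name, the
 * hypothetical 'struct foo' and the error handling are assumptions made for
 * this example only.
 *
 *	struct kmem_cache *foo_cache;
 *	struct foo *f;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */
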
/*
 * Please use this macro to create slab caches. Simply specify the
 * struct and the cache will be created with a name based on the
 * struct, sized and aligned appropriately for it.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

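/*
 * Illustrative sketch (not part of the original header): KMEM_CACHE() as a
 * shorthand for the explicit kmem_cache_create() call above.  'struct foo'
 * is a hypothetical type used only for this example.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = KMEM_CACHE(foo, SLAB_PANIC);
 *	// expands to:
 *	// kmem_cache_create("foo", sizeof(struct foo),
 *	//		     __alignof__(struct foo), SLAB_PANIC, NULL);
 */
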
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

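/*
 * Illustrative sketch (not part of the original header): growing a buffer
 * with krealloc().  On failure krealloc() returns NULL and leaves the old
 * buffer intact, so the result must not overwrite the old pointer blindly.
 * 'buf' and 'new_len' are made-up names for the example.
 *
 *	char *tmp = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;		// 'buf' is still valid and still owned
 *	buf = tmp;
 */
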
/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.  Defining
 * ARCH_DMA_MINALIGN in arch headers provides that guarantee.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.  This struct
 * is used directly by the SLOB allocator; SLAB and SLUB define their own,
 * larger struct kmem_cache containing at least these fields.
 */
struct kmem_cache {
	unsigned int object_size;	/* Original size of the objects */
	unsigned int size;		/* Size including alignment/padding */
	unsigned int align;		/* Object alignment */
	unsigned long flags;		/* SLAB_* flags for this cache */
	const char *name;		/* Cache name (for reporting) */
	int refcount;			/* Reference/use count */
	void (*ctor)(void *);		/* Object constructor */
	struct list_head list;		/* Entry in the global list of caches */
};

#define KMALLOC_MAX_SIZE (1UL << 30)

#include <linux/slob_def.h>

#else /* !CONFIG_SLOB */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is 32 megabyte
 * (2^25), or the maximum allocatable page order if that is less than 32 MB.
 */
#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 5
#endif
#else
/*
 * SLUB directly allocates requests fitting in to an order-1 page
 * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
#ifdef CONFIG_ZONE_DMA
extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
#endif

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to:
 *   0 = zero alloc
 *   1 = the special 96-byte cache  (sizes  65..96,  if KMALLOC_MIN_SIZE <= 32)
 *   2 = the special 192-byte cache (sizes 129..192, if KMALLOC_MIN_SIZE <= 64)
 *   n = sizes 2^(n-1)+1 .. 2^n otherwise
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}

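/*
 * Worked example (not part of the original header), assuming
 * KMALLOC_MIN_SIZE == 8 so that KMALLOC_SHIFT_LOW == 3 and the special
 * 96- and 192-byte caches exist:
 *
 *	kmalloc_index(0)    -> 0	(no allocation)
 *	kmalloc_index(6)    -> 3	(rounded up to the 8-byte cache)
 *	kmalloc_index(80)   -> 1	(the special 96-byte cache)
 *	kmalloc_index(100)  -> 7	(the 128-byte cache)
 *	kmalloc_index(150)  -> 2	(the special 192-byte cache)
 *	kmalloc_index(4096) -> 12	(the 4 KiB cache)
 *
 * kmalloc_size() further below is the inverse mapping, from cache index
 * back to object size.
 */
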
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#elif defined(CONFIG_SLUB)
#include <linux/slub_def.h>
#else
#error "Unknown slab allocator"
#endif

/*
 * Determine the size used for the nth kmalloc cache.
 * Returns 0 if a kmalloc cache for that index does not exist.
 */
static __always_inline int kmalloc_size(int n)
{
	if (n > 2)
		return 1 << n;

	if (n == 1 && KMALLOC_MIN_SIZE <= 32)
		return 96;

	if (n == 2 && KMALLOC_MIN_SIZE <= 64)
		return 192;

	return 0;
}
#endif /* !CONFIG_SLOB */

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Main placeholder for memcg-related information in kmem caches.  The root
 * cache holds a dynamically sized array of per-memcg child caches; each
 * child cache carries the metadata needed to operate it and, eventually,
 * to destroy it.
 */
struct memcg_cache_params {
	bool is_root_cache;
	union {
		struct kmem_cache *memcg_caches[0];
		struct {
			struct mem_cgroup *memcg;
			struct list_head list;
			struct kmem_cache *root_cache;
			bool dead;
			atomic_t nr_pages;
			struct work_struct destroy;
		};
	};
};

int memcg_update_all_caches(int num_memcgs);

struct seq_file;
int cache_show(struct kmem_cache *s, struct seq_file *m);
void print_slabinfo_header(struct seq_file *m);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate.
 *
 * @flags is a GFP mask: %GFP_KERNEL for normal kernel allocations that may
 * sleep, %GFP_ATOMIC for allocations from atomic context (never sleeps, may
 * use emergency pools), %GFP_DMA for memory suitable for DMA, and so on.
 * See linux/gfp.h for the full list of flags.
 *
 * Returns %NULL on allocation failure or if the total size would overflow.
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

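/*
 * Illustrative sketch (not part of the original header): allocating an array
 * of 'n' elements with overflow checking.  'struct item' and 'n' are made-up
 * names for the example.
 *
 *	struct item *items;
 *
 *	items = kcalloc(n, sizeof(*items), GFP_KERNEL);		// zeroed
 *	// or, without zeroing:
 *	items = kmalloc_array(n, sizeof(*items), GFP_KERNEL);
 *	if (!items)
 *		return -ENOMEM;	// also taken if n * sizeof(*items) overflows
 *	...
 *	kfree(items);
 */
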
#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
/*
 * Without CONFIG_NUMA (and without CONFIG_SLOB) the node-aware variants
 * simply fall back to their node-agnostic counterparts: there is only
 * one memory node to allocate from.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc(size, flags);
}

static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);

static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
					  gfp_t flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif /* !CONFIG_NUMA && !CONFIG_SLOB */

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * return address of the routine calling it, for slab leak tracking.  It is
 * useful when writing wrappers around kmalloc: allocations are attributed
 * to the wrapper's caller rather than to the wrapper itself.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif

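/*
 * Illustrative sketch (not part of the original header): a wrapper that uses
 * kmalloc_track_caller() so that leak reports point at the wrapper's caller.
 * 'my_alloc' is a hypothetical helper made up for this example.
 *
 *	static inline void *my_alloc(size_t len)
 *	{
 *		return kmalloc_track_caller(len, GFP_KERNEL);
 *	}
 */
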
#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is the node-aware counterpart of
 * kmalloc_track_caller: it records the return address of the routine
 * calling it for slab leak tracking.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
	(defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
	(defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

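/*
 * Illustrative sketch (not part of the original header): the zeroing
 * shortcuts above are plain kmalloc()/kmalloc_node() with __GFP_ZERO OR'd
 * in, so the usual NULL check and kfree() apply.  'struct foo' is a
 * made-up type for the example.
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kfree(f);
 */
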
/*
 * Determine the size of a slab object
 */
static inline unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */