#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided here in its own definition of kmem_cache.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added-on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};
#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing their own slabs
 * cannot be allocated at the very beginning of the boot process.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned long size;
} kmalloc_info[];

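/*
 * Compute the effective alignment for a new cache from its creation flags,
 * the requested alignment and the object size (SLAB_HWCACHE_ALIGN, for
 * instance, rounds suitably large objects up to a cache-line boundary).
 */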
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

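/*
 * Cache merging: compatible caches may share a single kmem_cache to reduce
 * memory overhead. find_mergeable() looks up an existing cache matching in
 * size, alignment and flags; slab_unmergeable() reports caches that must
 * stay separate (e.g. those with a constructor or debug flags set).
 */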
int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
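
/*
 * __kmem_cache_alias() lets SLAB/SLUB satisfy kmem_cache_create() by
 * reusing an existing compatible cache. SLOB never merges, so its stub
 * returns NULL and passes the flags through unchanged.
 */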
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with the current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create() */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_NOTRACK | \
			      SLAB_ACCOUNT)

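/*
 * Teardown entry points implemented by each allocator: shutdown drains a
 * cache, release frees its management structures, shrink discards empty
 * slabs.
 */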
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

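/*
 * /proc/slabinfo support: each allocator fills a struct slabinfo snapshot
 * for one cache, which the common code formats for the seq_file interface.
 */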
struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform bulk operations yet, e.g. because debugging is enabled.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must
 * hold slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)
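
/*
 * Usage sketch (slab_mutex held):
 *
 *	struct kmem_cache *c;
 *
 *	for_each_memcg_cache(c, root)
 *		pr_info("child cache: %s\n", c->name);
 */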

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, better refer to them with the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg
 * caches. The caller must assure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this.
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}
static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int ret;

	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;

	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
	if (ret)
		return ret;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			1 << order);
	return 0;
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			-(1 << order));
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
				void (*deact_fn)(struct kmem_cache *));

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart
	 * enough to not do even the assignment; in that case,
	 * slab_equal_or_root would be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}
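
/*
 * slab_ksize() is the size a caller may legitimately use, as reported by
 * ksize(). SLAB and SLOB always report the original object size; SLUB
 * lets callers use the padding too, unless debugging, KASAN, user tracking
 * or an out-of-object freelist pointer needs the space past the object.
 */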
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation.
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
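
/*
 * Sketch of how an allocator fast path pairs the two hooks above:
 *
 *	s = slab_pre_alloc_hook(s, gfpflags);
 *	if (!s)
 *		return NULL;
 *	object = ...allocate from s...;
 *	slab_post_alloc_hook(s, gfpflags, 1, &object);
 */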

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that
 * has a kmem_cache_node structure allocated (which is true for all online
 * nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
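
/*
 * Usage sketch (e.g. under SLUB):
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr += n->nr_partial;
 */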

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

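/*
 * SLAB_FREELIST_RANDOM pre-computes a random sequence that is used to
 * shuffle object order on freshly allocated slabs, making heap layouts
 * harder to predict. cache_random_seq_create() is called from the
 * allocator's init path with the object count per slab.
 */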
#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */