#ifndef MM_SLAB_H
#define MM_SLAB_H
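/*
 * Internal slab definitions
 */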

#ifdef CONFIG_SLOB
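/*
 * Common fields provided in kmem_cache by all slab allocators.
 * SLOB uses this structure directly; SLAB and SLUB must provide
 * the same fields in their own definitions of struct kmem_cache.
 */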
struct kmem_cache {
	unsigned int object_size;	/* The original size of the object */
	unsigned int size;		/* The aligned/padded/added-on size */
	unsigned int align;		/* Alignment as calculated */
	slab_flags_t flags;		/* Active flags on the slab */
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
	const char *name;		/* Slab name for sysfs */
	int refcount;			/* Use counter */
	void (*ctor)(void *);		/* Called on object slot creation */
	struct list_head list;		/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
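/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */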
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding for a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
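
/* Functions provided by the slab allocators */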
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif
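/* Legal flag mask for kmem_cache_create(), for various configurations */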
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
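/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object listed
 * will be allocated or freed using conventional calls.
 */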
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node
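
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */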
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}
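/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, better refer to them with the base name.
 */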
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}
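/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said the caller must assure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */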
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

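	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this read.
	 */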
	cachep = READ_ONCE(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
				void (*deact_fn)(struct kmem_cache *));

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

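	/*
	 * When kmemcg is not being used and consistency checks are disabled,
	 * skip the virt_to_head_page() lookup below: the passed-in cache is
	 * trusted and returned as-is.
	 */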
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation.
	 */
	return s->size;
#endif
}
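/*
 * Hooks run before entering the allocator fast path: honour gfp_allowed_mask,
 * annotate fs reclaim, allow fault injection, and pick the memcg-specific
 * cache when kmem accounting applies to this allocation.
 */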
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}

#ifndef CONFIG_SLOB
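/*
 * The slab lists for all objects.
 */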
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}
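/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 *
 * Illustrative use only (hypothetical caller; assumes CONFIG_SLUB so that
 * ->nr_partial exists):
 *
 *	struct kmem_cache_node *n;
 *	unsigned long partial = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 */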
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif

#endif /* MM_SLAB_H */