#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing their caches are
 * dependent on the slab allocator itself, so those caches must be brought
 * up in stages.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};
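
/*
 * Illustrative sketch, not part of this header: allocator code consults
 * slab_state to defer work that needs a fully initialized allocator.
 * SLUB's sysfs registration, for example, is expected to bail out early
 * during bootstrap, roughly:
 *
 *	if (slab_state < FULL)
 *		return 0;
 */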

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);
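
/*
 * Usage sketch (an assumption about the common cache-creation path): the
 * resulting alignment honours both the caller's request and flags such
 * as SLAB_HWCACHE_ALIGN:
 *
 *	s->align = calculate_alignment(flags, align, size);
 */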

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding for a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
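
/*
 * Illustrative only: a kmalloc() slow path is expected to resolve the
 * cache for a size and then allocate from it, roughly:
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *	if (unlikely(ZERO_OR_NULL_PTR(s)))
 *		return s;
 *	return kmem_cache_alloc_trace(s, flags, size);
 */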

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);
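
/*
 * Bootstrap sketch (illustrative): the earliest caches are created on
 * statically allocated kmem_cache structures, before kmalloc works,
 * e.g.:
 *
 *	create_boot_cache(kmem_cache_node, "kmem_cache_node",
 *			  sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
 */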

struct mem_cgroup;
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
		   size_t align, unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
		   size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
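
/*
 * Illustrative only: kmem_cache_create() is expected to mask caller
 * flags against this set before creating a cache:
 *
 *	flags &= CACHE_CREATE_MASK;
 */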

int __kmem_cache_shutdown(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
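
/*
 * Illustrative sketch of a /proc/slabinfo show path built on the helpers
 * above (the exact seq_printf format is an assumption):
 *
 *	struct slabinfo sinfo;
 *
 *	memset(&sinfo, 0, sizeof(sinfo));
 *	get_slabinfo(s, &sinfo);
 *	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
 *		   cache_name(s), sinfo.active_objs, sinfo.num_objs,
 *		   s->size, sinfo.objects_per_slab, (1 << sinfo.cache_order));
 *	slabinfo_show_stats(m, s);
 */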

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
				     struct mem_cgroup *memcg)
{
	return (is_root_cache(cachep) && !memcg) ||
		(cachep->memcg_params->memcg == memcg);
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
	if (!is_root_cache(s))
		atomic_add(1 << order, &s->memcg_params->nr_pages);
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
	if (is_root_cache(s))
		return;

	if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
		mem_cgroup_destroy_cache(s);
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}
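
/*
 * Illustrative only: the allocators are expected to account slab pages
 * to the owning memcg as they are allocated and freed (hypothetical
 * call sites in the page-level paths):
 *
 *	memcg_bind_pages(cachep, cachep->gfporder);
 *	...
 *	memcg_release_pages(cachep, cachep->gfporder);
 */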

/*
 * memcg caches get a suffix appended to the root cache's name, since two
 * caches in the system cannot share a name; when reporting a cache name
 * we always want the root cache's name, whichever cache we were handed.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		return s->memcg_params->root_cache->name;
	return s->name;
}

static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
{
	if (!s->memcg_params)
		return NULL;
	return s->memcg_params->memcg_caches[idx];
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params->root_cache;
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
				     struct mem_cgroup *memcg)
{
	return true;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}
#endif
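
/*
 * Illustrative only: walking the per-memcg children of a root cache,
 * assuming for_each_memcg_cache_index() from memcontrol.h:
 *
 *	int i;
 *
 *	for_each_memcg_cache_index(i) {
 *		struct kmem_cache *c = cache_from_memcg(s, i);
 *
 *		if (c)
 *			... operate on the child cache ...
 *	}
 */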

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return
	 * the same value, but we don't want to pay the price of the page
	 * lookup in that case. If kmemcg is not compiled in, the compiler
	 * should be smart enough to elide even the assignment; then
	 * slab_equal_or_root would be an always-true condition. The check
	 * is kept when SLAB_DEBUG_FREE is set, since the consistency
	 * warning below is wanted then.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}
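
/*
 * Illustrative only: kmem_cache_free() in the allocators is expected to
 * revalidate the cache before freeing, so a free aimed at the root cache
 * lands in the object's actual (possibly per-memcg) cache:
 *
 *	void kmem_cache_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);
 *		... free x into s ...
 *	}
 */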


/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};
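
/*
 * Illustrative only: each allocator keeps one kmem_cache_node per NUMA
 * node and looks it up by node id; SLUB, for instance, does roughly:
 *
 *	static inline struct kmem_cache_node *get_node(struct kmem_cache *s,
 *						       int node)
 *	{
 *		return s->node[node];
 *	}
 */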

void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */