#ifndef MM_SLAB_H
#define MM_SLAB_H
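/*
 * Internal slab definitions
 */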
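/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap functionality during
 * bootup until the allocator is fully functional (can allocate more
 * than PAGE_SIZE objects).
 */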
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

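/* The slab cache mutex protects the management structures during changes */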
extern struct mutex slab_mutex;

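/* The list of all slab caches on the system */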
extern struct list_head slab_caches;

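/* The slab cache that manages slab cache information */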
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
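/* Kmalloc array related functions */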
void create_kmalloc_caches(unsigned long);

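/* Find the kmalloc slab corresponding to a certain size */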
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
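/* Functions provided by the slab allocators */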
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

struct mem_cgroup;
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif
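/* Legal flag mask for kmem_cache_create(), for various configurations */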
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

int __kmem_cache_shutdown(struct kmem_cache *);

struct seq_file;
struct file;

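/* Per-cache statistics, as reported through /proc/slabinfo */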
struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
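/*
 * Generic implementation of bulk operations.
 * These are useful when the allocator cannot perform optimizations;
 * in that case uses of the bulk interfaces degrade to this generic
 * reference implementation.
 */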
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
	if (!is_root_cache(s))
		atomic_add(1 << order, &s->memcg_params->nr_pages);
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
	if (!is_root_cache(s))
		atomic_sub(1 << order, &s->memcg_params->nr_pages);
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}
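/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, better refer to them with the base name.
 */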
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		return s->memcg_params->root_cache->name;
	return s->name;
}
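/*
 * Look up the per-memcg cache at a given index in the root cache's
 * memcg_caches array. Only the array itself is RCU-protected here; the
 * caller must guarantee that the returned cache cannot go away, for
 * example by holding slab_mutex or a reference on the root cache.
 */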
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_params *params;

	if (!s->memcg_params)
		return NULL;

	rcu_read_lock();
	params = rcu_dereference(s->memcg_params);
	cachep = params->memcg_caches[idx];
	rcu_read_unlock();

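	/*
	 * Make sure we access the up-to-date value: the code that installs
	 * entries in memcg_caches issues a write barrier to pair with this
	 * read-side dependency barrier.
	 */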
	smp_read_barrier_depends();
	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params->root_cache;
}

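/*
 * Charge/uncharge the pages backing a memcg cache to its memory cgroup.
 * Root caches are never charged here.
 */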
static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_charge_kmem(s->memcg_params->memcg, gfp, 1 << order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
	if (!memcg_kmem_enabled())
		return;
	if (is_root_cache(s))
		return;
	memcg_uncharge_kmem(s->memcg_params->memcg, 1 << order);
}
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

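	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */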
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}
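/*
 * The slab lists for all objects.
 */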
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

#endif /* MM_SLAB_H */