#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>

#include <linux/kmemleak.h>

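/*
 * Per-cpu allocator event counters, gathered only when CONFIG_SLUB_STATS
 * is enabled. Each counter is then also exported as a sysfs attribute
 * under /sys/kernel/slab/<cache>/ (e.g. alloc_fastpath, free_slowpath).
 */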
enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object of a slab */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* New slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab moved to the head of partial list */
	DEACTIVATE_TO_TAIL,	/* Cpu slab moved to the tail of partial list */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Failure of slab freelist cmpxchg_double */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
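/*
 * Illustration only (the real code lives in mm/slub.c): the lockless
 * fastpaths pair ->freelist and ->tid in a single this_cpu_cmpxchg_double()
 * so that a transaction that raced with preemption, migration or an
 * interrupt is detected and retried, roughly:
 *
 *	object = c->freelist;
 *	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
 *				     object, tid,
 *				     get_freepointer(s, object), next_tid(tid)))
 *		goto redo;
 */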

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
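/*
 * For orientation only: mm/slub.c packs the page order and the object
 * count into this one word and decodes it along these lines (OO_SHIFT
 * and OO_MASK are private to that file):
 *
 *	order   = x >> OO_SHIFT;	// OO_SHIFT == 16
 *	objects = x & OO_MASK;		// OO_MASK == (1 << OO_SHIFT) - 1
 */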

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	unsigned long min_partial;	/* Minimum number of partial slabs to keep per node */
	int size;		/* The size of an object including meta data */
	int object_size;	/* The size of an object without meta data */
	int offset;		/* Free pointer offset */
	int cpu_partial;	/* Number of per cpu partial objects to keep around */
	struct kmem_cache_order_objects oo;	/* Default order/objects of a slab */

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
	int max_attr_size;	/* for propagation, maximum size of a stored attr */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif
	struct kmem_cache_node *node[MAX_NUMNODES];
};

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

/*
 * Large allocations are passed straight to the page allocator as
 * compound pages, bypassing the kmalloc caches.
 */
static __always_inline void *
kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;

	flags |= (__GFP_COMP | __GFP_KMEMCG);
	ret = (void *) __get_free_pages(flags, order);
	kmemleak_alloc(ret, size, 1, flags);
	return ret;
}

/*
 * Determine whether the memory at @x is still allocated, i.e. has not
 * already been freed. Only meaningful with CONFIG_SLUB_DEBUG; otherwise
 * it unconditionally reports true.
 */
#ifdef CONFIG_SLUB_DEBUG
extern bool verify_mem_not_deleted(const void *x);
#else
static inline bool verify_mem_not_deleted(const void *x)
{
	return true;
}
#endif

#ifdef CONFIG_TRACING
extern void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	return kmem_cache_alloc(s, gfpflags);
}

static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & GFP_DMA)) {
			int index = kmalloc_index(size);

			if (!index)
				return ZERO_SIZE_PTR;

			return kmem_cache_alloc_trace(kmalloc_caches[index],
					flags, size);
		}
	}
	return __kmalloc(size, flags);
}
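/*
 * Illustrative only: for a compile-time-constant size, e.g. a
 * hypothetical
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 * the branches above are folded at compile time, so the call becomes a
 * direct kmem_cache_alloc_trace() on the matching kmalloc_caches[] entry
 * (or kmalloc_large() above KMALLOC_MAX_CACHE_SIZE). A non-constant size
 * or a GFP_DMA request falls through to __kmalloc().
 */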

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node, size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node, size_t size)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
		int index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(kmalloc_caches[index],
			       flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
#endif

#endif /* _LINUX_SLUB_DEF_H */