#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */

#include <linux/init.h>
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;	/* cache for off-slab slab management */
	unsigned int slab_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object.  'size' contains the total
	 * object size including these internal fields, while obj_offset
	 * gives the offset to the user-visible object.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
#endif

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init()).
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 *
	 * We also need to guarantee that the list is able to accommodate a
	 * pointer for each node since "node" uses the remainder of
	 * allocated pointers.
	 */
	struct kmem_cache_node **node;
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/*
	 * Do not add fields after array[]
	 */
};
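
/*
 * Illustrative sketch, not part of the original header: a kmem_cache is
 * normally set up and used through the generic slab API declared in
 * <linux/slab.h>, e.g.
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
 *				       0, SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, obj);
 *	kmem_cache_destroy(foo_cachep);
 *
 * "struct foo" and "foo_cachep" are made-up example names; the
 * create/alloc/free/destroy calls are the standard slab interface and
 * are not defined in this file.
 */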

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i;

		if (!size)
			return ZERO_SIZE_PTR;

		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
			return NULL;

		i = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = kmalloc_dma_caches[i];
		else
#endif
			cachep = kmalloc_caches[i];

		ret = kmem_cache_alloc_trace(cachep, flags, size);

		return ret;
	}
	return __kmalloc(size, flags);
}
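
/*
 * Illustrative sketch, not part of the original header: because the size
 * check above uses __builtin_constant_p(), a call such as
 *
 *	buf = kmalloc(128, GFP_KERNEL);
 *
 * folds at compile time into a direct kmem_cache_alloc_trace() on the
 * matching kmalloc_caches[] entry, while a variable-sized request like
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *
 * falls through to __kmalloc().
 */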

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid,
					 size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid,
			    size_t size)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i;

		if (!size)
			return ZERO_SIZE_PTR;

		if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
			return NULL;

		i = kmalloc_index(size);

#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = kmalloc_dma_caches[i];
		else
#endif
			cachep = kmalloc_caches[i];

		return kmem_cache_alloc_node_trace(cachep, flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}
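
/*
 * Illustrative sketch, not part of the original header: kmalloc_node()
 * behaves like kmalloc() but allocates from a specific NUMA node, e.g.
 *
 *	data = kmalloc_node(sizeof(*data), GFP_KERNEL, cpu_to_node(cpu));
 *
 * "data" and "cpu" are placeholder names; cpu_to_node() is the standard
 * topology helper, declared elsewhere.
 */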

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */