/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>

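/*
 * Allocator statistics, counted per cpu in kmem_cache_cpu::stat[] when
 * CONFIG_SLUB_STATS is enabled and exposed under /sys/kernel/slab/<cache>/
 * (one file per item, named after the lowercase enum name).
 */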
enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };

/*
 * The layout of the first two fields matters: the fast paths update
 * freelist and tid together with this_cpu_cmpxchg_double(), so they must
 * stay adjacent and suitably aligned.
 */
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */
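/*
 * Illustrative sketch (not part of this header): the allocation slowpath in
 * mm/slub.c pops a frozen slab off the per-cpu partial list roughly like
 * this, with 'c' pointing at the current kmem_cache_cpu:
 *
 *	if (slub_percpu_partial(c)) {
 *		page = c->page = slub_percpu_partial(c);
 *		slub_set_percpu_partial(c, page);  // c->partial = page->next
 *	}
 */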

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order can hold.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
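/*
 * The encoding of 'x' is defined in mm/slub.c (oo_make(), oo_order() and
 * oo_objects()): the page allocation order lives in the high bits and the
 * number of objects per slab in the low bits, so both can be read with a
 * single word-sized load.
 */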

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;	/* The size of an object including metadata */
	unsigned int object_size;/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;	/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);	/* Object constructor */
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;	/* Per-cache value used to harden freelist pointers */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;	/* Precomputed random free order */
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)		\
({						\
	slub_cpu_partial(s) = (n);		\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */
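/*
 * cpu_partial is the number of objects that may sit on a cpu's partial
 * slabs before excess slabs are drained back to the node partial lists;
 * it is sized at cache creation based on the object size and can be read
 * or tuned through /sys/kernel/slab/<cache>/cpu_partial.
 */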

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

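/*
 * Map an arbitrary pointer within a slab page back to the start of the
 * object containing it (clamped to the last valid object), then skip the
 * left red zone so the returned address is the object itself rather than
 * its debug padding.
 */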
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x) {
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}
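
/*
 * obj_to_index() additionally handles KFENCE-managed objects: they are not
 * laid out inside a regular slab page, so they report index 0. Callers such
 * as the memcg per-object accounting code use the index to address
 * per-object metadata arrays.
 */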
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, page_address(page), obj);
}

static inline int objs_per_slab_page(const struct kmem_cache *cache,
				     const struct page *page)
{
	return page->objects;
}
#endif /* _LINUX_SLUB_DEF_H */