// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save the stack traces of memory events,
 * e.g. of every memory allocation. Storing each trace verbatim would be
 * wasteful, because many of them are identical. Instead, the stack depot
 * keeps one copy of every unique trace in a hash table and hands out a
 * compact 32-bit handle (depot_stack_handle_t) from which the full trace
 * can be re-fetched at any later time.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)

/* The compact structure to store the reference to stacks. */
union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};
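
/*
 * Worked example of the bit budget above, assuming 4K pages
 * (PAGE_SHIFT == 12) and a 32-bit depot_stack_handle_t:
 *
 *	STACK_ALLOC_OFFSET_BITS = 2 + 12 - 4 = 10
 *		(one 16K slab, addressed in 16-byte granules)
 *	STACK_ALLOC_INDEX_BITS = 32 - 1 - 10 = 21
 *		(2^21 slabs encodable, capped at 8192 by STACK_ALLOC_SLABS_CAP)
 *
 * The remaining bit is |valid|, so a zero handle never names a real
 * stack; total depot capacity under these assumptions is 8192 * 16K = 128 MB.
 */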

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[1];	/* Variable-sized array of entries. */
};

/* Array of memory regions that store stack traces */
static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

/* Currently used slab in stack_slabs */
static int depot_index;
/* Set when the next slab in stack_slabs has been preallocated */
static int next_slab_inited;
/* Offset of the first free byte in the currently used slab */
static size_t depot_offset;
/* Lock that protects the variables above */
static DEFINE_SPINLOCK(depot_lock);

static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
		u32 hash, void **prealloc, gfp_t alloc_flags)
{
	int required_size = offsetof(struct stack_record, entries) +
		sizeof(unsigned long) * size;
	struct stack_record *stack;

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, size * sizeof(unsigned long));
	depot_offset += required_size;

	return stack;
}

#define STACK_HASH_ORDER 20
#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c

static struct stack_record *stack_table[STACK_HASH_SIZE] = {
	[0 ... STACK_HASH_SIZE - 1] = NULL
};

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      size * sizeof(unsigned long) / sizeof(u32),
		      STACK_HASH_SEED);
}

/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					      unsigned long *entries, int size,
					      u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:		Stack depot handle which was returned from
 *			stack_depot_save().
 * @entries:		Pointer to store the entries address
 *
 * Return: The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (parts.slabindex > depot_index) {
		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
		return 0;
	}
	slab = stack_slabs[parts.slabindex];
	if (!slab)
		return 0;
	stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
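
/*
 * Usage sketch (not part of this file): a caller that saved a handle
 * earlier can print the trace back with the generic helper from
 * <linux/stacktrace.h>:
 *
 *	unsigned long *entries;
 *	unsigned int nr_entries;
 *
 *	nr_entries = stack_depot_fetch(handle, &entries);
 *	if (nr_entries)
 *		stack_trace_print(entries, nr_entries, 0);
 */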

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Return: The handle of the stack struct stored in depot
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t retval = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	if (unlikely(nr_entries == 0))
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab need to be initialized.
	 * If so, allocate the memory - we won't be able to do that under the
	 * lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries,
					  hash, &prealloc, alloc_flags);
		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);
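
/*
 * Usage sketch (not part of this file): the producer side typically
 * pairs stack_trace_save() from <linux/stacktrace.h> with
 * stack_depot_save(); the 16-entry array and GFP_NOWAIT below are
 * illustrative choices, not requirements:
 *
 *	unsigned long entries[16];
 *	unsigned int nr_entries;
 *	depot_stack_handle_t handle;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 *
 * A zero return value means the trace could not be stored.
 */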

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
	       (ptr >= (unsigned long)&__softirqentry_text_start &&
		ptr < (unsigned long)&__softirqentry_text_end);
}

unsigned int filter_irq_stacks(unsigned long *entries,
			       unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function into the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}
EXPORT_SYMBOL_GPL(filter_irq_stacks);
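
/*
 * Usage sketch (not part of this file): trimming everything below the
 * IRQ entry point before saving lets traces captured in different
 * interrupted contexts deduplicate to a single depot record:
 *
 *	nr_entries = filter_irq_stacks(entries, nr_entries);
 *	handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC);
 */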