// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic stack depot for storing stack traces.
 *
 * Some debugging tools need to save stack traces of certain memory management
 * routines for later analysis (e.g. in case of memory leaks). For example,
 * KASAN needs to save alloc and free stacks for each object, but storing two
 * stack traces per object requires too much memory (e.g. SLUB_DEBUG needs 256
 * bytes per object).
 *
 * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc
 * and free stacks repeat a lot, we save about 100x space.
 * Stacks are never removed from the depot; they are stored contiguously one
 * after another in a contiguous memory allocation.
 *
 * Author: Alexander Potapenko <glider@google.com>
 * Copyright (C) 2016 Google, Inc.
 *
 * Based on code by Dmitry Chernenkov.
 */
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/memblock.h>

#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)

#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
					STACK_ALLOC_ALIGN)
#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
		STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 8192
#define STACK_ALLOC_MAX_SLABS \
	(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
	 (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP)
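/*
 * A worked example of the bit budget, assuming 4K pages (PAGE_SHIFT == 12)
 * and a 32-bit depot_stack_handle_t; other configurations shift these values:
 *
 *	STACK_ALLOC_SIZE        = 1LL << (12 + 2)     = 16KB per slab
 *	STACK_ALLOC_OFFSET_BITS = 2 + 12 - 4          = 10
 *	STACK_ALLOC_INDEX_BITS  = 32 - 1 - 10         = 21
 *	STACK_ALLOC_MAX_SLABS   = min(1 << 21, 8192)  = 8192
 *
 * A handle can therefore address up to 8192 * 16KB = 128MB of stack storage.
 */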

union handle_parts {
	depot_stack_handle_t handle;
	struct {
		u32 slabindex : STACK_ALLOC_INDEX_BITS;
		u32 offset : STACK_ALLOC_OFFSET_BITS;
		u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
	};
};
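/*
 * Packing example (assuming the typical field widths worked out above): the
 * record starting at byte offset 0x40 of slab 3 is encoded as slabindex == 3,
 * offset == 0x40 >> STACK_ALLOC_ALIGN == 4, valid == 1. The exact bit
 * positions are implementation-defined bitfield layout, so handles should
 * only be decoded through this union, never by manual shifting.
 */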

struct stack_record {
	struct stack_record *next;	/* Link in the hashtable bucket */
	u32 hash;			/* Hash in the hashtable */
	u32 size;			/* Number of frames in the stack */
	union handle_parts handle;
	unsigned long entries[];	/* Variable-sized array of entries. */
};

static void *stack_slabs[STACK_ALLOC_MAX_SLABS];

static int depot_index;			/* Slab currently being filled */
static int next_slab_inited;		/* Non-zero if the next slab is ready */
static size_t depot_offset;		/* Bytes used in the current slab */
static DEFINE_RAW_SPINLOCK(depot_lock);

static bool init_stack_slab(void **prealloc)
{
	if (!*prealloc)
		return false;
	/*
	 * This smp_load_acquire() pairs with smp_store_release() to
	 * |next_slab_inited| below and in depot_alloc_stack().
	 */
	if (smp_load_acquire(&next_slab_inited))
		return true;
	if (stack_slabs[depot_index] == NULL) {
		stack_slabs[depot_index] = *prealloc;
		*prealloc = NULL;
	} else {
		/* If this is the last depot slab, do not touch the next one. */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
			stack_slabs[depot_index + 1] = *prealloc;
			*prealloc = NULL;
		}
		/*
		 * This smp_store_release pairs with smp_load_acquire() from
		 * |next_slab_inited| above and in stack_depot_save().
		 */
		smp_store_release(&next_slab_inited, 1);
	}
	return true;
}

/* Allocation of a new stack in raw storage */
static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
		u32 hash, void **prealloc, gfp_t alloc_flags)
{
	struct stack_record *stack;
	size_t required_size = struct_size(stack, entries, size);

	required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN);

	if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) {
		if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) {
			WARN_ONCE(1, "Stack depot reached limit capacity");
			return NULL;
		}
		depot_index++;
		depot_offset = 0;
		/*
		 * smp_store_release() here pairs with smp_load_acquire() from
		 * |next_slab_inited| in stack_depot_save() and
		 * init_stack_slab().
		 */
		if (depot_index + 1 < STACK_ALLOC_MAX_SLABS)
			smp_store_release(&next_slab_inited, 0);
	}
	init_stack_slab(prealloc);
	if (stack_slabs[depot_index] == NULL)
		return NULL;

	stack = stack_slabs[depot_index] + depot_offset;

	stack->hash = hash;
	stack->size = size;
	stack->handle.slabindex = depot_index;
	stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
	stack->handle.valid = 1;
	memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
	depot_offset += required_size;

	return stack;
}
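/*
 * Size example (assuming a 64-bit build, so 8-byte entries and a record
 * header padded to 24 bytes): a 16-frame trace needs
 * struct_size() == 24 + 16 * 8 == 152 bytes, which ALIGN() rounds up to 160,
 * the next multiple of 1 << STACK_ALLOC_ALIGN == 16.
 */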

#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
#define STACK_HASH_SEED 0x9747b28c

static bool stack_depot_disable;
static struct stack_record **stack_table;

static int __init is_stack_depot_disabled(char *str)
{
	int ret;

	ret = kstrtobool(str, &stack_depot_disable);
	if (!ret && stack_depot_disable) {
		pr_info("Stack Depot is disabled\n");
		stack_table = NULL;
	}
	return 0;
}
early_param("stack_depot_disable", is_stack_depot_disabled);

int __init stack_depot_init(void)
{
	if (!stack_depot_disable) {
		size_t size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
		int i;

		stack_table = memblock_alloc(size, size);
		for (i = 0; i < STACK_HASH_SIZE; i++)
			stack_table[i] = NULL;
	}
	return 0;
}
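/*
 * Table size example (assuming the default CONFIG_STACK_HASH_ORDER of 20 on
 * a 64-bit build): 1 << 20 buckets * 8 bytes per pointer = 8MB, allocated
 * from memblock at early boot.
 */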

/* Calculate hash for a stack */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
	return jhash2((u32 *)entries,
		      array_size(size, sizeof(*entries)) / sizeof(u32),
		      STACK_HASH_SEED);
}

/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					     unsigned long *entries, int size,
					     u32 hash)
{
	struct stack_record *found;

	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
	}
	return NULL;
}

/**
 * stack_depot_fetch - Fetch stack entries from a depot
 *
 * @handle:		Stack depot handle which was returned from
 *			stack_depot_save().
 * @entries:		Pointer to store the entries address
 *
 * Return: The number of trace entries for this depot.
 */
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
			       unsigned long **entries)
{
	union handle_parts parts = { .handle = handle };
	void *slab;
	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
	struct stack_record *stack;

	*entries = NULL;
	if (parts.slabindex > depot_index) {
		WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
			parts.slabindex, depot_index, handle);
		return 0;
	}
	slab = stack_slabs[parts.slabindex];
	if (!slab)
		return 0;
	stack = slab + offset;

	*entries = stack->entries;
	return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);

/**
 * stack_depot_save - Save a stack trace from an array
 *
 * @entries:		Pointer to storage array
 * @nr_entries:		Size of the storage array
 * @alloc_flags:	Allocation gfp flags
 *
 * Return: The handle of the stack struct stored in depot
 */
depot_stack_handle_t stack_depot_save(unsigned long *entries,
				      unsigned int nr_entries,
				      gfp_t alloc_flags)
{
	struct stack_record *found = NULL, **bucket;
	depot_stack_handle_t retval = 0;
	struct page *page = NULL;
	void *prealloc = NULL;
	unsigned long flags;
	u32 hash;

	if (unlikely(nr_entries == 0) || stack_depot_disable)
		goto fast_exit;

	hash = hash_stack(entries, nr_entries);
	bucket = &stack_table[hash & STACK_HASH_MASK];

	/*
	 * Fast path: look the stack trace up without locking.
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |bucket| below.
	 */
	found = find_stack(smp_load_acquire(bucket), entries,
			   nr_entries, hash);
	if (found)
		goto exit;

	/*
	 * Check if the current or the next stack slab need to be initialized.
	 * If so, allocate the memory - we won't be able to do that under the
	 * lock.
	 *
	 * The smp_load_acquire() here pairs with smp_store_release() to
	 * |next_slab_inited| in depot_alloc_stack() and init_stack_slab().
	 */
	if (unlikely(!smp_load_acquire(&next_slab_inited))) {
		/*
		 * Zero out zone modifiers, as we don't have specific zone
		 * requirements. Keep the flags related to allocation in atomic
		 * contexts and I/O.
		 */
		alloc_flags &= ~GFP_ZONEMASK;
		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
		alloc_flags |= __GFP_NOWARN;
		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
		if (page)
			prealloc = page_address(page);
	}

	raw_spin_lock_irqsave(&depot_lock, flags);

	found = find_stack(*bucket, entries, nr_entries, hash);
	if (!found) {
		struct stack_record *new =
			depot_alloc_stack(entries, nr_entries,
					  hash, &prealloc, alloc_flags);
		if (new) {
			new->next = *bucket;
			/*
			 * This smp_store_release() pairs with
			 * smp_load_acquire() from |bucket| above.
			 */
			smp_store_release(bucket, new);
			found = new;
		}
	} else if (prealloc) {
		/*
		 * We didn't need to store this stack trace, but let's keep
		 * the preallocated memory for the future.
		 */
		WARN_ON(!init_stack_slab(&prealloc));
	}

	raw_spin_unlock_irqrestore(&depot_lock, flags);
exit:
	if (prealloc) {
		/* Nobody used this memory, ok to free it. */
		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
	}
	if (found)
		retval = found->handle.handle;
fast_exit:
	return retval;
}
EXPORT_SYMBOL_GPL(stack_depot_save);
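/*
 * Illustrative round trip (not part of the original file; GFP_NOWAIT is an
 * arbitrary example flag): a caller such as KASAN captures a trace with
 * stack_trace_save(), deduplicates it with stack_depot_save(), and later
 * resolves the handle back into entries with stack_depot_fetch():
 *
 *	unsigned long entries[64];
 *	unsigned long *saved;
 *	unsigned int nr_entries;
 *	depot_stack_handle_t handle;
 *
 *	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 *	if (handle) {
 *		nr_entries = stack_depot_fetch(handle, &saved);
 *		stack_trace_print(saved, nr_entries, 0);
 *	}
 */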

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

unsigned int filter_irq_stacks(unsigned long *entries,
			       unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function into the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}
EXPORT_SYMBOL_GPL(filter_irq_stacks);
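/*
 * Illustrative usage (not from the original file): callers typically trim
 * everything below the IRQ entry point before saving, so that otherwise
 * identical interrupt traces deduplicate to a single depot record:
 *
 *	nr_entries = filter_irq_stacks(entries, nr_entries);
 *	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 */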