1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#ifndef __ASM_STACKTRACE_H
17#define __ASM_STACKTRACE_H
18
19#include <linux/percpu.h>
20#include <linux/sched.h>
21#include <linux/sched/task_stack.h>
22
23#include <asm/memory.h>
24#include <asm/ptrace.h>
25#include <asm/sdei.h>
26
/*
 * One record in an unwind: the saved frame pointer / return address pair
 * that unwind_frame() steps through.
 */
struct stackframe {
	unsigned long fp;	/* frame pointer for this frame — presumably the saved x29; confirm against unwind_frame() */
	unsigned long pc;	/* return address recorded for this frame */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph;		/* NOTE(review): index into the graph tracer's return stack, used to recover real return addresses — confirm in unwind_frame() */
#endif
};
34
35extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
36extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
37 int (*fn)(struct stackframe *, void *), void *data);
38extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);
39
40DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
41
42static inline bool on_irq_stack(unsigned long sp)
43{
44 unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
45 unsigned long high = low + IRQ_STACK_SIZE;
46
47 if (!low)
48 return false;
49
50 return (low <= sp && sp < high);
51}
52
53static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
54{
55 unsigned long low = (unsigned long)task_stack_page(tsk);
56 unsigned long high = low + THREAD_SIZE;
57
58 return (low <= sp && sp < high);
59}
60
61#ifdef CONFIG_VMAP_STACK
62DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
63
64static inline bool on_overflow_stack(unsigned long sp)
65{
66 unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
67 unsigned long high = low + OVERFLOW_STACK_SIZE;
68
69 return (low <= sp && sp < high);
70}
71#else
72static inline bool on_overflow_stack(unsigned long sp) { return false; }
73#endif
74
75
76
77
78
79static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp)
80{
81 if (on_task_stack(tsk, sp))
82 return true;
83 if (tsk != current || preemptible())
84 return false;
85 if (on_irq_stack(sp))
86 return true;
87 if (on_overflow_stack(sp))
88 return true;
89 if (on_sdei_stack(sp))
90 return true;
91
92 return false;
93}
94
95#endif
96