1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/perf_event.h>
19#include <linux/uaccess.h>
20
21#include <asm/stacktrace.h>
22
/*
 * AArch64 user-space frame record as laid out on the stack: the saved
 * frame pointer (pointing at the caller's frame record) followed by the
 * saved link register.  Packed so the struct matches the exact
 * in-memory layout copied back from the user stack.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long lr;
} __attribute__((packed));
27
28
29
30
31
32static struct frame_tail __user *
33user_backtrace(struct frame_tail __user *tail,
34 struct perf_callchain_entry_ctx *entry)
35{
36 struct frame_tail buftail;
37 unsigned long err;
38
39
40 if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
41 return NULL;
42
43 pagefault_disable();
44 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
45 pagefault_enable();
46
47 if (err)
48 return NULL;
49
50 perf_callchain_store(entry, buftail.lr);
51
52
53
54
55
56 if (tail >= buftail.fp)
57 return NULL;
58
59 return buftail.fp;
60}
61
62#ifdef CONFIG_COMPAT
63
64
65
66
67
68
69
70
/*
 * AArch32 (compat) frame layout.  The saved registers of interest sit
 * at the end of the variable-length register save area, and fp points
 * just past them, so the record's address is
 * (struct compat_frame_tail __user *)compat_ptr(fp) - 1
 * (see compat_user_backtrace() below).
 */
struct compat_frame_tail {
	compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
	u32 sp;
	u32 lr;
} __attribute__((packed));
76
77static struct compat_frame_tail __user *
78compat_user_backtrace(struct compat_frame_tail __user *tail,
79 struct perf_callchain_entry_ctx *entry)
80{
81 struct compat_frame_tail buftail;
82 unsigned long err;
83
84
85 if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
86 return NULL;
87
88 pagefault_disable();
89 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
90 pagefault_enable();
91
92 if (err)
93 return NULL;
94
95 perf_callchain_store(entry, buftail.lr);
96
97
98
99
100
101 if (tail + 1 >= (struct compat_frame_tail __user *)
102 compat_ptr(buftail.fp))
103 return NULL;
104
105 return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
106}
107#endif
108
/*
 * Record a user-space call chain for a perf sample, starting at the
 * sampled pc and walking frame records from the frame pointer.
 */
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains. */
		return;
	}

	perf_callchain_store(entry, regs->pc);

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode: x29 is the frame pointer. */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];

		/* Frame records must be 16-byte aligned on AArch64. */
		while (entry->nr < entry->max_stack &&
		       tail && !((unsigned long)tail & 0xf))
			tail = user_backtrace(tail, entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode: record sits one struct below fp. */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;

		/* Compat frame records need only 4-byte alignment. */
		while ((entry->nr < entry->max_stack) &&
			tail && !((unsigned long)tail & 0x3))
			tail = compat_user_backtrace(tail, entry);
#endif
	}
}
141
142
143
144
145
146
147static int callchain_trace(struct stackframe *frame, void *data)
148{
149 struct perf_callchain_entry_ctx *entry = data;
150 perf_callchain_store(entry, frame->pc);
151 return 0;
152}
153
/*
 * Record a kernel call chain for a perf sample by unwinding the kernel
 * stack from the sampled register state.
 */
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct stackframe frame;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains. */
		return;
	}

	/* Seed the unwinder from the sampled frame pointer/sp/pc. */
	frame.fp = regs->regs[29];
	frame.sp = regs->sp;
	frame.pc = regs->pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* Needed so the unwinder can recover graph-tracer return addrs. */
	frame.graph = current->curr_ret_stack;
#endif

	walk_stackframe(current, &frame, callchain_trace, entry);
}
173
174unsigned long perf_instruction_pointer(struct pt_regs *regs)
175{
176 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
177 return perf_guest_cbs->get_guest_ip();
178
179 return instruction_pointer(regs);
180}
181
182unsigned long perf_misc_flags(struct pt_regs *regs)
183{
184 int misc = 0;
185
186 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
187 if (perf_guest_cbs->is_user_mode())
188 misc |= PERF_RECORD_MISC_GUEST_USER;
189 else
190 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
191 } else {
192 if (user_mode(regs))
193 misc |= PERF_RECORD_MISC_USER;
194 else
195 misc |= PERF_RECORD_MISC_KERNEL;
196 }
197
198 return misc;
199}
200