1
2
3
4
5
6
7
8
9#include <bpf/libbpf.h>
10#include "perf.h"
11#include "debug.h"
12#include "bpf-loader.h"
13#include "bpf-prologue.h"
14#include "probe-finder.h"
15#include <errno.h>
16#include <dwarf-regs.h>
17#include <linux/filter.h>
18
19#define BPF_REG_SIZE 8
20
21#define JMP_TO_ERROR_CODE -1
22#define JMP_TO_SUCCESS_CODE -2
23#define JMP_TO_USER_CODE -3
24
/*
 * Cursor over a BPF instruction buffer.
 *
 * begin/end delimit the buffer [begin, end); pos is the next free slot.
 * pos == NULL marks a previous overflow (see append_insn() in this file),
 * so later appends and check_pos() keep reporting the error.
 */
struct bpf_insn_pos {
	struct bpf_insn *begin;
	struct bpf_insn *end;
	struct bpf_insn *pos;
};
30
31static inline int
32pos_get_cnt(struct bpf_insn_pos *pos)
33{
34 return pos->pos - pos->begin;
35}
36
37static int
38append_insn(struct bpf_insn new_insn, struct bpf_insn_pos *pos)
39{
40 if (!pos->pos)
41 return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
42
43 if (pos->pos + 1 >= pos->end) {
44 pr_err("bpf prologue: prologue too long\n");
45 pos->pos = NULL;
46 return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
47 }
48
49 *(pos->pos)++ = new_insn;
50 return 0;
51}
52
53static int
54check_pos(struct bpf_insn_pos *pos)
55{
56 if (!pos->pos || pos->pos >= pos->end)
57 return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
58 return 0;
59}
60
61
62#define ins(i, p) append_insn((i), (p))
63
64
65
66
67
68
69
70static int
71gen_ldx_reg_from_ctx(struct bpf_insn_pos *pos, int ctx_reg,
72 const char *reg, int target_reg)
73{
74 int offset = regs_query_register_offset(reg);
75
76 if (offset < 0) {
77 pr_err("bpf: prologue: failed to get register %s\n",
78 reg);
79 return offset;
80 }
81 ins(BPF_LDX_MEM(BPF_DW, target_reg, ctx_reg, offset), pos);
82
83 return check_pos(pos);
84}
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
/*
 * Emit instructions implementing:
 *   probe_read((void *)dst_addr_reg, BPF_REG_SIZE,
 *              (void *)(src_base_addr_reg + offset))
 * followed by a jump to the error landing pad when the helper returns
 * non-zero.  The jump offset is the JMP_TO_ERROR_CODE placeholder,
 * fixed up later by prologue_relocate().
 *
 * BPF helper calling convention: ARG1 = destination address,
 * ARG2 = size, ARG3 = source address; R0 = return value.
 *
 * NOTE(review): this calls BPF_FUNC_probe_read; newer kernels split the
 * helper into probe_read_kernel/probe_read_user — confirm which variant
 * the target kernels accept.
 */
static int
gen_read_mem(struct bpf_insn_pos *pos,
	     int src_base_addr_reg,
	     int dst_addr_reg,
	     long offset)
{
	/* ARG3 = src_base_addr_reg + offset (skip moves that are no-ops). */
	if (src_base_addr_reg != BPF_REG_ARG3)
		ins(BPF_MOV64_REG(BPF_REG_ARG3, src_base_addr_reg), pos);

	if (offset)
		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, offset), pos);

	/* ARG2 = number of bytes to read: one 8-byte BPF register. */
	ins(BPF_ALU64_IMM(BPF_MOV, BPF_REG_ARG2, BPF_REG_SIZE), pos);

	/* ARG1 = destination address. */
	if (dst_addr_reg != BPF_REG_ARG1)
		ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos);

	/* Invoke the probe_read helper. */
	ins(BPF_EMIT_CALL(BPF_FUNC_probe_read), pos);

	/*
	 * If R0 != 0 the read failed: jump to the error code.  The
	 * offset is a placeholder resolved by prologue_relocate().
	 */
	ins(BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, JMP_TO_ERROR_CODE),
	    pos);

	return check_pos(pos);
}
132
133
134
135
136
137
138
139
140static int
141gen_prologue_fastpath(struct bpf_insn_pos *pos,
142 struct probe_trace_arg *args, int nargs)
143{
144 int i, err = 0;
145
146 for (i = 0; i < nargs; i++) {
147 err = gen_ldx_reg_from_ctx(pos, BPF_REG_1, args[i].value,
148 BPF_PROLOGUE_START_ARG_REG + i);
149 if (err)
150 goto errout;
151 }
152
153 return check_pos(pos);
154errout:
155 return err;
156}
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
/*
 * Slow path: at least one argument requires pointer dereferencing.
 * For each argument, fetch its base register from the saved context
 * (BPF_REG_CTX holds the pt_regs pointer — the caller saved it there),
 * spill it to a per-argument stack slot at FP + stack_offset, then walk
 * the ref chain calling probe_read() for every dereference level,
 * reading back into the same slot each time.  Finally reload all slots
 * into BPF_PROLOGUE_START_ARG_REG + i and jump to the success landing
 * pad (placeholder offset, patched by prologue_relocate()).
 *
 * Returns 0 on success, a negative error code on failure.
 */
static int
gen_prologue_slowpath(struct bpf_insn_pos *pos,
		      struct probe_trace_arg *args, int nargs)
{
	int err, i;

	for (i = 0; i < nargs; i++) {
		struct probe_trace_arg *arg = &args[i];
		const char *reg = arg->value;
		struct probe_trace_arg_ref *ref = NULL;
		/* Each argument owns one 8-byte slot below the frame pointer. */
		int stack_offset = (i + 1) * -8;

		pr_debug("prologue: fetch arg %d, base reg is %s\n",
			 i, reg);

		/* ARG3 = saved value of the base register from pt_regs. */
		err = gen_ldx_reg_from_ctx(pos, BPF_REG_CTX, reg,
					   BPF_REG_ARG3);
		if (err) {
			pr_err("prologue: failed to get offset of register %s\n",
			       reg);
			goto errout;
		}

		/* R7 = FP + stack_offset: address of this arg's slot. */
		ins(BPF_MOV64_REG(BPF_REG_7, BPF_REG_FP), pos);

		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, stack_offset), pos);

		/*
		 * Spill the base value to the slot so it survives the
		 * probe_read helper calls below (which clobber ARG
		 * registers), and so args without refs are already in
		 * place for the final reload loop.
		 */
		ins(BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
				stack_offset), pos);

		ref = arg->ref;
		while (ref) {
			pr_debug("prologue: arg %d: offset %ld\n",
				 i, ref->offset);
			/* slot = probe_read(src = R3 + offset, 8 bytes). */
			err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7,
					   ref->offset);
			if (err) {
				pr_err("prologue: failed to generate probe_read function call\n");
				goto errout;
			}

			ref = ref->next;
			/*
			 * Another dereference follows: reload the value
			 * just read so it becomes the next source
			 * address (ARG3) for gen_read_mem().
			 */
			if (ref)
				ins(BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3,
						BPF_REG_FP, stack_offset), pos);
		}
	}

	/* Load the final values into the prologue's argument registers. */
	for (i = 0; i < nargs; i++)
		ins(BPF_LDX_MEM(BPF_DW, BPF_PROLOGUE_START_ARG_REG + i,
				BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos);

	/* Placeholder jump to the success code, patched during relocation. */
	ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos);

	return check_pos(pos);
errout:
	return err;
}
271
272static int
273prologue_relocate(struct bpf_insn_pos *pos, struct bpf_insn *error_code,
274 struct bpf_insn *success_code, struct bpf_insn *user_code)
275{
276 struct bpf_insn *insn;
277
278 if (check_pos(pos))
279 return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
280
281 for (insn = pos->begin; insn < pos->pos; insn++) {
282 struct bpf_insn *target;
283 u8 class = BPF_CLASS(insn->code);
284 u8 opcode;
285
286 if (class != BPF_JMP)
287 continue;
288 opcode = BPF_OP(insn->code);
289 if (opcode == BPF_CALL)
290 continue;
291
292 switch (insn->off) {
293 case JMP_TO_ERROR_CODE:
294 target = error_code;
295 break;
296 case JMP_TO_SUCCESS_CODE:
297 target = success_code;
298 break;
299 case JMP_TO_USER_CODE:
300 target = user_code;
301 break;
302 default:
303 pr_err("bpf prologue: internal error: relocation failed\n");
304 return -BPF_LOADER_ERRNO__PROLOGUE;
305 }
306
307 insn->off = target - (insn + 1);
308 }
309 return 0;
310}
311
312int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
313 struct bpf_insn *new_prog, size_t *new_cnt,
314 size_t cnt_space)
315{
316 struct bpf_insn *success_code = NULL;
317 struct bpf_insn *error_code = NULL;
318 struct bpf_insn *user_code = NULL;
319 struct bpf_insn_pos pos;
320 bool fastpath = true;
321 int err = 0, i;
322
323 if (!new_prog || !new_cnt)
324 return -EINVAL;
325
326 if (cnt_space > BPF_MAXINSNS)
327 cnt_space = BPF_MAXINSNS;
328
329 pos.begin = new_prog;
330 pos.end = new_prog + cnt_space;
331 pos.pos = new_prog;
332
333 if (!nargs) {
334 ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0),
335 &pos);
336
337 if (check_pos(&pos))
338 goto errout;
339
340 *new_cnt = pos_get_cnt(&pos);
341 return 0;
342 }
343
344 if (nargs > BPF_PROLOGUE_MAX_ARGS) {
345 pr_warning("bpf: prologue: %d arguments are dropped\n",
346 nargs - BPF_PROLOGUE_MAX_ARGS);
347 nargs = BPF_PROLOGUE_MAX_ARGS;
348 }
349
350
351 for (i = 0; i < nargs; i++) {
352 struct probe_trace_arg_ref *ref = args[i].ref;
353
354 if (args[i].value[0] == '@') {
355
356 pr_err("bpf: prologue: global %s%+ld not support\n",
357 args[i].value, ref ? ref->offset : 0);
358 return -ENOTSUP;
359 }
360
361 while (ref) {
362
363 fastpath = false;
364
365
366
367
368
369
370
371#ifdef __LP64__
372#define OFFSET_MAX ((1LL << 31) - 1)
373#define OFFSET_MIN ((1LL << 31) * -1)
374 if (ref->offset > OFFSET_MAX ||
375 ref->offset < OFFSET_MIN) {
376 pr_err("bpf: prologue: offset out of bound: %ld\n",
377 ref->offset);
378 return -BPF_LOADER_ERRNO__PROLOGUEOOB;
379 }
380#endif
381 ref = ref->next;
382 }
383 }
384 pr_debug("prologue: pass validation\n");
385
386 if (fastpath) {
387
388 pr_debug("prologue: fast path\n");
389 err = gen_prologue_fastpath(&pos, args, nargs);
390 if (err)
391 goto errout;
392 } else {
393 pr_debug("prologue: slow path\n");
394
395
396 ins(BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), &pos);
397
398 err = gen_prologue_slowpath(&pos, args, nargs);
399 if (err)
400 goto errout;
401
402
403
404
405
406
407
408
409
410 error_code = pos.pos;
411 ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 1),
412 &pos);
413
414 for (i = 0; i < nargs; i++)
415 ins(BPF_ALU64_IMM(BPF_MOV,
416 BPF_PROLOGUE_START_ARG_REG + i,
417 0),
418 &pos);
419 ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_USER_CODE),
420 &pos);
421 }
422
423
424
425
426
427
428 success_code = pos.pos;
429 ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), &pos);
430
431
432
433
434
435 user_code = pos.pos;
436 if (!fastpath) {
437
438
439
440
441 ins(BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX), &pos);
442 err = prologue_relocate(&pos, error_code, success_code,
443 user_code);
444 if (err)
445 goto errout;
446 }
447
448 err = check_pos(&pos);
449 if (err)
450 goto errout;
451
452 *new_cnt = pos_get_cnt(&pos);
453 return 0;
454errout:
455 return err;
456}
457