1
2
3
4
5
6
7
8
9
#include <bpf/libbpf.h>
#include "perf.h"
#include "debug.h"
#include "bpf-loader.h"
#include "bpf-prologue.h"
#include "probe-finder.h"
#include <errno.h>
#include <stdlib.h>
#include <dwarf-regs.h>
#include <linux/filter.h>
19
/* Size in bytes of one BPF register / one stack slot used per argument. */
#define BPF_REG_SIZE 8

/*
 * Placeholder jump offsets written into BPF_JMP instructions during
 * generation; prologue_relocate() later rewrites them into real
 * relative offsets once the target positions are known.
 */
#define JMP_TO_ERROR_CODE -1
#define JMP_TO_SUCCESS_CODE -2
#define JMP_TO_USER_CODE -3

/*
 * Cursor over the output instruction buffer.
 * begin/end delimit the caller-provided buffer; pos is the next free
 * slot, or NULL once an overflow has been detected (see append_insn()).
 */
struct bpf_insn_pos {
	struct bpf_insn *begin;
	struct bpf_insn *end;
	struct bpf_insn *pos;
};
31
32static inline int
33pos_get_cnt(struct bpf_insn_pos *pos)
34{
35 return pos->pos - pos->begin;
36}
37
38static int
39append_insn(struct bpf_insn new_insn, struct bpf_insn_pos *pos)
40{
41 if (!pos->pos)
42 return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
43
44 if (pos->pos + 1 >= pos->end) {
45 pr_err("bpf prologue: prologue too long\n");
46 pos->pos = NULL;
47 return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
48 }
49
50 *(pos->pos)++ = new_insn;
51 return 0;
52}
53
54static int
55check_pos(struct bpf_insn_pos *pos)
56{
57 if (!pos->pos || pos->pos >= pos->end)
58 return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
59 return 0;
60}
61
62
63
64
65
66
/*
 * Map a probe argument type string (e.g. "u8", "s16", "u32", "u64")
 * to the BPF_LDX size modifier used when loading the fetched value.
 *
 * The leading signedness character is skipped and the bit width is
 * parsed from the rest of the string. Unknown widths, a missing type,
 * or an unparsable string all fall back to a 64-bit load (BPF_DW).
 *
 * Fix: the original evaluated &type[1] unconditionally, which for an
 * empty string points one past the NUL terminator — reading it is
 * undefined behavior. Guard against NULL and "" before skipping the
 * first character.
 */
static int
argtype_to_ldx_size(const char *type)
{
	int arg_size;

	if (!type || !type[0])
		arg_size = 64;	/* no type info: default to full register */
	else
		arg_size = atoi(type + 1);

	switch (arg_size) {
	case 8:
		return BPF_B;
	case 16:
		return BPF_H;
	case 32:
		return BPF_W;
	case 64:
	default:
		return BPF_DW;
	}
}
84
/* Human-readable name of a BPF_LDX size modifier, for debug logging. */
static const char *
insn_sz_to_str(int insn_sz)
{
	const char *name = "UNKNOWN";

	if (insn_sz == BPF_B)
		name = "BPF_B";
	else if (insn_sz == BPF_H)
		name = "BPF_H";
	else if (insn_sz == BPF_W)
		name = "BPF_W";
	else if (insn_sz == BPF_DW)
		name = "BPF_DW";

	return name;
}
101
102
/* Shorthand for emitting one instruction; errors surface via check_pos(). */
#define ins(i, p) append_insn((i), (p))
104
105
106
107
108
109
110
111static int
112gen_ldx_reg_from_ctx(struct bpf_insn_pos *pos, int ctx_reg,
113 const char *reg, int target_reg)
114{
115 int offset = regs_query_register_offset(reg);
116
117 if (offset < 0) {
118 pr_err("bpf: prologue: failed to get register %s\n",
119 reg);
120 return offset;
121 }
122 ins(BPF_LDX_MEM(BPF_DW, target_reg, ctx_reg, offset), pos);
123
124 return check_pos(pos);
125}
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
/*
 * Generate a BPF_FUNC_probe_read function call.
 *
 * src_base_addr_reg is a register holding the source address,
 * dst_addr_reg is a register holding the destination (stack) address.
 * The emitted code performs:
 *
 *   *[dst_addr_reg] = *([src_base_addr_reg] + offset)
 *
 * Argument registers of BPF_FUNC_probe_read:
 *   ARG1: ptr to stack (dest)
 *   ARG2: size (BPF_REG_SIZE)
 *   ARG3: unsafe ptr (src)
 */
static int
gen_read_mem(struct bpf_insn_pos *pos,
	     int src_base_addr_reg,
	     int dst_addr_reg,
	     long offset)
{
	/* mov arg3 <- src_base_addr_reg (skip if already there) */
	if (src_base_addr_reg != BPF_REG_ARG3)
		ins(BPF_MOV64_REG(BPF_REG_ARG3, src_base_addr_reg), pos);
	/* add arg3, #offset (skip the add when offset is 0) */
	if (offset)
		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, offset), pos);

	/* mov arg2 <- size */
	ins(BPF_ALU64_IMM(BPF_MOV, BPF_REG_ARG2, BPF_REG_SIZE), pos);

	/* mov arg1 <- dst_addr_reg (skip if already there) */
	if (dst_addr_reg != BPF_REG_ARG1)
		ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos);

	/* call probe_read */
	ins(BPF_EMIT_CALL(BPF_FUNC_probe_read), pos);
	/*
	 * Error processing: if the read failed (r0 != 0), jump to the
	 * error code. JMP_TO_ERROR_CODE is a placeholder offset that
	 * prologue_relocate() later patches to the real target.
	 */
	ins(BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, JMP_TO_ERROR_CODE),
	    pos);

	return check_pos(pos);
}
173
174
175
176
177
178
179
180
181static int
182gen_prologue_fastpath(struct bpf_insn_pos *pos,
183 struct probe_trace_arg *args, int nargs)
184{
185 int i, err = 0;
186
187 for (i = 0; i < nargs; i++) {
188 err = gen_ldx_reg_from_ctx(pos, BPF_REG_1, args[i].value,
189 BPF_PROLOGUE_START_ARG_REG + i);
190 if (err)
191 goto errout;
192 }
193
194 return check_pos(pos);
195errout:
196 return err;
197}
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
/*
 * Slow-path prologue: at least one argument requires dereferencing
 * (arg->ref chain). For each argument, fetch the base register from
 * the saved ctx (BPF_REG_CTX), spill it to a per-argument stack slot,
 * then walk the ref chain calling probe_read for each level of
 * indirection. Finally reload all results into the destination
 * registers and jump to the success code.
 */
static int
gen_prologue_slowpath(struct bpf_insn_pos *pos,
		      struct probe_trace_arg *args, int nargs)
{
	int err, i;

	for (i = 0; i < nargs; i++) {
		struct probe_trace_arg *arg = &args[i];
		const char *reg = arg->value;
		struct probe_trace_arg_ref *ref = NULL;
		/* Each argument owns one 8-byte stack slot below fp. */
		int stack_offset = (i + 1) * -8;

		pr_debug("prologue: fetch arg %d, base reg is %s\n",
			 i, reg);

		/* value of base register is stored into ARG3 */
		err = gen_ldx_reg_from_ctx(pos, BPF_REG_CTX, reg,
					   BPF_REG_ARG3);
		if (err) {
			pr_err("prologue: failed to get offset of register %s\n",
			       reg);
			goto errout;
		}

		/* Make r7 point at this argument's stack slot. */
		ins(BPF_MOV64_REG(BPF_REG_7, BPF_REG_FP), pos);
		/* r7 += stack_offset */
		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, stack_offset), pos);
		/*
		 * Store r3 (base register) onto the stack so fp[offset]
		 * is initialized. fp must be used as the base register
		 * here: the verifier does not allow storing through r7
		 * even though it holds the same address.
		 */
		ins(BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
				stack_offset), pos);

		ref = arg->ref;
		while (ref) {
			pr_debug("prologue: arg %d: offset %ld\n",
				 i, ref->offset);
			/* slot = *(slot + ref->offset) via probe_read */
			err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7,
					   ref->offset);
			if (err) {
				pr_err("prologue: failed to generate probe_read function call\n");
				goto errout;
			}

			ref = ref->next;
			/*
			 * Load the previous result back into ARG3 for
			 * the next dereference. Use BPF_REG_FP instead
			 * of r7: the verifier forbids loading through a
			 * pointer that was itself stored on the stack.
			 */
			if (ref)
				ins(BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3,
						BPF_REG_FP, stack_offset), pos);
		}
	}

	/* Final pass: read results from stack slots into registers. */
	for (i = 0; i < nargs; i++) {
		/* Only dereferenced args have a known width; plain registers load full 64 bits. */
		int insn_sz = (args[i].ref) ? argtype_to_ldx_size(args[i].type) : BPF_DW;

		pr_debug("prologue: load arg %d, insn_sz is %s\n",
			 i, insn_sz_to_str(insn_sz));
		ins(BPF_LDX_MEM(insn_sz, BPF_PROLOGUE_START_ARG_REG + i,
				BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos);
	}

	/* Placeholder jump; patched by prologue_relocate(). */
	ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos);

	return check_pos(pos);
errout:
	return err;
}
317
/*
 * Patch the placeholder jump offsets (JMP_TO_*_CODE) emitted during
 * generation into real pc-relative offsets, now that the positions of
 * the error, success and user-code sections are known.
 *
 * Returns 0 on success, a negative BPF_LOADER_ERRNO__* code on
 * overflow or if an unexpected jump offset is found.
 */
static int
prologue_relocate(struct bpf_insn_pos *pos, struct bpf_insn *error_code,
		  struct bpf_insn *success_code, struct bpf_insn *user_code)
{
	struct bpf_insn *insn;

	if (check_pos(pos))
		return -BPF_LOADER_ERRNO__PROLOGUE2BIG;

	for (insn = pos->begin; insn < pos->pos; insn++) {
		struct bpf_insn *target;
		u8 class = BPF_CLASS(insn->code);
		u8 opcode;

		/* Only jump instructions carry placeholder offsets. */
		if (class != BPF_JMP)
			continue;
		/* BPF_CALL shares the BPF_JMP class but its off field
		 * is not a branch target — leave it untouched. */
		opcode = BPF_OP(insn->code);
		if (opcode == BPF_CALL)
			continue;

		switch (insn->off) {
		case JMP_TO_ERROR_CODE:
			target = error_code;
			break;
		case JMP_TO_SUCCESS_CODE:
			target = success_code;
			break;
		case JMP_TO_USER_CODE:
			target = user_code;
			break;
		default:
			/* Generation must only emit the three placeholders. */
			pr_err("bpf prologue: internal error: relocation failed\n");
			return -BPF_LOADER_ERRNO__PROLOGUE;
		}

		/* BPF jump offsets are relative to the next instruction. */
		insn->off = target - (insn + 1);
	}
	return 0;
}
357
358int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
359 struct bpf_insn *new_prog, size_t *new_cnt,
360 size_t cnt_space)
361{
362 struct bpf_insn *success_code = NULL;
363 struct bpf_insn *error_code = NULL;
364 struct bpf_insn *user_code = NULL;
365 struct bpf_insn_pos pos;
366 bool fastpath = true;
367 int err = 0, i;
368
369 if (!new_prog || !new_cnt)
370 return -EINVAL;
371
372 if (cnt_space > BPF_MAXINSNS)
373 cnt_space = BPF_MAXINSNS;
374
375 pos.begin = new_prog;
376 pos.end = new_prog + cnt_space;
377 pos.pos = new_prog;
378
379 if (!nargs) {
380 ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0),
381 &pos);
382
383 if (check_pos(&pos))
384 goto errout;
385
386 *new_cnt = pos_get_cnt(&pos);
387 return 0;
388 }
389
390 if (nargs > BPF_PROLOGUE_MAX_ARGS) {
391 pr_warning("bpf: prologue: %d arguments are dropped\n",
392 nargs - BPF_PROLOGUE_MAX_ARGS);
393 nargs = BPF_PROLOGUE_MAX_ARGS;
394 }
395
396
397 for (i = 0; i < nargs; i++) {
398 struct probe_trace_arg_ref *ref = args[i].ref;
399
400 if (args[i].value[0] == '@') {
401
402 pr_err("bpf: prologue: global %s%+ld not support\n",
403 args[i].value, ref ? ref->offset : 0);
404 return -ENOTSUP;
405 }
406
407 while (ref) {
408
409 fastpath = false;
410
411
412
413
414
415
416
417#ifdef __LP64__
418#define OFFSET_MAX ((1LL << 31) - 1)
419#define OFFSET_MIN ((1LL << 31) * -1)
420 if (ref->offset > OFFSET_MAX ||
421 ref->offset < OFFSET_MIN) {
422 pr_err("bpf: prologue: offset out of bound: %ld\n",
423 ref->offset);
424 return -BPF_LOADER_ERRNO__PROLOGUEOOB;
425 }
426#endif
427 ref = ref->next;
428 }
429 }
430 pr_debug("prologue: pass validation\n");
431
432 if (fastpath) {
433
434 pr_debug("prologue: fast path\n");
435 err = gen_prologue_fastpath(&pos, args, nargs);
436 if (err)
437 goto errout;
438 } else {
439 pr_debug("prologue: slow path\n");
440
441
442 ins(BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), &pos);
443
444 err = gen_prologue_slowpath(&pos, args, nargs);
445 if (err)
446 goto errout;
447
448
449
450
451
452
453
454
455
456 error_code = pos.pos;
457 ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 1),
458 &pos);
459
460 for (i = 0; i < nargs; i++)
461 ins(BPF_ALU64_IMM(BPF_MOV,
462 BPF_PROLOGUE_START_ARG_REG + i,
463 0),
464 &pos);
465 ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_USER_CODE),
466 &pos);
467 }
468
469
470
471
472
473
474 success_code = pos.pos;
475 ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), &pos);
476
477
478
479
480
481 user_code = pos.pos;
482 if (!fastpath) {
483
484
485
486
487 ins(BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX), &pos);
488 err = prologue_relocate(&pos, error_code, success_code,
489 user_code);
490 if (err)
491 goto errout;
492 }
493
494 err = check_pos(&pos);
495 if (err)
496 goto errout;
497
498 *new_cnt = pos_get_cnt(&pos);
499 return 0;
500errout:
501 return err;
502}
503