1
2
3
4
5
6
7
8
9
10#include <bpf/libbpf.h>
11#include "debug.h"
12#include "bpf-loader.h"
13#include "bpf-prologue.h"
14#include "probe-finder.h"
15#include <errno.h>
16#include <stdlib.h>
17#include <dwarf-regs.h>
18#include <linux/filter.h>
19
/* Size in bytes of one BPF register; also one prologue stack slot. */
#define BPF_REG_SIZE 8

/*
 * Placeholder jump offsets stored in insn->off while generating the
 * prologue; prologue_relocate() patches them to real offsets once the
 * target instructions exist.
 */
#define JMP_TO_ERROR_CODE -1
#define JMP_TO_SUCCESS_CODE -2
#define JMP_TO_USER_CODE -3
25
/*
 * Append-only cursor over a caller-provided instruction buffer.
 * Instructions are written at @pos; @begin/@end delimit the buffer.
 */
struct bpf_insn_pos {
	struct bpf_insn *begin;	/* first slot of the buffer */
	struct bpf_insn *end;	/* one past the last slot */
	struct bpf_insn *pos;	/* next free slot; NULL once overflowed */
};
31
32static inline int
33pos_get_cnt(struct bpf_insn_pos *pos)
34{
35 return pos->pos - pos->begin;
36}
37
38static int
39append_insn(struct bpf_insn new_insn, struct bpf_insn_pos *pos)
40{
41 if (!pos->pos)
42 return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
43
44 if (pos->pos + 1 >= pos->end) {
45 pr_err("bpf prologue: prologue too long\n");
46 pos->pos = NULL;
47 return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
48 }
49
50 *(pos->pos)++ = new_insn;
51 return 0;
52}
53
54static int
55check_pos(struct bpf_insn_pos *pos)
56{
57 if (!pos->pos || pos->pos >= pos->end)
58 return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
59 return 0;
60}
61
62
63
64
65
66
/*
 * Map a probe argument type string (e.g. "u8", "s32", "u64") to the
 * BPF_LDX size modifier needed to load that many bits.  The leading
 * signedness character is skipped and the bit width parsed from the
 * remainder.  A NULL, empty or unparseable type falls back to a full
 * 64-bit load (BPF_DW).
 */
static int
argtype_to_ldx_size(const char *type)
{
	int arg_size;

	/* &type[1] would point past the end of an empty string: guard it */
	arg_size = (type && type[0]) ? atoi(type + 1) : 64;

	switch (arg_size) {
	case 8:
		return BPF_B;
	case 16:
		return BPF_H;
	case 32:
		return BPF_W;
	case 64:
	default:
		return BPF_DW;
	}
}
84
/* Human-readable name of a BPF_LDX size modifier, for debug logging. */
static const char *
insn_sz_to_str(int insn_sz)
{
	if (insn_sz == BPF_B)
		return "BPF_B";
	if (insn_sz == BPF_H)
		return "BPF_H";
	if (insn_sz == BPF_W)
		return "BPF_W";
	if (insn_sz == BPF_DW)
		return "BPF_DW";
	return "UNKNOWN";
}
101
102
/* Shorthand: emit one instruction at cursor @p (may poison it on overflow). */
#define ins(i, p) append_insn((i), (p))
104
105
106
107
108
109
110
111static int
112gen_ldx_reg_from_ctx(struct bpf_insn_pos *pos, int ctx_reg,
113 const char *reg, int target_reg)
114{
115 int offset = regs_query_register_offset(reg);
116
117 if (offset < 0) {
118 pr_err("bpf: prologue: failed to get register %s\n",
119 reg);
120 return offset;
121 }
122 ins(BPF_LDX_MEM(BPF_DW, target_reg, ctx_reg, offset), pos);
123
124 return check_pos(pos);
125}
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
/*
 * Emit a probe_read helper call reading BPF_REG_SIZE bytes from
 * *(src_base_addr_reg + offset) into the memory pointed to by
 * dst_addr_reg, i.e.:
 *
 *	probeid(dst_addr_reg, BPF_REG_SIZE, src_base_addr_reg + offset)
 *
 * @probeid selects the helper (BPF_FUNC_probe_read_kernel or
 * BPF_FUNC_probe_read_user).  If the helper returns non-zero, jump to
 * the error code; the jump offset is a placeholder patched later by
 * prologue_relocate().
 */
static int
gen_read_mem(struct bpf_insn_pos *pos,
	     int src_base_addr_reg,
	     int dst_addr_reg,
	     long offset,
	     int probeid)
{
	/* arg3: source address = src_base_addr_reg + offset */
	if (src_base_addr_reg != BPF_REG_ARG3)
		ins(BPF_MOV64_REG(BPF_REG_ARG3, src_base_addr_reg), pos);

	if (offset)
		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, offset), pos);

	/* arg2: read size is always one full register */
	ins(BPF_ALU64_IMM(BPF_MOV, BPF_REG_ARG2, BPF_REG_SIZE), pos);

	/* arg1: destination buffer */
	if (dst_addr_reg != BPF_REG_ARG1)
		ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos);

	/* call the selected probe_read helper */
	ins(BPF_EMIT_CALL(probeid), pos);

	/* helper returns non-zero on failure: bail to error code */
	ins(BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, JMP_TO_ERROR_CODE),
	    pos);

	return check_pos(pos);
}
174
175
176
177
178
179
180
181
182static int
183gen_prologue_fastpath(struct bpf_insn_pos *pos,
184 struct probe_trace_arg *args, int nargs)
185{
186 int i, err = 0;
187
188 for (i = 0; i < nargs; i++) {
189 err = gen_ldx_reg_from_ctx(pos, BPF_REG_1, args[i].value,
190 BPF_PROLOGUE_START_ARG_REG + i);
191 if (err)
192 goto errout;
193 }
194
195 return check_pos(pos);
196errout:
197 return err;
198}
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
/*
 * Slow path: at least one argument involves pointer dereferencing.
 * For each argument, fetch its base register from the saved context,
 * spill it to a per-argument stack slot (FP - 8*(i+1)), then walk the
 * dereference chain with probe_read calls, each time reading through
 * the previous value into the same stack slot.  Finally load every
 * slot into its prologue argument register (with the size implied by
 * the argument's type) and jump to the success code.
 *
 * Expects BPF_REG_CTX to already hold the pt_regs pointer (set up by
 * the caller before invoking this function).
 */
static int
gen_prologue_slowpath(struct bpf_insn_pos *pos,
		      struct probe_trace_arg *args, int nargs)
{
	int err, i, probeid;

	for (i = 0; i < nargs; i++) {
		struct probe_trace_arg *arg = &args[i];
		const char *reg = arg->value;
		struct probe_trace_arg_ref *ref = NULL;
		/* each argument owns one 8-byte slot below the frame pointer */
		int stack_offset = (i + 1) * -8;

		pr_debug("prologue: fetch arg %d, base reg is %s\n",
			 i, reg);

		/* arg3 = saved value of the base register */
		err = gen_ldx_reg_from_ctx(pos, BPF_REG_CTX, reg,
					   BPF_REG_ARG3);
		if (err) {
			pr_err("prologue: failed to get offset of register %s\n",
			       reg);
			goto errout;
		}

		/* r7 = FP + stack_offset: destination for probe_read */
		ins(BPF_MOV64_REG(BPF_REG_7, BPF_REG_FP), pos);

		ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, stack_offset), pos);

		/*
		 * Store the base value into its slot so an argument with
		 * no dereference chain is already in place.
		 */
		ins(BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
				stack_offset), pos);

		ref = arg->ref;
		probeid = BPF_FUNC_probe_read_kernel;
		while (ref) {
			pr_debug("prologue: arg %d: offset %ld\n",
				 i, ref->offset);

			/* switch helper once a user-space access is seen */
			if (ref->user_access)
				probeid = BPF_FUNC_probe_read_user;

			/* slot = probe_read(*(r3 + offset)) */
			err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7,
					   ref->offset, probeid);
			if (err) {
				pr_err("prologue: failed to generate probe_read function call\n");
				goto errout;
			}

			ref = ref->next;

			/*
			 * More dereferences follow: reload the value just
			 * read back into arg3 as the next base address.
			 */
			if (ref)
				ins(BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3,
						BPF_REG_FP, stack_offset), pos);
		}
	}

	/* load each spilled slot into its final argument register */
	for (i = 0; i < nargs; i++) {
		int insn_sz = (args[i].ref) ? argtype_to_ldx_size(args[i].type) : BPF_DW;

		pr_debug("prologue: load arg %d, insn_sz is %s\n",
			 i, insn_sz_to_str(insn_sz));
		ins(BPF_LDX_MEM(insn_sz, BPF_PROLOGUE_START_ARG_REG + i,
				BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos);
	}

	/* placeholder jump, patched by prologue_relocate() */
	ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos);

	return check_pos(pos);
errout:
	return err;
}
323
324static int
325prologue_relocate(struct bpf_insn_pos *pos, struct bpf_insn *error_code,
326 struct bpf_insn *success_code, struct bpf_insn *user_code)
327{
328 struct bpf_insn *insn;
329
330 if (check_pos(pos))
331 return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
332
333 for (insn = pos->begin; insn < pos->pos; insn++) {
334 struct bpf_insn *target;
335 u8 class = BPF_CLASS(insn->code);
336 u8 opcode;
337
338 if (class != BPF_JMP)
339 continue;
340 opcode = BPF_OP(insn->code);
341 if (opcode == BPF_CALL)
342 continue;
343
344 switch (insn->off) {
345 case JMP_TO_ERROR_CODE:
346 target = error_code;
347 break;
348 case JMP_TO_SUCCESS_CODE:
349 target = success_code;
350 break;
351 case JMP_TO_USER_CODE:
352 target = user_code;
353 break;
354 default:
355 pr_err("bpf prologue: internal error: relocation failed\n");
356 return -BPF_LOADER_ERRNO__PROLOGUE;
357 }
358
359 insn->off = target - (insn + 1);
360 }
361 return 0;
362}
363
/*
 * bpf__gen_prologue - generate a prologue that fetches probe arguments
 * into BPF_PROLOGUE_START_ARG_REG .. +nargs before user code runs, and
 * sets BPF_PROLOGUE_FETCH_RESULT_REG to 0 on success / 1 on fetch error.
 *
 * @args:      probe arguments (register name plus optional deref chain)
 * @nargs:     number of entries in @args (capped at BPF_PROLOGUE_MAX_ARGS)
 * @new_prog:  caller-provided buffer receiving the instructions
 * @new_cnt:   out: number of instructions written
 * @cnt_space: capacity of @new_prog in instructions (clamped to
 *             BPF_MAXINSNS)
 *
 * Arguments without dereferences take the fast path (direct register
 * loads); otherwise the slow path spills through the stack with
 * probe_read helpers and internal jumps that are relocated at the end.
 * Returns 0 on success or a negative error code.
 */
int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
		      struct bpf_insn *new_prog, size_t *new_cnt,
		      size_t cnt_space)
{
	struct bpf_insn *success_code = NULL;
	struct bpf_insn *error_code = NULL;
	struct bpf_insn *user_code = NULL;
	struct bpf_insn_pos pos;
	bool fastpath = true;
	int err = 0, i;

	if (!new_prog || !new_cnt)
		return -EINVAL;

	if (cnt_space > BPF_MAXINSNS)
		cnt_space = BPF_MAXINSNS;

	pos.begin = new_prog;
	pos.end = new_prog + cnt_space;
	pos.pos = new_prog;

	/* no arguments: just report a successful (empty) fetch */
	if (!nargs) {
		ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0),
		    &pos);

		if (check_pos(&pos))
			goto errout;

		*new_cnt = pos_get_cnt(&pos);
		return 0;
	}

	if (nargs > BPF_PROLOGUE_MAX_ARGS) {
		pr_warning("bpf: prologue: %d arguments are dropped\n",
			   nargs - BPF_PROLOGUE_MAX_ARGS);
		nargs = BPF_PROLOGUE_MAX_ARGS;
	}

	/* validate args and decide fast/slow path */
	for (i = 0; i < nargs; i++) {
		struct probe_trace_arg_ref *ref = args[i].ref;

		if (args[i].value[0] == '@') {
			/* fetching a global variable is not supported */
			pr_err("bpf: prologue: global %s%+ld not support\n",
			       args[i].value, ref ? ref->offset : 0);
			return -ENOTSUP;
		}

		while (ref) {
			/* any dereference forces the slow path */
			fastpath = false;

			/*
			 * On 64-bit hosts a long offset can exceed the
			 * s32 immediate range of BPF instructions;
			 * reject such offsets up front.
			 */
#ifdef __LP64__
#define OFFSET_MAX ((1LL << 31) - 1)
#define OFFSET_MIN ((1LL << 31) * -1)
			if (ref->offset > OFFSET_MAX ||
			    ref->offset < OFFSET_MIN) {
				pr_err("bpf: prologue: offset out of bound: %ld\n",
				       ref->offset);
				return -BPF_LOADER_ERRNO__PROLOGUEOOB;
			}
#endif
			ref = ref->next;
		}
	}
	pr_debug("prologue: pass validation\n");

	if (fastpath) {
		/* direct register loads, no stack traffic */
		pr_debug("prologue: fast path\n");
		err = gen_prologue_fastpath(&pos, args, nargs);
		if (err)
			goto errout;
	} else {
		pr_debug("prologue: slow path\n");

		/* save ctx (r1) before helper calls clobber it */
		ins(BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), &pos);

		err = gen_prologue_slowpath(&pos, args, nargs);
		if (err)
			goto errout;

		/*
		 * Error code: mark the fetch as failed, zero all
		 * argument registers, then jump to the user code.
		 */
		error_code = pos.pos;
		ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 1),
		    &pos);

		for (i = 0; i < nargs; i++)
			ins(BPF_ALU64_IMM(BPF_MOV,
					  BPF_PROLOGUE_START_ARG_REG + i,
					  0),
			    &pos);
		ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_USER_CODE),
		    &pos);
	}

	/* success code: mark the fetch as successful, fall into user code */
	success_code = pos.pos;
	ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), &pos);

	user_code = pos.pos;
	if (!fastpath) {
		/* restore ctx into r1 for the user program */
		ins(BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX), &pos);
		err = prologue_relocate(&pos, error_code, success_code,
					user_code);
		if (err)
			goto errout;
	}

	err = check_pos(&pos);
	if (err)
		goto errout;

	*new_cnt = pos_get_cnt(&pos);
	return 0;
errout:
	return err;
}
509