1
2
3
#ifdef __KERNEL__
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/string.h>
#include <linux/bpf_verifier.h>
#include "relo_core.h"

/* In-kernel build: the shared CO-RE logic below is written against libbpf
 * helper names. Map those names onto the in-kernel BTF API so the rest of
 * this file compiles unchanged in both environments.
 */

/* Human-readable name of a BTF type's kind, for diagnostics. */
static const char *btf_kind_str(const struct btf_type *t)
{
	return btf_type_str(t);
}

/* True for the first half of a two-part BPF_LD_IMM64 instruction. */
static bool is_ldimm64_insn(struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

/* Resolve 'id' through modifier (const/volatile/restrict) and typedef
 * wrappers; if res_id is non-NULL it receives the final unwrapped type ID.
 */
static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, u32 id, u32 *res_id)
{
	return btf_type_skip_modifiers(btf, id, res_id);
}

/* Kernel-side stand-in for libbpf's btf__name_by_offset(). */
static const char *btf__name_by_offset(const struct btf *btf, u32 offset)
{
	return btf_name_by_offset(btf, offset);
}

/* Kernel-side stand-in for libbpf's btf__resolve_size(): byte size of the
 * resolved type, or a negative errno on failure.
 */
static s64 btf__resolve_size(const struct btf *btf, u32 type_id)
{
	const struct btf_type *t;
	int size;

	t = btf_type_by_id(btf, type_id);
	t = btf_resolve_size(btf, t, &size);
	if (IS_ERR(t))
		return PTR_ERR(t);
	return size;
}

enum libbpf_print_level {
	LIBBPF_WARN,
	LIBBPF_INFO,
	LIBBPF_DEBUG,
};

/* In-kernel logging: call sites pass prog_name in the second slot; these
 * macros cast it and forward it to bpf_log() as the log destination
 * (NOTE(review): in-kernel, the "prog_name" at call sites is presumably
 * really a struct bpf_verifier_log pointer — confirm against callers).
 */
#undef pr_warn
#undef pr_info
#undef pr_debug
#define pr_warn(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define pr_info(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define pr_debug(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
#define libbpf_print(level, fmt, ...) bpf_log((void *)prog_name, fmt, ##__VA_ARGS__)
#else
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <ctype.h>
#include <linux/err.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#endif
70
71static bool is_flex_arr(const struct btf *btf,
72 const struct bpf_core_accessor *acc,
73 const struct btf_array *arr)
74{
75 const struct btf_type *t;
76
77
78 if (!acc->name || arr->nelems > 0)
79 return false;
80
81
82 t = btf_type_by_id(btf, acc->type_id);
83 return acc->idx == btf_vlen(t) - 1;
84}
85
86static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
87{
88 switch (kind) {
89 case BPF_CORE_FIELD_BYTE_OFFSET: return "byte_off";
90 case BPF_CORE_FIELD_BYTE_SIZE: return "byte_sz";
91 case BPF_CORE_FIELD_EXISTS: return "field_exists";
92 case BPF_CORE_FIELD_SIGNED: return "signed";
93 case BPF_CORE_FIELD_LSHIFT_U64: return "lshift_u64";
94 case BPF_CORE_FIELD_RSHIFT_U64: return "rshift_u64";
95 case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id";
96 case BPF_CORE_TYPE_ID_TARGET: return "target_type_id";
97 case BPF_CORE_TYPE_EXISTS: return "type_exists";
98 case BPF_CORE_TYPE_SIZE: return "type_size";
99 case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists";
100 case BPF_CORE_ENUMVAL_VALUE: return "enumval_value";
101 default: return "unknown";
102 }
103}
104
105static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
106{
107 switch (kind) {
108 case BPF_CORE_FIELD_BYTE_OFFSET:
109 case BPF_CORE_FIELD_BYTE_SIZE:
110 case BPF_CORE_FIELD_EXISTS:
111 case BPF_CORE_FIELD_SIGNED:
112 case BPF_CORE_FIELD_LSHIFT_U64:
113 case BPF_CORE_FIELD_RSHIFT_U64:
114 return true;
115 default:
116 return false;
117 }
118}
119
120static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
121{
122 switch (kind) {
123 case BPF_CORE_TYPE_ID_LOCAL:
124 case BPF_CORE_TYPE_ID_TARGET:
125 case BPF_CORE_TYPE_EXISTS:
126 case BPF_CORE_TYPE_SIZE:
127 return true;
128 default:
129 return false;
130 }
131}
132
133static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
134{
135 switch (kind) {
136 case BPF_CORE_ENUMVAL_EXISTS:
137 case BPF_CORE_ENUMVAL_VALUE:
138 return true;
139 default:
140 return false;
141 }
142}
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
/*
 * Parse a CO-RE relocation's accessor string (e.g. "0:1:2:3") against local
 * BTF into a bpf_core_spec: both the raw index array (raw_spec) and the
 * high-level accessor array (spec, which keeps only named field accessors
 * and array indices), computing the resulting field bit offset along the way.
 *
 * Returns 0 on success, -EINVAL on malformed spec or BTF mismatch,
 * -E2BIG if the spec exceeds BPF_CORE_SPEC_MAX_LEN accessors, or a
 * negative error from type-size resolution.
 */
static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
			       __u32 type_id,
			       const char *spec_str,
			       enum bpf_core_relo_kind relo_kind,
			       struct bpf_core_spec *spec)
{
	int access_idx, parsed_len, i;
	struct bpf_core_accessor *acc;
	const struct btf_type *t;
	const char *name;
	__u32 id;
	__s64 sz;

	if (str_is_empty(spec_str) || *spec_str == ':')
		return -EINVAL;

	memset(spec, 0, sizeof(*spec));
	spec->btf = btf;
	spec->root_type_id = type_id;
	spec->relo_kind = relo_kind;

	/* type-based relocations don't have a field access string */
	if (core_relo_is_type_based(relo_kind)) {
		if (strcmp(spec_str, "0"))
			return -EINVAL;
		return 0;
	}

	/* parse spec_str ("0:1:2:3") into the raw array of accessor indices */
	while (*spec_str) {
		if (*spec_str == ':')
			++spec_str;
		if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
			return -EINVAL;
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;
		spec_str += parsed_len;
		spec->raw_spec[spec->raw_len++] = access_idx;
	}

	if (spec->raw_len == 0)
		return -EINVAL;

	t = skip_mods_and_typedefs(btf, type_id, &id);
	if (!t)
		return -EINVAL;

	/* first accessor is always "0" (root type) or the enumerator index */
	access_idx = spec->raw_spec[0];
	acc = &spec->spec[0];
	acc->type_id = id;
	acc->idx = access_idx;
	spec->len++;

	if (core_relo_is_enumval_based(relo_kind)) {
		if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
			return -EINVAL;

		/* record enumerator name in the first and only accessor */
		acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
		return 0;
	}

	if (!core_relo_is_field_based(relo_kind))
		return -EINVAL;

	/* root accessor behaves like array indexing into the root type */
	sz = btf__resolve_size(btf, id);
	if (sz < 0)
		return sz;
	spec->bit_offset = access_idx * sz * 8;

	for (i = 1; i < spec->raw_len; i++) {
		t = skip_mods_and_typedefs(btf, id, &id);
		if (!t)
			return -EINVAL;

		access_idx = spec->raw_spec[i];
		acc = &spec->spec[spec->len];

		if (btf_is_composite(t)) {
			const struct btf_member *m;
			__u32 bit_offset;

			if (access_idx >= btf_vlen(t))
				return -EINVAL;

			bit_offset = btf_member_bit_offset(t, access_idx);
			spec->bit_offset += bit_offset;

			m = btf_members(t) + access_idx;
			/* only named members become high-level accessors;
			 * anonymous struct/union members are "flattened"
			 */
			if (m->name_off) {
				name = btf__name_by_offset(btf, m->name_off);
				if (str_is_empty(name))
					return -EINVAL;

				acc->type_id = id;
				acc->idx = access_idx;
				acc->name = name;
				spec->len++;
			}

			id = m->type;
		} else if (btf_is_array(t)) {
			const struct btf_array *a = btf_array(t);
			bool flex;

			t = skip_mods_and_typedefs(btf, a->type, &id);
			if (!t)
				return -EINVAL;

			/* flexible arrays have no bound to check against */
			flex = is_flex_arr(btf, acc - 1, a);
			if (!flex && access_idx >= a->nelems)
				return -EINVAL;

			spec->spec[spec->len].type_id = id;
			spec->spec[spec->len].idx = access_idx;
			spec->len++;

			sz = btf__resolve_size(btf, id);
			if (sz < 0)
				return sz;
			spec->bit_offset += access_idx * sz * 8;
		} else {
			pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
				prog_name, type_id, spec_str, i, id, btf_kind_str(t));
			return -EINVAL;
		}
	}

	return 0;
}
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
/*
 * Check two types for compatibility for the purpose of a field access
 * relocation. Modifiers and typedefs are skipped on both sides; then:
 *   - any two structs/unions are considered compatible (members are matched
 *     individually by the caller);
 *   - otherwise the BTF kinds must match exactly;
 *   - PTRs and FLOATs are always compatible;
 *   - FWDs and ENUMs are compatible if either name is empty (anonymous) or
 *     the "essential" names (flavor suffix stripped) are equal;
 *   - INTs are compatible if both start at bit offset 0 (sizes are ignored);
 *   - ARRAYs recurse on their element types.
 *
 * Returns 1 if compatible, 0 if not, -EINVAL on unresolvable type IDs.
 */
static int bpf_core_fields_are_compat(const struct btf *local_btf,
				      __u32 local_id,
				      const struct btf *targ_btf,
				      __u32 targ_id)
{
	const struct btf_type *local_type, *targ_type;

recur:
	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (btf_is_composite(local_type) && btf_is_composite(targ_type))
		return 1;
	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_PTR:
	case BTF_KIND_FLOAT:
		return 1;
	case BTF_KIND_FWD:
	case BTF_KIND_ENUM: {
		const char *local_name, *targ_name;
		size_t local_len, targ_len;

		local_name = btf__name_by_offset(local_btf,
						 local_type->name_off);
		targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
		local_len = bpf_core_essential_name_len(local_name);
		targ_len = bpf_core_essential_name_len(targ_name);
		/* one of them is anonymous or their essential names match */
		return local_len == 0 || targ_len == 0 ||
		       (local_len == targ_len &&
			strncmp(local_name, targ_name, local_len) == 0);
	}
	case BTF_KIND_INT:
		/* just reject non-byte-aligned ints; size mismatch is fine
		 * because loads into registers are register-sized anyway
		 */
		return btf_int_offset(local_type) == 0 &&
		       btf_int_offset(targ_type) == 0;
	case BTF_KIND_ARRAY:
		/* tail-recurse on element types */
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	default:
		return 0;
	}
}
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
/*
 * Given a single named field accessor in the local type, find the
 * corresponding member in the target type, recursing into anonymous
 * struct/union members. While searching, the target spec (raw indices,
 * high-level accessors, bit offset) is extended speculatively and rolled
 * back on each non-matching branch, so on a successful return 'spec'
 * reflects exactly the matched path. *next_targ_id is set to the matched
 * member's type on success.
 *
 * Returns 1 when a compatible match is found, 0 when not, <0 on error.
 */
static int bpf_core_match_member(const struct btf *local_btf,
				 const struct bpf_core_accessor *local_acc,
				 const struct btf *targ_btf,
				 __u32 targ_id,
				 struct bpf_core_spec *spec,
				 __u32 *next_targ_id)
{
	const struct btf_type *local_type, *targ_type;
	const struct btf_member *local_member, *m;
	const char *local_name, *targ_name;
	__u32 local_id;
	int i, n, found;

	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!targ_type)
		return -EINVAL;
	if (!btf_is_composite(targ_type))
		return 0;

	local_id = local_acc->type_id;
	local_type = btf_type_by_id(local_btf, local_id);
	local_member = btf_members(local_type) + local_acc->idx;
	local_name = btf__name_by_offset(local_btf, local_member->name_off);

	n = btf_vlen(targ_type);
	m = btf_members(targ_type);
	for (i = 0; i < n; i++, m++) {
		__u32 bit_offset;

		bit_offset = btf_member_bit_offset(targ_type, i);

		/* too deep struct/union/array nesting */
		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
			return -E2BIG;

		/* speculative addition to the spec; undone below if no match */
		spec->bit_offset += bit_offset;
		spec->raw_spec[spec->raw_len++] = i;

		targ_name = btf__name_by_offset(targ_btf, m->name_off);
		if (str_is_empty(targ_name)) {
			/* embedded anonymous struct/union: search inside it */
			found = bpf_core_match_member(local_btf, local_acc,
						      targ_btf, m->type,
						      spec, next_targ_id);
			if (found)
				return found;
		} else if (strcmp(local_name, targ_name) == 0) {
			/* matching named field: record the accessor */
			struct bpf_core_accessor *targ_acc;

			targ_acc = &spec->spec[spec->len++];
			targ_acc->type_id = targ_id;
			targ_acc->idx = i;
			targ_acc->name = targ_name;

			*next_targ_id = m->type;
			found = bpf_core_fields_are_compat(local_btf,
							   local_member->type,
							   targ_btf, m->type);
			if (!found)
				spec->len--; /* pop accessor */
			return found;
		}
		/* member turned out not to be what we looked for */
		spec->bit_offset -= bit_offset;
		spec->raw_len--;
	}

	return 0;
}
468
469
470
471
472
/*
 * Try to match the local spec against a candidate target type, producing the
 * corresponding target spec (accessors, raw indices, bit offset) in
 * *targ_spec. Type-based relos delegate to full type compatibility checks;
 * enumval-based relos match enumerators by essential name; field-based relos
 * walk the accessor chain, matching named members (possibly through
 * anonymous struct/union layers) and re-applying array indexing.
 *
 * Returns 1 on match, 0 on mismatch, <0 on error.
 */
static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
			       const struct btf *targ_btf, __u32 targ_id,
			       struct bpf_core_spec *targ_spec)
{
	const struct btf_type *targ_type;
	const struct bpf_core_accessor *local_acc;
	struct bpf_core_accessor *targ_acc;
	int i, sz, matched;

	memset(targ_spec, 0, sizeof(*targ_spec));
	targ_spec->btf = targ_btf;
	targ_spec->root_type_id = targ_id;
	targ_spec->relo_kind = local_spec->relo_kind;

	if (core_relo_is_type_based(local_spec->relo_kind)) {
		return bpf_core_types_are_compat(local_spec->btf,
						 local_spec->root_type_id,
						 targ_btf, targ_id);
	}

	local_acc = &local_spec->spec[0];
	targ_acc = &targ_spec->spec[0];

	if (core_relo_is_enumval_based(local_spec->relo_kind)) {
		size_t local_essent_len, targ_essent_len;
		const struct btf_enum *e;
		const char *targ_name;

		/* has to resolve to an enum */
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
		if (!btf_is_enum(targ_type))
			return 0;

		local_essent_len = bpf_core_essential_name_len(local_acc->name);

		/* find enumerator with matching essential name */
		for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
			targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
			targ_essent_len = bpf_core_essential_name_len(targ_name);
			if (targ_essent_len != local_essent_len)
				continue;
			if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
				targ_acc->type_id = targ_id;
				targ_acc->idx = i;
				targ_acc->name = targ_name;
				targ_spec->len++;
				targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
				targ_spec->raw_len++;
				return 1;
			}
		}
		return 0;
	}

	if (!core_relo_is_field_based(local_spec->relo_kind))
		return -EINVAL;

	for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
		targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
						   &targ_id);
		if (!targ_type)
			return -EINVAL;

		if (local_acc->name) {
			/* named field: search (possibly recursively through
			 * anonymous members) for a compatible counterpart
			 */
			matched = bpf_core_match_member(local_spec->btf,
							local_acc,
							targ_btf, targ_id,
							targ_spec, &targ_id);
			if (matched <= 0)
				return matched;
		} else {
			/* for i=0, targ_id is already treated as an array
			 * element type (because it's the original root type);
			 * for others we need to find the element type first
			 */
			if (i > 0) {
				const struct btf_array *a;
				bool flex;

				if (!btf_is_array(targ_type))
					return 0;

				a = btf_array(targ_type);
				flex = is_flex_arr(targ_btf, targ_acc - 1, a);
				if (!flex && local_acc->idx >= a->nelems)
					return 0;
				if (!skip_mods_and_typedefs(targ_btf, a->type,
							    &targ_id))
					return -EINVAL;
			}

			/* too deep struct/union/array nesting */
			if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
				return -E2BIG;

			targ_acc->type_id = targ_id;
			targ_acc->idx = local_acc->idx;
			targ_acc->name = NULL;
			targ_spec->len++;
			targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
			targ_spec->raw_len++;

			sz = btf__resolve_size(targ_btf, targ_id);
			if (sz < 0)
				return sz;
			targ_spec->bit_offset += local_acc->idx * sz * 8;
		}
	}

	return 1;
}
583
/*
 * Compute the value for a field-based relocation (offset, size, signedness,
 * bitfield shifts) from the given spec. A NULL spec means the field wasn't
 * found: FIELD_EXISTS yields 0, every other kind returns -EUCLEAN, which the
 * caller translates into instruction poisoning. For non-bitfield byte-offset
 * relos, *field_sz and *type_id describe the accessed field so the caller
 * can later adjust load/store memory size. *validate reports whether the
 * original instruction's value can be safely cross-checked.
 */
static int bpf_core_calc_field_relo(const char *prog_name,
				    const struct bpf_core_relo *relo,
				    const struct bpf_core_spec *spec,
				    __u32 *val, __u32 *field_sz, __u32 *type_id,
				    bool *validate)
{
	const struct bpf_core_accessor *acc;
	const struct btf_type *t;
	__u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
	const struct btf_member *m;
	const struct btf_type *mt;
	bool bitfield;
	__s64 sz;

	*field_sz = 0;

	if (relo->kind == BPF_CORE_FIELD_EXISTS) {
		*val = spec ? 1 : 0;
		return 0;
	}

	if (!spec)
		return -EUCLEAN; /* request instruction poisoning */

	acc = &spec->spec[spec->len - 1];
	t = btf_type_by_id(spec->btf, acc->type_id);

	/* a[n] accessor needs special handling: no enclosing member */
	if (!acc->name) {
		if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
			*val = spec->bit_offset / 8;
			/* remember field size for load/store mem size adjust */
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*field_sz = sz;
			*type_id = acc->type_id;
		} else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) {
			sz = btf__resolve_size(spec->btf, acc->type_id);
			if (sz < 0)
				return -EINVAL;
			*val = sz;
		} else {
			pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
				prog_name, relo->kind, relo->insn_off / 8);
			return -EINVAL;
		}
		if (validate)
			*validate = true;
		return 0;
	}

	m = btf_members(t) + acc->idx;
	mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
	bit_off = spec->bit_offset;
	bit_sz = btf_member_bitfield_size(t, acc->idx);

	bitfield = bit_sz > 0;
	if (bitfield) {
		byte_sz = mt->size;
		byte_off = bit_off / 8 / byte_sz * byte_sz;
		/* figure out the smallest int size necessary for bitfield load */
		while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
			if (byte_sz >= 8) {
				/* bitfield can't be read with 64-bit read */
				pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
					prog_name, relo->kind, relo->insn_off / 8);
				return -E2BIG;
			}
			byte_sz *= 2;
			byte_off = bit_off / 8 / byte_sz * byte_sz;
		}
	} else {
		sz = btf__resolve_size(spec->btf, field_type_id);
		if (sz < 0)
			return -EINVAL;
		byte_sz = sz;
		byte_off = spec->bit_offset / 8;
		bit_sz = byte_sz * 8;
	}

	/* for bitfields, all the relocatable aspects are ambiguous and we
	 * might have to fall back to using kernel BTF's sizes/offsets, so
	 * only allow cross-checking the instruction for non-bitfields
	 */
	if (validate)
		*validate = !bitfield;

	switch (relo->kind) {
	case BPF_CORE_FIELD_BYTE_OFFSET:
		*val = byte_off;
		if (!bitfield) {
			*field_sz = byte_sz;
			*type_id = field_type_id;
		}
		break;
	case BPF_CORE_FIELD_BYTE_SIZE:
		*val = byte_sz;
		break;
	case BPF_CORE_FIELD_SIGNED:
		/* enums are signed for this purpose */
		*val = btf_is_enum(mt) ||
		       (btf_int_encoding(mt) & BTF_INT_SIGNED);
		if (validate)
			*validate = true; /* signedness is never ambiguous */
		break;
	case BPF_CORE_FIELD_LSHIFT_U64:
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
		*val = 64 - (bit_off + bit_sz - byte_off * 8);
#else
		*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
#endif
		break;
	case BPF_CORE_FIELD_RSHIFT_U64:
		*val = 64 - bit_sz;
		if (validate)
			*validate = true; /* right shift is never ambiguous */
		break;
	case BPF_CORE_FIELD_EXISTS:
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
709
/*
 * Compute the value for a type-based relocation (target type ID, existence,
 * or size). A NULL spec means no matching target type was found; for these
 * kinds that is a valid outcome and yields 0 (false/absent) without error.
 */
static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
				   const struct bpf_core_spec *spec,
				   __u32 *val, bool *validate)
{
	__s64 sz;

	/* by default, validate the value embedded in the instruction */
	if (validate)
		*validate = true;

	/* type-based relos return zero when target type is not found */
	if (!spec) {
		*val = 0;
		return 0;
	}

	switch (relo->kind) {
	case BPF_CORE_TYPE_ID_TARGET:
		*val = spec->root_type_id;
		/* type ID embedded in bpf_insn can change during (de)dup and
		 * linking, so enforcing it is pointless
		 */
		if (validate)
			*validate = false;
		break;
	case BPF_CORE_TYPE_EXISTS:
		*val = 1;
		break;
	case BPF_CORE_TYPE_SIZE:
		sz = btf__resolve_size(spec->btf, spec->root_type_id);
		if (sz < 0)
			return -EINVAL;
		*val = sz;
		break;
	case BPF_CORE_TYPE_ID_LOCAL:
	/* BPF_CORE_TYPE_ID_LOCAL is handled specially by the caller */
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
752
753static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
754 const struct bpf_core_spec *spec,
755 __u32 *val)
756{
757 const struct btf_type *t;
758 const struct btf_enum *e;
759
760 switch (relo->kind) {
761 case BPF_CORE_ENUMVAL_EXISTS:
762 *val = spec ? 1 : 0;
763 break;
764 case BPF_CORE_ENUMVAL_VALUE:
765 if (!spec)
766 return -EUCLEAN;
767 t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
768 e = btf_enum(t) + spec->spec[0].idx;
769 *val = e->val;
770 break;
771 default:
772 return -EOPNOTSUPP;
773 }
774
775 return 0;
776}
777
778
779
780
781
782
783
/*
 * Calculate original and target relocation values for one candidate, given
 * local and target specs and relocation kind. Fills in *res (old/new values,
 * old/new field sizes and type IDs, validate and poison flags). When there
 * are multiple candidates, the caller cross-checks these results for
 * consistency. Returns 0 on success (possibly with res->poison set) or a
 * negative error.
 */
static int bpf_core_calc_relo(const char *prog_name,
			      const struct bpf_core_relo *relo,
			      int relo_idx,
			      const struct bpf_core_spec *local_spec,
			      const struct bpf_core_spec *targ_spec,
			      struct bpf_core_relo_res *res)
{
	int err = -EOPNOTSUPP;

	res->orig_val = 0;
	res->new_val = 0;
	res->poison = false;
	res->validate = true;
	res->fail_memsz_adjust = false;
	res->orig_sz = res->new_sz = 0;
	res->orig_type_id = res->new_type_id = 0;

	if (core_relo_is_field_based(relo->kind)) {
		err = bpf_core_calc_field_relo(prog_name, relo, local_spec,
					       &res->orig_val, &res->orig_sz,
					       &res->orig_type_id, &res->validate);
		err = err ?: bpf_core_calc_field_relo(prog_name, relo, targ_spec,
						      &res->new_val, &res->new_sz,
						      &res->new_type_id, NULL);
		if (err)
			goto done;

		/* see bpf_core_patch_insn() for details about memory size
		 * adjustment; decide here whether a size mismatch is safe
		 */
		res->fail_memsz_adjust = false;
		if (res->orig_sz != res->new_sz) {
			const struct btf_type *orig_t, *new_t;

			orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id);
			new_t = btf_type_by_id(targ_spec->btf, res->new_type_id);

			/* Two cases in which adjusting a load/store's mem
			 * size is safe:
			 *   - both sides are pointers (BPF-side register is
			 *     64-bit regardless of kernel pointer size);
			 *   - both sides are non-signed integers (loads into
			 *     registers zero-extend, so reading fewer or more
			 *     bytes of an unsigned int preserves the value).
			 * Everything else (signed ints, mixed kinds) would
			 * silently change semantics, so it is flagged and
			 * later poisoned for LDX/ST/STX instructions.
			 */
			if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
				goto done;
			if (btf_is_int(orig_t) && btf_is_int(new_t) &&
			    btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
			    btf_int_encoding(new_t) != BTF_INT_SIGNED)
				goto done;

			/* mark as invalid mem size adjustment; this is only
			 * enforced for LDX/ST/STX instructions
			 */
			res->fail_memsz_adjust = true;
		}
	} else if (core_relo_is_type_based(relo->kind)) {
		err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val, &res->validate);
		err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val, NULL);
	} else if (core_relo_is_enumval_based(relo->kind)) {
		err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
		err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
	}

done:
	if (err == -EUCLEAN) {
		/* -EUCLEAN is a request for instruction poisoning, not an error */
		res->poison = true;
		err = 0;
	} else if (err == -EOPNOTSUPP) {
		/* relocation kind is unknown/unsupported */
		pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
			prog_name, relo_idx, core_relo_kind_str(relo->kind),
			relo->kind, relo->insn_off / 8);
	}

	return err;
}
869
870
871
872
873
/* Turn an instruction whose relocation can't be satisfied into one that is
 * guaranteed to fail verification — but only if it's actually reachable:
 * dead code is eliminated by the verifier and never complains.
 */
static void bpf_core_poison_insn(const char *prog_name, int relo_idx,
				 int insn_idx, struct bpf_insn *insn)
{
	pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
		 prog_name, relo_idx, insn_idx);
	insn->code = BPF_JMP | BPF_CALL;
	insn->dst_reg = 0;
	insn->src_reg = 0;
	insn->off = 0;
	/* if this instruction is reachable (not dead code), the verifier
	 * will complain with something like:
	 *   invalid func unknown#195896080
	 * where 195896080 == 0xbad2310 reads as "bad relo"
	 */
	insn->imm = 195896080;
}
889
/* Decode the BPF_SIZE bits of an instruction's opcode into a byte count,
 * or -1 if the size bits are not a recognized value.
 */
static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
{
	switch (BPF_SIZE(insn->code)) {
	case BPF_B:
		return 1;
	case BPF_H:
		return 2;
	case BPF_W:
		return 4;
	case BPF_DW:
		return 8;
	default:
		return -1;
	}
}
900
/* Encode a byte count as the corresponding BPF_SIZE opcode bits, or -1 if
 * the size is not one of the four supported access widths.
 */
static int insn_bytes_to_bpf_size(__u32 sz)
{
	if (sz == 1)
		return BPF_B;
	if (sz == 2)
		return BPF_H;
	if (sz == 4)
		return BPF_W;
	if (sz == 8)
		return BPF_DW;
	return -1;
}
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
/*
 * Patch one relocatable BPF instruction according to the precomputed
 * relocation result.
 *
 * If res->poison is set, the instruction (and, for ldimm64, its second
 * half) is replaced with a guaranteed-to-fail call. Otherwise, when
 * res->validate is set, the instruction's current value is checked against
 * the expected original value before patching; a mismatch is an error.
 *
 * Supported instruction classes:
 *   - ALU/ALU64 with immediate operand (imm patched);
 *   - LDX/ST/STX memory access (off patched; memory access size may also be
 *     adjusted when field size differs between local and target);
 *   - LD with 64-bit immediate, i.e. ldimm64 (imm64 patched).
 *
 * Returns 0 on success, negative errno on failure.
 */
int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
			int insn_idx, const struct bpf_core_relo *relo,
			int relo_idx, const struct bpf_core_relo_res *res)
{
	__u32 orig_val, new_val;
	__u8 class;

	class = BPF_CLASS(insn->code);

	if (res->poison) {
poison:
		/* poison second part of ldimm64 to avoid confusing error
		 * from verifier about "unknown opcode 00"
		 */
		if (is_ldimm64_insn(insn))
			bpf_core_poison_insn(prog_name, relo_idx, insn_idx + 1, insn + 1);
		bpf_core_poison_insn(prog_name, relo_idx, insn_idx, insn);
		return 0;
	}

	orig_val = res->orig_val;
	new_val = res->new_val;

	switch (class) {
	case BPF_ALU:
	case BPF_ALU64:
		/* only immediate-operand ALU insns can carry a relocation */
		if (BPF_SRC(insn->code) != BPF_K)
			return -EINVAL;
		if (res->validate && insn->imm != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
				prog_name, relo_idx,
				insn_idx, insn->imm, orig_val, new_val);
			return -EINVAL;
		}
		orig_val = insn->imm;
		insn->imm = new_val;
		pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
			 prog_name, relo_idx, insn_idx,
			 orig_val, new_val);
		break;
	case BPF_LDX:
	case BPF_ST:
	case BPF_STX:
		if (res->validate && insn->off != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
				prog_name, relo_idx, insn_idx, insn->off, orig_val, new_val);
			return -EINVAL;
		}
		/* insn->off is a signed 16-bit field */
		if (new_val > SHRT_MAX) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
				prog_name, relo_idx, insn_idx, new_val);
			return -ERANGE;
		}
		if (res->fail_memsz_adjust) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
				"Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
				prog_name, relo_idx, insn_idx);
			goto poison;
		}

		orig_val = insn->off;
		insn->off = new_val;
		pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
			 prog_name, relo_idx, insn_idx, orig_val, new_val);

		/* adjust memory access size if field size changed */
		if (res->new_sz != res->orig_sz) {
			int insn_bytes_sz, insn_bpf_sz;

			insn_bytes_sz = insn_bpf_size_to_bytes(insn);
			if (insn_bytes_sz != res->orig_sz) {
				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
					prog_name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
				return -EINVAL;
			}

			insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
			if (insn_bpf_sz < 0) {
				pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
					prog_name, relo_idx, insn_idx, res->new_sz);
				return -EINVAL;
			}

			/* rewrite only the size bits of the opcode */
			insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
			pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
				 prog_name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
		}
		break;
	case BPF_LD: {
		__u64 imm;

		/* must be a canonical two-part ldimm64 */
		if (!is_ldimm64_insn(insn) ||
		    insn[0].src_reg != 0 || insn[0].off != 0 ||
		    insn[1].code != 0 || insn[1].dst_reg != 0 ||
		    insn[1].src_reg != 0 || insn[1].off != 0) {
			pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
				prog_name, relo_idx, insn_idx);
			return -EINVAL;
		}

		imm = insn[0].imm + ((__u64)insn[1].imm << 32);
		if (res->validate && imm != orig_val) {
			pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
				prog_name, relo_idx,
				insn_idx, (unsigned long long)imm,
				orig_val, new_val);
			return -EINVAL;
		}

		insn[0].imm = new_val;
		insn[1].imm = 0; /* currently only 32-bit values are supported */
		pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
			 prog_name, relo_idx, insn_idx,
			 (unsigned long long)imm, new_val);
		break;
	}
	default:
		pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
			prog_name, relo_idx, insn_idx, insn->code,
			insn->src_reg, insn->dst_reg, insn->off, insn->imm);
		return -EINVAL;
	}

	return 0;
}
1053
1054
1055
1056
1057
/*
 * Output spec definition in a compact human-readable format for logs:
 * "[<type-id>] <kind> <type-name>" followed, depending on relocation kind,
 * by "::<enumerator> = <value>" for enumval relos, or the C-like accessor
 * path, raw spec indices, and resulting byte(.bit) offset for field relos.
 * No trailing newline is printed — the caller decides line termination.
 */
static void bpf_core_dump_spec(const char *prog_name, int level, const struct bpf_core_spec *spec)
{
	const struct btf_type *t;
	const struct btf_enum *e;
	const char *s;
	__u32 type_id;
	int i;

	type_id = spec->root_type_id;
	t = btf_type_by_id(spec->btf, type_id);
	s = btf__name_by_offset(spec->btf, t->name_off);

	libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);

	/* type-based relos carry no accessor path */
	if (core_relo_is_type_based(spec->relo_kind))
		return;

	if (core_relo_is_enumval_based(spec->relo_kind)) {
		t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
		e = btf_enum(t) + spec->raw_spec[0];
		s = btf__name_by_offset(spec->btf, e->name_off);

		libbpf_print(level, "::%s = %u", s, e->val);
		return;
	}

	if (core_relo_is_field_based(spec->relo_kind)) {
		/* C-like accessor path: .field or [idx] */
		for (i = 0; i < spec->len; i++) {
			if (spec->spec[i].name)
				libbpf_print(level, ".%s", spec->spec[i].name);
			else if (i > 0 || spec->spec[i].idx > 0)
				libbpf_print(level, "[%u]", spec->spec[i].idx);
		}

		/* raw colon-separated spec indices */
		libbpf_print(level, " (");
		for (i = 0; i < spec->raw_len; i++)
			libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);

		if (spec->bit_offset % 8)
			libbpf_print(level, " @ offset %u.%u)",
				     spec->bit_offset / 8, spec->bit_offset % 8);
		else
			libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
		return;
	}
}
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
/*
 * Calculate the CO-RE relocation result for a single instruction.
 *
 * Outline:
 * 1. Parse the relocation's accessor string against local BTF into a local
 *    spec (bpf_core_parse_spec).
 * 2. TYPE_ID_LOCAL is resolved immediately from the local spec, without any
 *    target candidates.
 * 3. Each candidate target type is matched against the local spec
 *    (bpf_core_spec_match); for every match the relocation value is
 *    computed (bpf_core_calc_relo) and cross-checked against previous
 *    matches — disagreeing candidates make the relocation ambiguous and
 *    fail it with -EINVAL.
 * 4. The candidate list is shrunk to just the matching ones for reuse by
 *    subsequent relocations against the same root type.
 * 5. If nothing matched, the relocation is still calculated with a NULL
 *    target spec, producing either a valid "absent" value (existence
 *    checks) or a "poisoned" result handled by bpf_core_patch_insn().
 *
 * specs_scratch must provide space for three bpf_core_spec's.
 * Returns 0 on success (targ_res filled in), negative errno on failure.
 */
int bpf_core_calc_relo_insn(const char *prog_name,
			    const struct bpf_core_relo *relo,
			    int relo_idx,
			    const struct btf *local_btf,
			    struct bpf_core_cand_list *cands,
			    struct bpf_core_spec *specs_scratch,
			    struct bpf_core_relo_res *targ_res)
{
	struct bpf_core_spec *local_spec = &specs_scratch[0];
	struct bpf_core_spec *cand_spec = &specs_scratch[1];
	struct bpf_core_spec *targ_spec = &specs_scratch[2];
	struct bpf_core_relo_res cand_res;
	const struct btf_type *local_type;
	const char *local_name;
	__u32 local_id;
	const char *spec_str;
	int i, j, err;

	local_id = relo->type_id;
	local_type = btf_type_by_id(local_btf, local_id);
	local_name = btf__name_by_offset(local_btf, local_type->name_off);
	if (!local_name)
		return -EINVAL;

	spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
	if (str_is_empty(spec_str))
		return -EINVAL;

	err = bpf_core_parse_spec(prog_name, local_btf, local_id, spec_str,
				  relo->kind, local_spec);
	if (err) {
		pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
			prog_name, relo_idx, local_id, btf_kind_str(local_type),
			str_is_empty(local_name) ? "<anon>" : local_name,
			spec_str, err);
		return -EINVAL;
	}

	pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog_name,
		 relo_idx, core_relo_kind_str(relo->kind), relo->kind);
	bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, local_spec);
	libbpf_print(LIBBPF_DEBUG, "\n");

	/* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
	if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) {
		/* bpf_insn's imm value could get out of sync during linking,
		 * so don't validate it against the original
		 */
		memset(targ_res, 0, sizeof(*targ_res));
		targ_res->validate = false;
		targ_res->poison = false;
		targ_res->orig_val = local_spec->root_type_id;
		targ_res->new_val = local_spec->root_type_id;
		return 0;
	}

	/* candidate search is by name, so anonymous types can't be relocated */
	if (str_is_empty(spec_str)) {
		pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
			prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
		return -EOPNOTSUPP;
	}

	for (i = 0, j = 0; i < cands->len; i++) {
		err = bpf_core_spec_match(local_spec, cands->cands[i].btf,
					  cands->cands[i].id, cand_spec);
		if (err < 0) {
			pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
				prog_name, relo_idx, i);
			bpf_core_dump_spec(prog_name, LIBBPF_WARN, cand_spec);
			libbpf_print(LIBBPF_WARN, ": %d\n", err);
			return err;
		}

		pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog_name,
			 relo_idx, err == 0 ? "non-matching" : "matching", i);
		bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, cand_spec);
		libbpf_print(LIBBPF_DEBUG, "\n");

		if (err == 0)
			continue;

		err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, cand_spec, &cand_res);
		if (err)
			return err;

		if (j == 0) {
			/* first match establishes the expected result */
			*targ_res = cand_res;
			*targ_spec = *cand_spec;
		} else if (cand_spec->bit_offset != targ_spec->bit_offset) {
			/* if there are many field relo candidates, they
			 * should all resolve to the same bit offset
			 */
			pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
				prog_name, relo_idx, cand_spec->bit_offset,
				targ_spec->bit_offset);
			return -EINVAL;
		} else if (cand_res.poison != targ_res->poison ||
			   cand_res.new_val != targ_res->new_val) {
			/* all candidates should result in the same
			 * relocation decision and value, otherwise it's
			 * dangerous to proceed due to ambiguity
			 */
			pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
				prog_name, relo_idx,
				cand_res.poison ? "failure" : "success", cand_res.new_val,
				targ_res->poison ? "failure" : "success", targ_res->new_val);
			return -EINVAL;
		}

		cands->cands[j++] = cands->cands[i];
	}

	/* shrink the candidate list to only the matching ones, so that
	 * subsequent relocations against the same root type reuse them
	 */
	if (j > 0)
		cands->len = j;

	/*
	 * If no candidates were found, it might be both a programmer error
	 * and an expected case, depending on whether the instruction with
	 * the relocation is guarded in a way that makes it unreachable
	 * (dead code) when the relocation can't be resolved. This is
	 * handled uniformly in bpf_core_patch_insn() by replacing the
	 * instruction with an invalid BPF helper call: if unreachable, the
	 * verifier eliminates it silently; otherwise the verifier complains
	 * pointing at the exact instruction.
	 */
	if (j == 0) {
		pr_debug("prog '%s': relo #%d: no matching targets found\n",
			 prog_name, relo_idx);

		/* calculate the relocation anyway, keeping it "poisoned" */
		err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, targ_res);
		if (err)
			return err;
	}

	return 0;
}
1300