#include <uapi/linux/btf.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/bpf_perf_event.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/skmsg.h>
#include <linux/perf_event.h>
#include <net/sock.h>

#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))

#define BTF_INFO_MASK 0x8f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

#define BTF_MAX_SIZE (16 * 1024 * 1024)

#define for_each_member_from(i, from, struct_type, member)		\
	for (i = from, member = btf_type_member(struct_type) + from;	\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

#define for_each_vsi(i, struct_type, member)				\
	for (i = 0, member = btf_type_var_secinfo(struct_type);		\
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

#define for_each_vsi_from(i, from, struct_type, member)			\
	for (i = from, member = btf_type_var_secinfo(struct_type) + from; \
	     i < btf_type_vlen(struct_type);				\
	     i++, member++)

DEFINE_IDR(btf_idr);
DEFINE_SPINLOCK(btf_idr_lock);

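/*
 * Descriptive note (inferred from the code below): struct btf is the
 * in-kernel representation of a loaded BTF object. It keeps the raw
 * data and string section, an id-indexed table of type pointers, the
 * per-type resolved ids/sizes filled in by the verification pass, and
 * the refcount/id used to register the object in btf_idr.
 */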
struct btf {
	void *data;
	struct btf_type **types;
	u32 *resolved_ids;
	u32 *resolved_sizes;
	const char *strings;
	void *nohdr_data;
	struct btf_header hdr;
	u32 nr_types;
	u32 types_size;
	u32 data_size;
	refcount_t refcnt;
	u32 id;
	struct rcu_head rcu;
};

enum verifier_phase {
	CHECK_META,
	CHECK_TYPE,
};

struct resolve_vertex {
	const struct btf_type *t;
	u32 type_id;
	u16 next_member;
};

enum visit_state {
	NOT_VISITED,
	VISITED,
	RESOLVED,
};

enum resolve_mode {
	RESOLVE_TBD,
	RESOLVE_PTR,
	RESOLVE_STRUCT_OR_ARRAY,
};

#define MAX_RESOLVE_DEPTH 32

struct btf_sec_info {
	u32 off;
	u32 len;
};

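/*
 * Descriptive note (inferred from the code below): the verifier env
 * carries per-verification state: the btf being checked, one
 * visit-state byte per type id, and a bounded stack of resolve_vertex
 * entries used to walk type chains iteratively (see MAX_RESOLVE_DEPTH).
 */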
struct btf_verifier_env {
	struct btf *btf;
	u8 *visit_states;
	struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
	struct bpf_verifier_log log;
	u32 log_type_id;
	u32 top_stack;
	enum verifier_phase phase;
	enum resolve_mode resolve_mode;
};

static const char * const btf_kind_str[NR_BTF_KINDS] = {
	[BTF_KIND_UNKN]		= "UNKNOWN",
	[BTF_KIND_INT]		= "INT",
	[BTF_KIND_PTR]		= "PTR",
	[BTF_KIND_ARRAY]	= "ARRAY",
	[BTF_KIND_STRUCT]	= "STRUCT",
	[BTF_KIND_UNION]	= "UNION",
	[BTF_KIND_ENUM]		= "ENUM",
	[BTF_KIND_FWD]		= "FWD",
	[BTF_KIND_TYPEDEF]	= "TYPEDEF",
	[BTF_KIND_VOLATILE]	= "VOLATILE",
	[BTF_KIND_CONST]	= "CONST",
	[BTF_KIND_RESTRICT]	= "RESTRICT",
	[BTF_KIND_FUNC]		= "FUNC",
	[BTF_KIND_FUNC_PROTO]	= "FUNC_PROTO",
	[BTF_KIND_VAR]		= "VAR",
	[BTF_KIND_DATASEC]	= "DATASEC",
};

static const char *btf_type_str(const struct btf_type *t)
{
	return btf_kind_str[BTF_INFO_KIND(t->info)];
}

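/*
 * Descriptive note (inferred from the code below): each BTF kind
 * supplies its own operations for metadata validation (check_meta),
 * reference resolution (resolve), member checks inside structs/unions,
 * logging, and seq_file dumping. The kind_ops[] table declared below
 * dispatches on BTF_INFO_KIND(t->info).
 */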
struct btf_kind_operations {
	s32 (*check_meta)(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left);
	int (*resolve)(struct btf_verifier_env *env,
		       const struct resolve_vertex *v);
	int (*check_member)(struct btf_verifier_env *env,
			    const struct btf_type *struct_type,
			    const struct btf_member *member,
			    const struct btf_type *member_type);
	int (*check_kflag_member)(struct btf_verifier_env *env,
				  const struct btf_type *struct_type,
				  const struct btf_member *member,
				  const struct btf_type *member_type);
	void (*log_details)(struct btf_verifier_env *env,
			    const struct btf_type *t);
	void (*seq_show)(const struct btf *btf, const struct btf_type *t,
			 u32 type_id, void *data, u8 bits_offsets,
			 struct seq_file *m);
};

static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
static struct btf_type btf_void;

static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id);

static bool btf_type_is_modifier(const struct btf_type *t)
{
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
		return true;
	}

	return false;
}

bool btf_type_is_void(const struct btf_type *t)
{
	return t == &btf_void;
}

static bool btf_type_is_fwd(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
}

static bool btf_type_nosize(const struct btf_type *t)
{
	return btf_type_is_void(t) || btf_type_is_fwd(t) ||
	       btf_type_is_func(t) || btf_type_is_func_proto(t);
}

static bool btf_type_nosize_or_null(const struct btf_type *t)
{
	return !t || btf_type_nosize(t);
}

static bool btf_type_is_struct(const struct btf_type *t)
{
	u8 kind = BTF_INFO_KIND(t->info);

	return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
}

static bool __btf_type_is_struct(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
}

static bool btf_type_is_array(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
}

static bool btf_type_is_var(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
}

static bool btf_type_is_datasec(const struct btf_type *t)
{
	return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
}

s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
{
	const struct btf_type *t;
	const char *tname;
	u32 i;

	for (i = 1; i <= btf->nr_types; i++) {
		t = btf->types[i];
		if (BTF_INFO_KIND(t->info) != kind)
			continue;

		tname = btf_name_by_offset(btf, t->name_off);
		if (!strcmp(tname, name))
			return i;
	}

	return -ENOENT;
}

const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
					       u32 id, u32 *res_id)
{
	const struct btf_type *t = btf_type_by_id(btf, id);

	while (btf_type_is_modifier(t)) {
		id = t->type;
		t = btf_type_by_id(btf, t->type);
	}

	if (res_id)
		*res_id = id;

	return t;
}

const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
					    u32 id, u32 *res_id)
{
	const struct btf_type *t;

	t = btf_type_skip_modifiers(btf, id, NULL);
	if (!btf_type_is_ptr(t))
		return NULL;

	return btf_type_skip_modifiers(btf, t->type, res_id);
}

const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
						 u32 id, u32 *res_id)
{
	const struct btf_type *ptype;

	ptype = btf_type_resolve_ptr(btf, id, res_id);
	if (ptype && btf_type_is_func_proto(ptype))
		return ptype;

	return NULL;
}

static bool btf_type_is_resolve_source_only(const struct btf_type *t)
{
	return btf_type_is_var(t) ||
	       btf_type_is_datasec(t);
}

static bool btf_type_needs_resolve(const struct btf_type *t)
{
	return btf_type_is_modifier(t) ||
	       btf_type_is_ptr(t) ||
	       btf_type_is_struct(t) ||
	       btf_type_is_array(t) ||
	       btf_type_is_var(t) ||
	       btf_type_is_datasec(t);
}

static bool btf_type_has_size(const struct btf_type *t)
{
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_INT:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
	case BTF_KIND_DATASEC:
		return true;
	}

	return false;
}

static const char *btf_int_encoding_str(u8 encoding)
{
	if (encoding == 0)
		return "(none)";
	else if (encoding == BTF_INT_SIGNED)
		return "SIGNED";
	else if (encoding == BTF_INT_CHAR)
		return "CHAR";
	else if (encoding == BTF_INT_BOOL)
		return "BOOL";
	else
		return "UNKN";
}

static u32 btf_type_int(const struct btf_type *t)
{
	return *(u32 *)(t + 1);
}

static const struct btf_array *btf_type_array(const struct btf_type *t)
{
	return (const struct btf_array *)(t + 1);
}

static const struct btf_enum *btf_type_enum(const struct btf_type *t)
{
	return (const struct btf_enum *)(t + 1);
}

static const struct btf_var *btf_type_var(const struct btf_type *t)
{
	return (const struct btf_var *)(t + 1);
}

static const struct btf_var_secinfo *btf_type_var_secinfo(const struct btf_type *t)
{
	return (const struct btf_var_secinfo *)(t + 1);
}

static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{
	return kind_ops[BTF_INFO_KIND(t->info)];
}

static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
{
	return BTF_STR_OFFSET_VALID(offset) &&
	       offset < btf->hdr.str_len;
}

static bool __btf_name_char_ok(char c, bool first, bool dot_ok)
{
	if ((first ? !isalpha(c) :
		     !isalnum(c)) &&
	    c != '_' &&
	    ((c == '.' && !dot_ok) ||
	      c != '.'))
		return false;
	return true;
}

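/*
 * Descriptive note (inferred from the code below): BTF names must look
 * like C identifiers: the first character a letter or '_', the rest
 * alphanumeric or '_'. Section names (dot_ok) may additionally contain
 * '.'. The length is capped at KSYM_NAME_LEN.
 */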
static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok)
{
	const char *src = &btf->strings[offset];
	const char *src_limit;

	if (!__btf_name_char_ok(*src, true, dot_ok))
		return false;

	src_limit = src + KSYM_NAME_LEN;
	src++;
	while (*src && src < src_limit) {
		if (!__btf_name_char_ok(*src, false, dot_ok))
			return false;
		src++;
	}

	return !*src;
}

static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
{
	return __btf_name_valid(btf, offset, false);
}

static bool btf_name_valid_section(const struct btf *btf, u32 offset)
{
	return __btf_name_valid(btf, offset, true);
}

static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
{
	if (!offset)
		return "(anon)";
	else if (offset < btf->hdr.str_len)
		return &btf->strings[offset];
	else
		return "(invalid-name-offset)";
}

const char *btf_name_by_offset(const struct btf *btf, u32 offset)
{
	if (offset < btf->hdr.str_len)
		return &btf->strings[offset];

	return NULL;
}

const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
{
	if (type_id > btf->nr_types)
		return NULL;

	return btf->types[type_id];
}

static bool btf_type_int_is_regular(const struct btf_type *t)
{
	u8 nr_bits, nr_bytes;
	u32 int_data;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
	if (BITS_PER_BYTE_MASKED(nr_bits) ||
	    BTF_INT_OFFSET(int_data) ||
	    (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
	     nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
	     nr_bytes != (2 * sizeof(u64)))) {
		return false;
	}

	return true;
}

bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
			   const struct btf_member *m,
			   u32 expected_offset, u32 expected_size)
{
	const struct btf_type *t;
	u32 id, int_data;
	u8 nr_bits;

	id = m->type;
	t = btf_type_id_size(btf, &id, NULL);
	if (!t || !btf_type_is_int(t))
		return false;

	int_data = btf_type_int(t);
	nr_bits = BTF_INT_BITS(int_data);
	if (btf_type_kflag(s)) {
		u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
		u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);

		return !bitfield_size &&
		       BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
		       BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
	}

	if (BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(m->offset) ||
	    BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
	    BITS_PER_BYTE_MASKED(nr_bits) ||
	    BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
		return false;

	return true;
}

__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
					      const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
					    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}

__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
						   const struct btf_type *t,
						   bool log_details,
						   const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	u8 kind = BTF_INFO_KIND(t->info);
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;

	__btf_verifier_log(log, "[%u] %s %s%s",
			   env->log_type_id,
			   btf_kind_str[kind],
			   __btf_name_by_offset(btf, t->name_off),
			   log_details ? " " : "");

	if (log_details)
		btf_type_ops(t)->log_details(env, t);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

#define btf_verifier_log_type(env, t, ...) \
	__btf_verifier_log_type((env), (t), true, __VA_ARGS__)
#define btf_verifier_log_basic(env, t, ...) \
	__btf_verifier_log_type((env), (t), false, __VA_ARGS__)

__printf(4, 5)
static void btf_verifier_log_member(struct btf_verifier_env *env,
				    const struct btf_type *struct_type,
				    const struct btf_member *member,
				    const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	struct btf *btf = env->btf;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;

	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, struct_type, NULL);

	if (btf_type_kflag(struct_type))
		__btf_verifier_log(log,
				   "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type,
				   BTF_MEMBER_BITFIELD_SIZE(member->offset),
				   BTF_MEMBER_BIT_OFFSET(member->offset));
	else
		__btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
				   __btf_name_by_offset(btf, member->name_off),
				   member->type, member->offset);

	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

__printf(4, 5)
static void btf_verifier_log_vsi(struct btf_verifier_env *env,
				 const struct btf_type *datasec_type,
				 const struct btf_var_secinfo *vsi,
				 const char *fmt, ...)
{
	struct bpf_verifier_log *log = &env->log;
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;
	if (log->level == BPF_LOG_KERNEL && !fmt)
		return;
	if (env->phase != CHECK_META)
		btf_verifier_log_type(env, datasec_type, NULL);

	__btf_verifier_log(log, "\t type_id=%u offset=%u size=%u",
			   vsi->type, vsi->offset, vsi->size);
	if (fmt && *fmt) {
		__btf_verifier_log(log, " ");
		va_start(args, fmt);
		bpf_verifier_vlog(log, fmt, args);
		va_end(args);
	}

	__btf_verifier_log(log, "\n");
}

static void btf_verifier_log_hdr(struct btf_verifier_env *env,
				 u32 btf_data_size)
{
	struct bpf_verifier_log *log = &env->log;
	const struct btf *btf = env->btf;
	const struct btf_header *hdr;

	if (!bpf_verifier_log_needed(log))
		return;

	if (log->level == BPF_LOG_KERNEL)
		return;
	hdr = &btf->hdr;
	__btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
	__btf_verifier_log(log, "version: %u\n", hdr->version);
	__btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
	__btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
	__btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
	__btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
	__btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
	__btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
	__btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
}

static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
{
	struct btf *btf = env->btf;

	if (btf->types_size - btf->nr_types < 2) {
		struct btf_type **new_types;
		u32 expand_by, new_size;

		if (btf->types_size == BTF_MAX_TYPE) {
			btf_verifier_log(env, "Exceeded max num of types");
			return -E2BIG;
		}

		expand_by = max_t(u32, btf->types_size >> 2, 16);
		new_size = min_t(u32, BTF_MAX_TYPE,
				 btf->types_size + expand_by);

		new_types = kvcalloc(new_size, sizeof(*new_types),
				     GFP_KERNEL | __GFP_NOWARN);
		if (!new_types)
			return -ENOMEM;

		if (btf->nr_types == 0)
			new_types[0] = &btf_void;
		else
			memcpy(new_types, btf->types,
			       sizeof(*btf->types) * (btf->nr_types + 1));

		kvfree(btf->types);
		btf->types = new_types;
		btf->types_size = new_size;
	}

	btf->types[++(btf->nr_types)] = t;

	return 0;
}

static int btf_alloc_id(struct btf *btf)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock_bh(&btf_idr_lock);
	id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
	if (id > 0)
		btf->id = id;
	spin_unlock_bh(&btf_idr_lock);
	idr_preload_end();

	if (WARN_ON_ONCE(!id))
		return -ENOSPC;

	return id > 0 ? 0 : id;
}

static void btf_free_id(struct btf *btf)
{
	unsigned long flags;

	spin_lock_irqsave(&btf_idr_lock, flags);
	idr_remove(&btf_idr, btf->id);
	spin_unlock_irqrestore(&btf_idr_lock, flags);
}

static void btf_free(struct btf *btf)
{
	kvfree(btf->types);
	kvfree(btf->resolved_sizes);
	kvfree(btf->resolved_ids);
	kvfree(btf->data);
	kfree(btf);
}

static void btf_free_rcu(struct rcu_head *rcu)
{
	struct btf *btf = container_of(rcu, struct btf, rcu);

	btf_free(btf);
}

void btf_put(struct btf *btf)
{
	if (btf && refcount_dec_and_test(&btf->refcnt)) {
		btf_free_id(btf);
		call_rcu(&btf->rcu, btf_free_rcu);
	}
}

static int env_resolve_init(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 nr_types = btf->nr_types;
	u32 *resolved_sizes = NULL;
	u32 *resolved_ids = NULL;
	u8 *visit_states = NULL;

	resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
				  GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_sizes)
		goto nomem;

	resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
				GFP_KERNEL | __GFP_NOWARN);
	if (!resolved_ids)
		goto nomem;

	visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
				GFP_KERNEL | __GFP_NOWARN);
	if (!visit_states)
		goto nomem;

	btf->resolved_sizes = resolved_sizes;
	btf->resolved_ids = resolved_ids;
	env->visit_states = visit_states;

	return 0;

nomem:
	kvfree(resolved_sizes);
	kvfree(resolved_ids);
	kvfree(visit_states);
	return -ENOMEM;
}

static void btf_verifier_env_free(struct btf_verifier_env *env)
{
	kvfree(env->visit_states);
	kfree(env);
}

static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
				     const struct btf_type *next_type)
{
	switch (env->resolve_mode) {
	case RESOLVE_TBD:
		return !btf_type_needs_resolve(next_type);
	case RESOLVE_PTR:
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_ptr(next_type);
	case RESOLVE_STRUCT_OR_ARRAY:
		return !btf_type_is_modifier(next_type) &&
		       !btf_type_is_array(next_type) &&
		       !btf_type_is_struct(next_type);
	default:
		BUG();
	}
}

static bool env_type_is_resolved(const struct btf_verifier_env *env,
				 u32 type_id)
{
	return env->visit_states[type_id] == RESOLVED;
}

static int env_stack_push(struct btf_verifier_env *env,
			  const struct btf_type *t, u32 type_id)
{
	struct resolve_vertex *v;

	if (env->top_stack == MAX_RESOLVE_DEPTH)
		return -E2BIG;

	if (env->visit_states[type_id] != NOT_VISITED)
		return -EEXIST;

	env->visit_states[type_id] = VISITED;

	v = &env->stack[env->top_stack++];
	v->t = t;
	v->type_id = type_id;
	v->next_member = 0;

	if (env->resolve_mode == RESOLVE_TBD) {
		if (btf_type_is_ptr(t))
			env->resolve_mode = RESOLVE_PTR;
		else if (btf_type_is_struct(t) || btf_type_is_array(t))
			env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
	}

	return 0;
}

static void env_stack_set_next_member(struct btf_verifier_env *env,
				      u16 next_member)
{
	env->stack[env->top_stack - 1].next_member = next_member;
}

static void env_stack_pop_resolved(struct btf_verifier_env *env,
				   u32 resolved_type_id,
				   u32 resolved_size)
{
	u32 type_id = env->stack[--(env->top_stack)].type_id;
	struct btf *btf = env->btf;

	btf->resolved_sizes[type_id] = resolved_size;
	btf->resolved_ids[type_id] = resolved_type_id;
	env->visit_states[type_id] = RESOLVED;
}

static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
{
	return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
}

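/*
 * Descriptive note (inferred from the code below): btf_resolve_size()
 * walks @type through modifiers/typedefs and arrays and reports the
 * total byte size in *type_size. For arrays it can also report the
 * element type and total number of elements and returns the outermost
 * array type; otherwise it returns the resolved type itself. The size
 * multiplications are checked against U32_MAX overflow and the walk is
 * bounded by MAX_RESOLVE_DEPTH.
 */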
const struct btf_type *
btf_resolve_size(const struct btf *btf, const struct btf_type *type,
		 u32 *type_size, const struct btf_type **elem_type,
		 u32 *total_nelems)
{
	const struct btf_type *array_type = NULL;
	const struct btf_array *array;
	u32 i, size, nelems = 1;

	for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
		switch (BTF_INFO_KIND(type->info)) {
		case BTF_KIND_INT:
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
		case BTF_KIND_ENUM:
			size = type->size;
			goto resolved;

		case BTF_KIND_PTR:
			size = sizeof(void *);
			goto resolved;

		case BTF_KIND_TYPEDEF:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
			type = btf_type_by_id(btf, type->type);
			break;

		case BTF_KIND_ARRAY:
			if (!array_type)
				array_type = type;
			array = btf_type_array(type);
			if (nelems && array->nelems > U32_MAX / nelems)
				return ERR_PTR(-EINVAL);
			nelems *= array->nelems;
			type = btf_type_by_id(btf, array->type);
			break;

		default:
			return ERR_PTR(-EINVAL);
		}
	}

	return ERR_PTR(-EINVAL);

resolved:
	if (nelems && size > U32_MAX / nelems)
		return ERR_PTR(-EINVAL);

	*type_size = nelems * size;
	if (total_nelems)
		*total_nelems = nelems;
	if (elem_type)
		*elem_type = type;

	return array_type ? : type;
}

static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
						  u32 *type_id)
{
	*type_id = btf->resolved_ids[*type_id];
	return btf_type_by_id(btf, *type_id);
}

const struct btf_type *btf_type_id_size(const struct btf *btf,
					u32 *type_id, u32 *ret_size)
{
	const struct btf_type *size_type;
	u32 size_type_id = *type_id;
	u32 size = 0;

	size_type = btf_type_by_id(btf, size_type_id);
	if (btf_type_nosize_or_null(size_type))
		return NULL;

	if (btf_type_has_size(size_type)) {
		size = size_type->size;
	} else if (btf_type_is_array(size_type)) {
		size = btf->resolved_sizes[size_type_id];
	} else if (btf_type_is_ptr(size_type)) {
		size = sizeof(void *);
	} else {
		if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) &&
				 !btf_type_is_var(size_type)))
			return NULL;

		size_type_id = btf->resolved_ids[size_type_id];
		size_type = btf_type_by_id(btf, size_type_id);
		if (btf_type_nosize_or_null(size_type))
			return NULL;
		else if (btf_type_has_size(size_type))
			size = size_type->size;
		else if (btf_type_is_array(size_type))
			size = btf->resolved_sizes[size_type_id];
		else if (btf_type_is_ptr(size_type))
			size = sizeof(void *);
		else
			return NULL;
	}

	*type_id = size_type_id;
	if (ret_size)
		*ret_size = size;

	return size_type;
}

static int btf_df_check_member(struct btf_verifier_env *env,
			       const struct btf_type *struct_type,
			       const struct btf_member *member,
			       const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_member");
	return -EINVAL;
}

static int btf_df_check_kflag_member(struct btf_verifier_env *env,
				     const struct btf_type *struct_type,
				     const struct btf_member *member,
				     const struct btf_type *member_type)
{
	btf_verifier_log_basic(env, struct_type,
			       "Unsupported check_kflag_member");
	return -EINVAL;
}

static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
					  const struct btf_type *struct_type,
					  const struct btf_member *member,
					  const struct btf_type *member_type)
{
	if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	return btf_type_ops(member_type)->check_member(env, struct_type,
						       member,
						       member_type);
}

static int btf_df_resolve(struct btf_verifier_env *env,
			  const struct resolve_vertex *v)
{
	btf_verifier_log_basic(env, v->t, "Unsupported resolve");
	return -EINVAL;
}

static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
			    u32 type_id, void *data, u8 bits_offsets,
			    struct seq_file *m)
{
	seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
}

static int btf_int_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{
	u32 int_data = btf_type_int(member_type);
	u32 struct_bits_off = member->offset;
	u32 struct_size = struct_type->size;
	u32 nr_copy_bits;
	u32 bytes_offset;

	if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
		btf_verifier_log_member(env, struct_type, member,
					"bits_offset exceeds U32_MAX");
		return -EINVAL;
	}

	struct_bits_off += BTF_INT_OFFSET(int_data);
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	nr_copy_bits = BTF_INT_BITS(int_data) +
		       BITS_PER_BYTE_MASKED(struct_bits_off);

	if (nr_copy_bits > BITS_PER_U128) {
		btf_verifier_log_member(env, struct_type, member,
					"nr_copy_bits exceeds 128");
		return -EINVAL;
	}

	if (struct_size < bytes_offset ||
	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static int btf_int_check_kflag_member(struct btf_verifier_env *env,
				      const struct btf_type *struct_type,
				      const struct btf_member *member,
				      const struct btf_type *member_type)
{
	u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
	u32 int_data = btf_type_int(member_type);
	u32 struct_size = struct_type->size;
	u32 nr_copy_bits;

	if (!btf_type_int_is_regular(member_type)) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member base type");
		return -EINVAL;
	}

	nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
	struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
	nr_int_data_bits = BTF_INT_BITS(int_data);
	if (!nr_bits) {
		if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
			btf_verifier_log_member(env, struct_type, member,
						"Invalid member offset");
			return -EINVAL;
		}

		nr_bits = nr_int_data_bits;
	} else if (nr_bits > nr_int_data_bits) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member bitfield_size");
		return -EINVAL;
	}

	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
	nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
	if (nr_copy_bits > BITS_PER_U128) {
		btf_verifier_log_member(env, struct_type, member,
					"nr_copy_bits exceeds 128");
		return -EINVAL;
	}

	if (struct_size < bytes_offset ||
	    struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static s32 btf_int_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	u32 int_data, nr_bits, meta_needed = sizeof(int_data);
	u16 encoding;

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	int_data = btf_type_int(t);
	if (int_data & ~BTF_INT_MASK) {
		btf_verifier_log_basic(env, t, "Invalid int_data:%x",
				       int_data);
		return -EINVAL;
	}

	nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);

	if (nr_bits > BITS_PER_U128) {
		btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
				      BITS_PER_U128);
		return -EINVAL;
	}

	if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
		btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
		return -EINVAL;
	}

	encoding = BTF_INT_ENCODING(int_data);
	if (encoding &&
	    encoding != BTF_INT_SIGNED &&
	    encoding != BTF_INT_CHAR &&
	    encoding != BTF_INT_BOOL) {
		btf_verifier_log_type(env, t, "Unsupported encoding");
		return -ENOTSUPP;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}

static void btf_int_log(struct btf_verifier_env *env,
			const struct btf_type *t)
{
	int int_data = btf_type_int(t);

	btf_verifier_log(env,
			 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
			 t->size, BTF_INT_OFFSET(int_data),
			 BTF_INT_BITS(int_data),
			 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
}

static void btf_int128_print(struct seq_file *m, void *data)
{
	u64 upper_num, lower_num;

#ifdef __BIG_ENDIAN_BITFIELD
	upper_num = *(u64 *)data;
	lower_num = *(u64 *)(data + 8);
#else
	upper_num = *(u64 *)(data + 8);
	lower_num = *(u64 *)data;
#endif
	if (upper_num == 0)
		seq_printf(m, "0x%llx", lower_num);
	else
		seq_printf(m, "0x%llx%016llx", upper_num, lower_num);
}

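/*
 * Descriptive note (inferred from the code below): bitfields up to 128
 * bits are printed by copying the covering bytes into a two-u64
 * buffer, then shifting the 128-bit value left and right
 * (btf_int128_shift) so that only the wanted bits remain, taking the
 * bitfield endianness into account.
 */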
static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
			     u16 right_shift_bits)
{
	u64 upper_num, lower_num;

#ifdef __BIG_ENDIAN_BITFIELD
	upper_num = print_num[0];
	lower_num = print_num[1];
#else
	upper_num = print_num[1];
	lower_num = print_num[0];
#endif

	if (left_shift_bits >= 64) {
		upper_num = lower_num << (left_shift_bits - 64);
		lower_num = 0;
	} else {
		upper_num = (upper_num << left_shift_bits) |
			    (lower_num >> (64 - left_shift_bits));
		lower_num = lower_num << left_shift_bits;
	}

	if (right_shift_bits >= 64) {
		lower_num = upper_num >> (right_shift_bits - 64);
		upper_num = 0;
	} else {
		lower_num = (lower_num >> right_shift_bits) |
			    (upper_num << (64 - right_shift_bits));
		upper_num = upper_num >> right_shift_bits;
	}

#ifdef __BIG_ENDIAN_BITFIELD
	print_num[0] = upper_num;
	print_num[1] = lower_num;
#else
	print_num[0] = lower_num;
	print_num[1] = upper_num;
#endif
}

static void btf_bitfield_seq_show(void *data, u8 bits_offset,
				  u8 nr_bits, struct seq_file *m)
{
	u16 left_shift_bits, right_shift_bits;
	u8 nr_copy_bytes;
	u8 nr_copy_bits;
	u64 print_num[2] = {};

	nr_copy_bits = nr_bits + bits_offset;
	nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);

	memcpy(print_num, data, nr_copy_bytes);

#ifdef __BIG_ENDIAN_BITFIELD
	left_shift_bits = bits_offset;
#else
	left_shift_bits = BITS_PER_U128 - nr_copy_bits;
#endif
	right_shift_bits = BITS_PER_U128 - nr_bits;

	btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
	btf_int128_print(m, print_num);
}

static void btf_int_bits_seq_show(const struct btf *btf,
				  const struct btf_type *t,
				  void *data, u8 bits_offset,
				  struct seq_file *m)
{
	u32 int_data = btf_type_int(t);
	u8 nr_bits = BTF_INT_BITS(int_data);
	u8 total_bits_offset;

	total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
	data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
	bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
	btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
}

static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct seq_file *m)
{
	u32 int_data = btf_type_int(t);
	u8 encoding = BTF_INT_ENCODING(int_data);
	bool sign = encoding & BTF_INT_SIGNED;
	u8 nr_bits = BTF_INT_BITS(int_data);

	if (bits_offset || BTF_INT_OFFSET(int_data) ||
	    BITS_PER_BYTE_MASKED(nr_bits)) {
		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
		return;
	}

	switch (nr_bits) {
	case 128:
		btf_int128_print(m, data);
		break;
	case 64:
		if (sign)
			seq_printf(m, "%lld", *(s64 *)data);
		else
			seq_printf(m, "%llu", *(u64 *)data);
		break;
	case 32:
		if (sign)
			seq_printf(m, "%d", *(s32 *)data);
		else
			seq_printf(m, "%u", *(u32 *)data);
		break;
	case 16:
		if (sign)
			seq_printf(m, "%d", *(s16 *)data);
		else
			seq_printf(m, "%u", *(u16 *)data);
		break;
	case 8:
		if (sign)
			seq_printf(m, "%d", *(s8 *)data);
		else
			seq_printf(m, "%u", *(u8 *)data);
		break;
	default:
		btf_int_bits_seq_show(btf, t, data, bits_offset, m);
	}
}

static const struct btf_kind_operations int_ops = {
	.check_meta = btf_int_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_int_check_member,
	.check_kflag_member = btf_int_check_kflag_member,
	.log_details = btf_int_log,
	.seq_show = btf_int_seq_show,
};

static int btf_modifier_check_member(struct btf_verifier_env *env,
				     const struct btf_type *struct_type,
				     const struct btf_member *member,
				     const struct btf_type *member_type)
{
	const struct btf_type *resolved_type;
	u32 resolved_type_id = member->type;
	struct btf_member resolved_member;
	struct btf *btf = env->btf;

	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
	if (!resolved_type) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member");
		return -EINVAL;
	}

	resolved_member = *member;
	resolved_member.type = resolved_type_id;

	return btf_type_ops(resolved_type)->check_member(env, struct_type,
							 &resolved_member,
							 resolved_type);
}

static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
					   const struct btf_type *struct_type,
					   const struct btf_member *member,
					   const struct btf_type *member_type)
{
	const struct btf_type *resolved_type;
	u32 resolved_type_id = member->type;
	struct btf_member resolved_member;
	struct btf *btf = env->btf;

	resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
	if (!resolved_type) {
		btf_verifier_log_member(env, struct_type, member,
					"Invalid member");
		return -EINVAL;
	}

	resolved_member = *member;
	resolved_member.type = resolved_type_id;

	return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
							       &resolved_member,
							       resolved_type);
}

static int btf_ptr_check_member(struct btf_verifier_env *env,
				const struct btf_type *struct_type,
				const struct btf_member *member,
				const struct btf_type *member_type)
{
	u32 struct_size, struct_bits_off, bytes_offset;

	struct_size = struct_type->size;
	struct_bits_off = member->offset;
	bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);

	if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member is not byte aligned");
		return -EINVAL;
	}

	if (struct_size - bytes_offset < sizeof(void *)) {
		btf_verifier_log_member(env, struct_type, member,
					"Member exceeds struct_size");
		return -EINVAL;
	}

	return 0;
}

static int btf_ref_type_check_meta(struct btf_verifier_env *env,
				   const struct btf_type *t,
				   u32 meta_left)
{
	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	if (!BTF_TYPE_ID_VALID(t->type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
		if (!t->name_off ||
		    !btf_name_valid_identifier(env->btf, t->name_off)) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}
	} else {
		if (t->name_off) {
			btf_verifier_log_type(env, t, "Invalid name");
			return -EINVAL;
		}
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}

static int btf_modifier_resolve(struct btf_verifier_env *env,
				const struct resolve_vertex *v)
{
	const struct btf_type *t = v->t;
	const struct btf_type *next_type;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}

static int btf_var_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}

static int btf_ptr_resolve(struct btf_verifier_env *env,
			   const struct resolve_vertex *v)
{
	const struct btf_type *next_type;
	const struct btf_type *t = v->t;
	u32 next_type_id = t->type;
	struct btf *btf = env->btf;

	next_type = btf_type_by_id(btf, next_type_id);
	if (!next_type || btf_type_is_resolve_source_only(next_type)) {
		btf_verifier_log_type(env, v->t, "Invalid type_id");
		return -EINVAL;
	}

	if (!env_type_is_resolve_sink(env, next_type) &&
	    !env_type_is_resolved(env, next_type_id))
		return env_stack_push(env, next_type, next_type_id);

	if (btf_type_is_modifier(next_type)) {
		const struct btf_type *resolved_type;
		u32 resolved_type_id;

		resolved_type_id = next_type_id;
		resolved_type = btf_type_id_resolve(btf, &resolved_type_id);

		if (btf_type_is_ptr(resolved_type) &&
		    !env_type_is_resolve_sink(env, resolved_type) &&
		    !env_type_is_resolved(env, resolved_type_id))
			return env_stack_push(env, resolved_type,
					      resolved_type_id);
	}

	if (!btf_type_id_size(btf, &next_type_id, NULL)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		if (!btf_type_is_void(next_type) &&
		    !btf_type_is_fwd(next_type) &&
		    !btf_type_is_func_proto(next_type)) {
			btf_verifier_log_type(env, v->t, "Invalid type_id");
			return -EINVAL;
		}
	}

	env_stack_pop_resolved(env, next_type_id, 0);

	return 0;
}

static void btf_modifier_seq_show(const struct btf *btf,
				  const struct btf_type *t,
				  u32 type_id, void *data,
				  u8 bits_offset, struct seq_file *m)
{
	if (btf->resolved_ids)
		t = btf_type_id_resolve(btf, &type_id);
	else
		t = btf_type_skip_modifiers(btf, type_id, NULL);

	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
}

static void btf_var_seq_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct seq_file *m)
{
	t = btf_type_id_resolve(btf, &type_id);

	btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
}

static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
			     u32 type_id, void *data, u8 bits_offset,
			     struct seq_file *m)
{
	seq_printf(m, "%p", *(void **)data);
}

static void btf_ref_type_log(struct btf_verifier_env *env,
			     const struct btf_type *t)
{
	btf_verifier_log(env, "type_id=%u", t->type);
}

static struct btf_kind_operations modifier_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_modifier_resolve,
	.check_member = btf_modifier_check_member,
	.check_kflag_member = btf_modifier_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_modifier_seq_show,
};

static struct btf_kind_operations ptr_ops = {
	.check_meta = btf_ref_type_check_meta,
	.resolve = btf_ptr_resolve,
	.check_member = btf_ptr_check_member,
	.check_kflag_member = btf_generic_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_ptr_seq_show,
};

static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 meta_left)
{
	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (t->type) {
		btf_verifier_log_type(env, t, "type != 0");
		return -EINVAL;
	}

	if (!t->name_off ||
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}

static void btf_fwd_type_log(struct btf_verifier_env *env,
			     const struct btf_type *t)
{
	btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
}

static struct btf_kind_operations fwd_ops = {
	.check_meta = btf_fwd_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_fwd_type_log,
	.seq_show = btf_df_seq_show,
};
1950
1951static int btf_array_check_member(struct btf_verifier_env *env,
1952 const struct btf_type *struct_type,
1953 const struct btf_member *member,
1954 const struct btf_type *member_type)
1955{
1956 u32 struct_bits_off = member->offset;
1957 u32 struct_size, bytes_offset;
1958 u32 array_type_id, array_size;
1959 struct btf *btf = env->btf;
1960
1961 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1962 btf_verifier_log_member(env, struct_type, member,
1963 "Member is not byte aligned");
1964 return -EINVAL;
1965 }
1966
1967 array_type_id = member->type;
1968 btf_type_id_size(btf, &array_type_id, &array_size);
1969 struct_size = struct_type->size;
1970 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1971 if (struct_size - bytes_offset < array_size) {
1972 btf_verifier_log_member(env, struct_type, member,
1973 "Member exceeds struct_size");
1974 return -EINVAL;
1975 }
1976
1977 return 0;
1978}
1979
1980static s32 btf_array_check_meta(struct btf_verifier_env *env,
1981 const struct btf_type *t,
1982 u32 meta_left)
1983{
1984 const struct btf_array *array = btf_type_array(t);
1985 u32 meta_needed = sizeof(*array);
1986
1987 if (meta_left < meta_needed) {
1988 btf_verifier_log_basic(env, t,
1989 "meta_left:%u meta_needed:%u",
1990 meta_left, meta_needed);
1991 return -EINVAL;
1992 }
1993
1994
1995 if (t->name_off) {
1996 btf_verifier_log_type(env, t, "Invalid name");
1997 return -EINVAL;
1998 }
1999
2000 if (btf_type_vlen(t)) {
2001 btf_verifier_log_type(env, t, "vlen != 0");
2002 return -EINVAL;
2003 }
2004
2005 if (btf_type_kflag(t)) {
2006 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2007 return -EINVAL;
2008 }
2009
2010 if (t->size) {
2011 btf_verifier_log_type(env, t, "size != 0");
2012 return -EINVAL;
2013 }
2014
2015
2016
2017
2018 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
2019 btf_verifier_log_type(env, t, "Invalid elem");
2020 return -EINVAL;
2021 }
2022
2023 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
2024 btf_verifier_log_type(env, t, "Invalid index");
2025 return -EINVAL;
2026 }
2027
2028 btf_verifier_log_type(env, t, NULL);
2029
2030 return meta_needed;
2031}
2032
2033static int btf_array_resolve(struct btf_verifier_env *env,
2034 const struct resolve_vertex *v)
2035{
2036 const struct btf_array *array = btf_type_array(v->t);
2037 const struct btf_type *elem_type, *index_type;
2038 u32 elem_type_id, index_type_id;
2039 struct btf *btf = env->btf;
2040 u32 elem_size;
2041
2042
2043 index_type_id = array->index_type;
2044 index_type = btf_type_by_id(btf, index_type_id);
2045 if (btf_type_nosize_or_null(index_type) ||
2046 btf_type_is_resolve_source_only(index_type)) {
2047 btf_verifier_log_type(env, v->t, "Invalid index");
2048 return -EINVAL;
2049 }
2050
2051 if (!env_type_is_resolve_sink(env, index_type) &&
2052 !env_type_is_resolved(env, index_type_id))
2053 return env_stack_push(env, index_type, index_type_id);
2054
2055 index_type = btf_type_id_size(btf, &index_type_id, NULL);
2056 if (!index_type || !btf_type_is_int(index_type) ||
2057 !btf_type_int_is_regular(index_type)) {
2058 btf_verifier_log_type(env, v->t, "Invalid index");
2059 return -EINVAL;
2060 }
2061
2062
2063 elem_type_id = array->type;
2064 elem_type = btf_type_by_id(btf, elem_type_id);
2065 if (btf_type_nosize_or_null(elem_type) ||
2066 btf_type_is_resolve_source_only(elem_type)) {
2067 btf_verifier_log_type(env, v->t,
2068 "Invalid elem");
2069 return -EINVAL;
2070 }
2071
2072 if (!env_type_is_resolve_sink(env, elem_type) &&
2073 !env_type_is_resolved(env, elem_type_id))
2074 return env_stack_push(env, elem_type, elem_type_id);
2075
2076 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2077 if (!elem_type) {
2078 btf_verifier_log_type(env, v->t, "Invalid elem");
2079 return -EINVAL;
2080 }
2081
2082 if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
2083 btf_verifier_log_type(env, v->t, "Invalid array of int");
2084 return -EINVAL;
2085 }
2086
2087 if (array->nelems && elem_size > U32_MAX / array->nelems) {
2088 btf_verifier_log_type(env, v->t,
2089 "Array size overflows U32_MAX");
2090 return -EINVAL;
2091 }
2092
2093 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
2094
2095 return 0;
2096}
2097
2098static void btf_array_log(struct btf_verifier_env *env,
2099 const struct btf_type *t)
2100{
2101 const struct btf_array *array = btf_type_array(t);
2102
2103 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
2104 array->type, array->index_type, array->nelems);
2105}
2106
2107static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
2108 u32 type_id, void *data, u8 bits_offset,
2109 struct seq_file *m)
2110{
2111 const struct btf_array *array = btf_type_array(t);
2112 const struct btf_kind_operations *elem_ops;
2113 const struct btf_type *elem_type;
2114 u32 i, elem_size, elem_type_id;
2115
2116 elem_type_id = array->type;
2117 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
2118 elem_ops = btf_type_ops(elem_type);
2119 seq_puts(m, "[");
2120 for (i = 0; i < array->nelems; i++) {
2121 if (i)
2122 seq_puts(m, ",");
2123
2124 elem_ops->seq_show(btf, elem_type, elem_type_id, data,
2125 bits_offset, m);
2126 data += elem_size;
2127 }
2128 seq_puts(m, "]");
2129}
2130
2131static struct btf_kind_operations array_ops = {
2132 .check_meta = btf_array_check_meta,
2133 .resolve = btf_array_resolve,
2134 .check_member = btf_array_check_member,
2135 .check_kflag_member = btf_generic_check_kflag_member,
2136 .log_details = btf_array_log,
2137 .seq_show = btf_array_seq_show,
2138};
2139
2140static int btf_struct_check_member(struct btf_verifier_env *env,
2141 const struct btf_type *struct_type,
2142 const struct btf_member *member,
2143 const struct btf_type *member_type)
2144{
2145 u32 struct_bits_off = member->offset;
2146 u32 struct_size, bytes_offset;
2147
2148 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2149 btf_verifier_log_member(env, struct_type, member,
2150 "Member is not byte aligned");
2151 return -EINVAL;
2152 }
2153
2154 struct_size = struct_type->size;
2155 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2156 if (struct_size - bytes_offset < member_type->size) {
2157 btf_verifier_log_member(env, struct_type, member,
2158 "Member exceeds struct_size");
2159 return -EINVAL;
2160 }
2161
2162 return 0;
2163}
2164
2165static s32 btf_struct_check_meta(struct btf_verifier_env *env,
2166 const struct btf_type *t,
2167 u32 meta_left)
2168{
2169 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
2170 const struct btf_member *member;
2171 u32 meta_needed, last_offset;
2172 struct btf *btf = env->btf;
2173 u32 struct_size = t->size;
2174 u32 offset;
2175 u16 i;
2176
2177 meta_needed = btf_type_vlen(t) * sizeof(*member);
2178 if (meta_left < meta_needed) {
2179 btf_verifier_log_basic(env, t,
2180 "meta_left:%u meta_needed:%u",
2181 meta_left, meta_needed);
2182 return -EINVAL;
2183 }
2184
2185
2186 if (t->name_off &&
2187 !btf_name_valid_identifier(env->btf, t->name_off)) {
2188 btf_verifier_log_type(env, t, "Invalid name");
2189 return -EINVAL;
2190 }
2191
2192 btf_verifier_log_type(env, t, NULL);
2193
2194 last_offset = 0;
2195 for_each_member(i, t, member) {
2196 if (!btf_name_offset_valid(btf, member->name_off)) {
2197 btf_verifier_log_member(env, t, member,
2198 "Invalid member name_offset:%u",
2199 member->name_off);
2200 return -EINVAL;
2201 }
2202
2203
2204 if (member->name_off &&
2205 !btf_name_valid_identifier(btf, member->name_off)) {
2206 btf_verifier_log_member(env, t, member, "Invalid name");
2207 return -EINVAL;
2208 }
2209
2210 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
2211 btf_verifier_log_member(env, t, member,
2212 "Invalid type_id");
2213 return -EINVAL;
2214 }
2215
2216 offset = btf_member_bit_offset(t, member);
2217 if (is_union && offset) {
2218 btf_verifier_log_member(env, t, member,
2219 "Invalid member bits_offset");
2220 return -EINVAL;
2221 }
2222
2223
2224
2225
2226
2227 if (last_offset > offset) {
2228 btf_verifier_log_member(env, t, member,
2229 "Invalid member bits_offset");
2230 return -EINVAL;
2231 }
2232
2233 if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
2234 btf_verifier_log_member(env, t, member,
2235 "Member bits_offset exceeds its struct size");
2236 return -EINVAL;
2237 }
2238
2239 btf_verifier_log_member(env, t, member, NULL);
2240 last_offset = offset;
2241 }
2242
2243 return meta_needed;
2244}
2245
2246static int btf_struct_resolve(struct btf_verifier_env *env,
2247 const struct resolve_vertex *v)
2248{
2249 const struct btf_member *member;
2250 int err;
2251 u16 i;
2252
2253
2254
2255
2256
2257 if (v->next_member) {
2258 const struct btf_type *last_member_type;
2259 const struct btf_member *last_member;
2260 u16 last_member_type_id;
2261
2262 last_member = btf_type_member(v->t) + v->next_member - 1;
2263 last_member_type_id = last_member->type;
2264 if (WARN_ON_ONCE(!env_type_is_resolved(env,
2265 last_member_type_id)))
2266 return -EINVAL;
2267
2268 last_member_type = btf_type_by_id(env->btf,
2269 last_member_type_id);
2270 if (btf_type_kflag(v->t))
2271 err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
2272 last_member,
2273 last_member_type);
2274 else
2275 err = btf_type_ops(last_member_type)->check_member(env, v->t,
2276 last_member,
2277 last_member_type);
2278 if (err)
2279 return err;
2280 }
2281
2282 for_each_member_from(i, v->next_member, v->t, member) {
2283 u32 member_type_id = member->type;
2284 const struct btf_type *member_type = btf_type_by_id(env->btf,
2285 member_type_id);
2286
2287 if (btf_type_nosize_or_null(member_type) ||
2288 btf_type_is_resolve_source_only(member_type)) {
2289 btf_verifier_log_member(env, v->t, member,
2290 "Invalid member");
2291 return -EINVAL;
2292 }
2293
2294 if (!env_type_is_resolve_sink(env, member_type) &&
2295 !env_type_is_resolved(env, member_type_id)) {
2296 env_stack_set_next_member(env, i + 1);
2297 return env_stack_push(env, member_type, member_type_id);
2298 }
2299
2300 if (btf_type_kflag(v->t))
2301 err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
2302 member,
2303 member_type);
2304 else
2305 err = btf_type_ops(member_type)->check_member(env, v->t,
2306 member,
2307 member_type);
2308 if (err)
2309 return err;
2310 }
2311
2312 env_stack_pop_resolved(env, 0, 0);
2313
2314 return 0;
2315}
2316
2317static void btf_struct_log(struct btf_verifier_env *env,
2318 const struct btf_type *t)
2319{
2320 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2321}
2326
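/* Find "struct bpf_spin_lock" in the map value BTF type t.
 * Return its byte offset if found, -E2BIG if there is more than one,
 * and -EINVAL/-ENOENT on other errors.
 */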
2327int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
2328{
2329 const struct btf_member *member;
2330 u32 i, off = -ENOENT;
2331
2332 if (!__btf_type_is_struct(t))
2333 return -EINVAL;
2334
2335 for_each_member(i, t, member) {
2336 const struct btf_type *member_type = btf_type_by_id(btf,
2337 member->type);
2338 if (!__btf_type_is_struct(member_type))
2339 continue;
2340 if (member_type->size != sizeof(struct bpf_spin_lock))
2341 continue;
2342 if (strcmp(__btf_name_by_offset(btf, member_type->name_off),
2343 "bpf_spin_lock"))
2344 continue;
2345 if (off != -ENOENT)
2346
2347 return -E2BIG;
2348 off = btf_member_bit_offset(t, member);
2349 if (off % 8)
2350
2351 return -EINVAL;
2352 off /= 8;
2353 if (off % __alignof__(struct bpf_spin_lock))
2354
2355 return -EINVAL;
2356 }
2357 return off;
2358}
2359
2360static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
2361 u32 type_id, void *data, u8 bits_offset,
2362 struct seq_file *m)
2363{
2364 const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
2365 const struct btf_member *member;
2366 u32 i;
2367
2368 seq_puts(m, "{");
2369 for_each_member(i, t, member) {
2370 const struct btf_type *member_type = btf_type_by_id(btf,
2371 member->type);
2372 const struct btf_kind_operations *ops;
2373 u32 member_offset, bitfield_size;
2374 u32 bytes_offset;
2375 u8 bits8_offset;
2376
2377 if (i)
2378 seq_puts(m, seq);
2379
2380 member_offset = btf_member_bit_offset(t, member);
2381 bitfield_size = btf_member_bitfield_size(t, member);
2382 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
2383 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
2384 if (bitfield_size) {
2385 btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
2386 bitfield_size, m);
2387 } else {
2388 ops = btf_type_ops(member_type);
2389 ops->seq_show(btf, member_type, member->type,
2390 data + bytes_offset, bits8_offset, m);
2391 }
2392 }
2393 seq_puts(m, "}");
2394}
2395
2396static struct btf_kind_operations struct_ops = {
2397 .check_meta = btf_struct_check_meta,
2398 .resolve = btf_struct_resolve,
2399 .check_member = btf_struct_check_member,
2400 .check_kflag_member = btf_generic_check_kflag_member,
2401 .log_details = btf_struct_log,
2402 .seq_show = btf_struct_seq_show,
2403};
2404
2405static int btf_enum_check_member(struct btf_verifier_env *env,
2406 const struct btf_type *struct_type,
2407 const struct btf_member *member,
2408 const struct btf_type *member_type)
2409{
2410 u32 struct_bits_off = member->offset;
2411 u32 struct_size, bytes_offset;
2412
2413 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2414 btf_verifier_log_member(env, struct_type, member,
2415 "Member is not byte aligned");
2416 return -EINVAL;
2417 }
2418
2419 struct_size = struct_type->size;
2420 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2421 if (struct_size - bytes_offset < member_type->size) {
2422 btf_verifier_log_member(env, struct_type, member,
2423 "Member exceeds struct_size");
2424 return -EINVAL;
2425 }
2426
2427 return 0;
2428}
2429
2430static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
2431 const struct btf_type *struct_type,
2432 const struct btf_member *member,
2433 const struct btf_type *member_type)
2434{
2435 u32 struct_bits_off, nr_bits, bytes_end, struct_size;
2436 u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
2437
2438 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2439 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2440 if (!nr_bits) {
2441 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2442 btf_verifier_log_member(env, struct_type, member,
2443 "Member is not byte aligned");
2444 return -EINVAL;
2445 }
2446
2447 nr_bits = int_bitsize;
2448 } else if (nr_bits > int_bitsize) {
2449 btf_verifier_log_member(env, struct_type, member,
2450 "Invalid member bitfield_size");
2451 return -EINVAL;
2452 }
2453
2454 struct_size = struct_type->size;
2455 bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
2456 if (struct_size < bytes_end) {
2457 btf_verifier_log_member(env, struct_type, member,
2458 "Member exceeds struct_size");
2459 return -EINVAL;
2460 }
2461
2462 return 0;
2463}
2464
2465static s32 btf_enum_check_meta(struct btf_verifier_env *env,
2466 const struct btf_type *t,
2467 u32 meta_left)
2468{
2469 const struct btf_enum *enums = btf_type_enum(t);
2470 struct btf *btf = env->btf;
2471 u16 i, nr_enums;
2472 u32 meta_needed;
2473
2474 nr_enums = btf_type_vlen(t);
2475 meta_needed = nr_enums * sizeof(*enums);
2476
2477 if (meta_left < meta_needed) {
2478 btf_verifier_log_basic(env, t,
2479 "meta_left:%u meta_needed:%u",
2480 meta_left, meta_needed);
2481 return -EINVAL;
2482 }
2483
2484 if (btf_type_kflag(t)) {
2485 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2486 return -EINVAL;
2487 }
2488
2489 if (t->size > 8 || !is_power_of_2(t->size)) {
2490 btf_verifier_log_type(env, t, "Unexpected size");
2491 return -EINVAL;
2492 }
2493
2494
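	/* The enum type either has no name or a valid one */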
2495 if (t->name_off &&
2496 !btf_name_valid_identifier(env->btf, t->name_off)) {
2497 btf_verifier_log_type(env, t, "Invalid name");
2498 return -EINVAL;
2499 }
2500
2501 btf_verifier_log_type(env, t, NULL);
2502
2503 for (i = 0; i < nr_enums; i++) {
2504 if (!btf_name_offset_valid(btf, enums[i].name_off)) {
2505 btf_verifier_log(env, "\tInvalid name_offset:%u",
2506 enums[i].name_off);
2507 return -EINVAL;
2508 }
2509
2510
2511 if (!enums[i].name_off ||
2512 !btf_name_valid_identifier(btf, enums[i].name_off)) {
2513 btf_verifier_log_type(env, t, "Invalid name");
2514 return -EINVAL;
2515 }
2516
2517 if (env->log.level == BPF_LOG_KERNEL)
2518 continue;
2519 btf_verifier_log(env, "\t%s val=%d\n",
2520 __btf_name_by_offset(btf, enums[i].name_off),
2521 enums[i].val);
2522 }
2523
2524 return meta_needed;
2525}
2526
2527static void btf_enum_log(struct btf_verifier_env *env,
2528 const struct btf_type *t)
2529{
2530 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2531}
2532
2533static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
2534 u32 type_id, void *data, u8 bits_offset,
2535 struct seq_file *m)
2536{
2537 const struct btf_enum *enums = btf_type_enum(t);
2538 u32 i, nr_enums = btf_type_vlen(t);
2539 int v = *(int *)data;
2540
2541 for (i = 0; i < nr_enums; i++) {
2542 if (v == enums[i].val) {
2543 seq_printf(m, "%s",
2544 __btf_name_by_offset(btf,
2545 enums[i].name_off));
2546 return;
2547 }
2548 }
2549
2550 seq_printf(m, "%d", v);
2551}
2552
2553static struct btf_kind_operations enum_ops = {
2554 .check_meta = btf_enum_check_meta,
2555 .resolve = btf_df_resolve,
2556 .check_member = btf_enum_check_member,
2557 .check_kflag_member = btf_enum_check_kflag_member,
2558 .log_details = btf_enum_log,
2559 .seq_show = btf_enum_seq_show,
2560};
2561
2562static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
2563 const struct btf_type *t,
2564 u32 meta_left)
2565{
2566 u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);
2567
2568 if (meta_left < meta_needed) {
2569 btf_verifier_log_basic(env, t,
2570 "meta_left:%u meta_needed:%u",
2571 meta_left, meta_needed);
2572 return -EINVAL;
2573 }
2574
2575 if (t->name_off) {
2576 btf_verifier_log_type(env, t, "Invalid name");
2577 return -EINVAL;
2578 }
2579
2580 if (btf_type_kflag(t)) {
2581 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2582 return -EINVAL;
2583 }
2584
2585 btf_verifier_log_type(env, t, NULL);
2586
2587 return meta_needed;
2588}
2589
2590static void btf_func_proto_log(struct btf_verifier_env *env,
2591 const struct btf_type *t)
2592{
2593 const struct btf_param *args = (const struct btf_param *)(t + 1);
2594 u16 nr_args = btf_type_vlen(t), i;
2595
2596 btf_verifier_log(env, "return=%u args=(", t->type);
2597 if (!nr_args) {
2598 btf_verifier_log(env, "void");
2599 goto done;
2600 }
2601
2602 if (nr_args == 1 && !args[0].type) {
2603
2604 btf_verifier_log(env, "vararg");
2605 goto done;
2606 }
2607
2608 btf_verifier_log(env, "%u %s", args[0].type,
2609 __btf_name_by_offset(env->btf,
2610 args[0].name_off));
2611 for (i = 1; i < nr_args - 1; i++)
2612 btf_verifier_log(env, ", %u %s", args[i].type,
2613 __btf_name_by_offset(env->btf,
2614 args[i].name_off));
2615
2616 if (nr_args > 1) {
2617 const struct btf_param *last_arg = &args[nr_args - 1];
2618
2619 if (last_arg->type)
2620 btf_verifier_log(env, ", %u %s", last_arg->type,
2621 __btf_name_by_offset(env->btf,
2622 last_arg->name_off));
2623 else
2624 btf_verifier_log(env, ", vararg");
2625 }
2626
2627done:
2628 btf_verifier_log(env, ")");
2629}
2630
2631static struct btf_kind_operations func_proto_ops = {
2632 .check_meta = btf_func_proto_check_meta,
2633 .resolve = btf_df_resolve,
2642
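	/* A BTF_KIND_FUNC_PROTO cannot be referred to directly by a
	 * struct member; it must be reached through a pointer
	 * (member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO), so the
	 * default (reject) member checks are used here.
	 */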
2643 .check_member = btf_df_check_member,
2644 .check_kflag_member = btf_df_check_kflag_member,
2645 .log_details = btf_func_proto_log,
2646 .seq_show = btf_df_seq_show,
2647};
2648
2649static s32 btf_func_check_meta(struct btf_verifier_env *env,
2650 const struct btf_type *t,
2651 u32 meta_left)
2652{
2653 if (!t->name_off ||
2654 !btf_name_valid_identifier(env->btf, t->name_off)) {
2655 btf_verifier_log_type(env, t, "Invalid name");
2656 return -EINVAL;
2657 }
2658
2659 if (btf_type_vlen(t) > BTF_FUNC_GLOBAL) {
2660 btf_verifier_log_type(env, t, "Invalid func linkage");
2661 return -EINVAL;
2662 }
2663
2664 if (btf_type_kflag(t)) {
2665 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2666 return -EINVAL;
2667 }
2668
2669 btf_verifier_log_type(env, t, NULL);
2670
2671 return 0;
2672}
2673
2674static struct btf_kind_operations func_ops = {
2675 .check_meta = btf_func_check_meta,
2676 .resolve = btf_df_resolve,
2677 .check_member = btf_df_check_member,
2678 .check_kflag_member = btf_df_check_kflag_member,
2679 .log_details = btf_ref_type_log,
2680 .seq_show = btf_df_seq_show,
2681};
2682
2683static s32 btf_var_check_meta(struct btf_verifier_env *env,
2684 const struct btf_type *t,
2685 u32 meta_left)
2686{
2687 const struct btf_var *var;
2688 u32 meta_needed = sizeof(*var);
2689
2690 if (meta_left < meta_needed) {
2691 btf_verifier_log_basic(env, t,
2692 "meta_left:%u meta_needed:%u",
2693 meta_left, meta_needed);
2694 return -EINVAL;
2695 }
2696
2697 if (btf_type_vlen(t)) {
2698 btf_verifier_log_type(env, t, "vlen != 0");
2699 return -EINVAL;
2700 }
2701
2702 if (btf_type_kflag(t)) {
2703 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2704 return -EINVAL;
2705 }
2706
2707 if (!t->name_off ||
2708 !__btf_name_valid(env->btf, t->name_off, true)) {
2709 btf_verifier_log_type(env, t, "Invalid name");
2710 return -EINVAL;
2711 }
2712
2713
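	/* A var cannot have type void (type_id 0) */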
2714 if (!t->type || !BTF_TYPE_ID_VALID(t->type)) {
2715 btf_verifier_log_type(env, t, "Invalid type_id");
2716 return -EINVAL;
2717 }
2718
2719 var = btf_type_var(t);
2720 if (var->linkage != BTF_VAR_STATIC &&
2721 var->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2722 btf_verifier_log_type(env, t, "Linkage not supported");
2723 return -EINVAL;
2724 }
2725
2726 btf_verifier_log_type(env, t, NULL);
2727
2728 return meta_needed;
2729}
2730
2731static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t)
2732{
2733 const struct btf_var *var = btf_type_var(t);
2734
2735 btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage);
2736}
2737
2738static const struct btf_kind_operations var_ops = {
2739 .check_meta = btf_var_check_meta,
2740 .resolve = btf_var_resolve,
2741 .check_member = btf_df_check_member,
2742 .check_kflag_member = btf_df_check_kflag_member,
2743 .log_details = btf_var_log,
2744 .seq_show = btf_var_seq_show,
2745};
2746
2747static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
2748 const struct btf_type *t,
2749 u32 meta_left)
2750{
2751 const struct btf_var_secinfo *vsi;
2752 u64 last_vsi_end_off = 0, sum = 0;
2753 u32 i, meta_needed;
2754
2755 meta_needed = btf_type_vlen(t) * sizeof(*vsi);
2756 if (meta_left < meta_needed) {
2757 btf_verifier_log_basic(env, t,
2758 "meta_left:%u meta_needed:%u",
2759 meta_left, meta_needed);
2760 return -EINVAL;
2761 }
2762
2763 if (!btf_type_vlen(t)) {
2764 btf_verifier_log_type(env, t, "vlen == 0");
2765 return -EINVAL;
2766 }
2767
2768 if (!t->size) {
2769 btf_verifier_log_type(env, t, "size == 0");
2770 return -EINVAL;
2771 }
2772
2773 if (btf_type_kflag(t)) {
2774 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2775 return -EINVAL;
2776 }
2777
2778 if (!t->name_off ||
2779 !btf_name_valid_section(env->btf, t->name_off)) {
2780 btf_verifier_log_type(env, t, "Invalid name");
2781 return -EINVAL;
2782 }
2783
2784 btf_verifier_log_type(env, t, NULL);
2785
2786 for_each_vsi(i, t, vsi) {
2787
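		/* A datasec entry cannot refer to type void (type_id 0) */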
2788 if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) {
2789 btf_verifier_log_vsi(env, t, vsi,
2790 "Invalid type_id");
2791 return -EINVAL;
2792 }
2793
2794 if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) {
2795 btf_verifier_log_vsi(env, t, vsi,
2796 "Invalid offset");
2797 return -EINVAL;
2798 }
2799
2800 if (!vsi->size || vsi->size > t->size) {
2801 btf_verifier_log_vsi(env, t, vsi,
2802 "Invalid size");
2803 return -EINVAL;
2804 }
2805
2806 last_vsi_end_off = vsi->offset + vsi->size;
2807 if (last_vsi_end_off > t->size) {
2808 btf_verifier_log_vsi(env, t, vsi,
2809 "Invalid offset+size");
2810 return -EINVAL;
2811 }
2812
2813 btf_verifier_log_vsi(env, t, vsi, NULL);
2814 sum += vsi->size;
2815 }
2816
2817 if (t->size < sum) {
2818 btf_verifier_log_type(env, t, "Invalid btf_info size");
2819 return -EINVAL;
2820 }
2821
2822 return meta_needed;
2823}
2824
2825static int btf_datasec_resolve(struct btf_verifier_env *env,
2826 const struct resolve_vertex *v)
2827{
2828 const struct btf_var_secinfo *vsi;
2829 struct btf *btf = env->btf;
2830 u16 i;
2831
2832 for_each_vsi_from(i, v->next_member, v->t, vsi) {
2833 u32 var_type_id = vsi->type, type_id, type_size = 0;
2834 const struct btf_type *var_type = btf_type_by_id(env->btf,
2835 var_type_id);
2836 if (!var_type || !btf_type_is_var(var_type)) {
2837 btf_verifier_log_vsi(env, v->t, vsi,
2838 "Not a VAR kind member");
2839 return -EINVAL;
2840 }
2841
2842 if (!env_type_is_resolve_sink(env, var_type) &&
2843 !env_type_is_resolved(env, var_type_id)) {
2844 env_stack_set_next_member(env, i + 1);
2845 return env_stack_push(env, var_type, var_type_id);
2846 }
2847
2848 type_id = var_type->type;
2849 if (!btf_type_id_size(btf, &type_id, &type_size)) {
2850 btf_verifier_log_vsi(env, v->t, vsi, "Invalid type");
2851 return -EINVAL;
2852 }
2853
2854 if (vsi->size < type_size) {
2855 btf_verifier_log_vsi(env, v->t, vsi, "Invalid size");
2856 return -EINVAL;
2857 }
2858 }
2859
2860 env_stack_pop_resolved(env, 0, 0);
2861 return 0;
2862}
2863
2864static void btf_datasec_log(struct btf_verifier_env *env,
2865 const struct btf_type *t)
2866{
2867 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2868}
2869
2870static void btf_datasec_seq_show(const struct btf *btf,
2871 const struct btf_type *t, u32 type_id,
2872 void *data, u8 bits_offset,
2873 struct seq_file *m)
2874{
2875 const struct btf_var_secinfo *vsi;
2876 const struct btf_type *var;
2877 u32 i;
2878
2879 seq_printf(m, "section (\"%s\") = {", __btf_name_by_offset(btf, t->name_off));
2880 for_each_vsi(i, t, vsi) {
2881 var = btf_type_by_id(btf, vsi->type);
2882 if (i)
2883 seq_puts(m, ",");
2884 btf_type_ops(var)->seq_show(btf, var, vsi->type,
2885 data + vsi->offset, bits_offset, m);
2886 }
2887 seq_puts(m, "}");
2888}
2889
2890static const struct btf_kind_operations datasec_ops = {
2891 .check_meta = btf_datasec_check_meta,
2892 .resolve = btf_datasec_resolve,
2893 .check_member = btf_df_check_member,
2894 .check_kflag_member = btf_df_check_kflag_member,
2895 .log_details = btf_datasec_log,
2896 .seq_show = btf_datasec_seq_show,
2897};
2898
2899static int btf_func_proto_check(struct btf_verifier_env *env,
2900 const struct btf_type *t)
2901{
2902 const struct btf_type *ret_type;
2903 const struct btf_param *args;
2904 const struct btf *btf;
2905 u16 nr_args, i;
2906 int err;
2907
2908 btf = env->btf;
2909 args = (const struct btf_param *)(t + 1);
2910 nr_args = btf_type_vlen(t);
2911
2912
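	/* Check the return type; t->type == 0 means the function returns void */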
2913 if (t->type) {
2914 u32 ret_type_id = t->type;
2915
2916 ret_type = btf_type_by_id(btf, ret_type_id);
2917 if (!ret_type) {
2918 btf_verifier_log_type(env, t, "Invalid return type");
2919 return -EINVAL;
2920 }
2921
2922 if (btf_type_needs_resolve(ret_type) &&
2923 !env_type_is_resolved(env, ret_type_id)) {
2924 err = btf_resolve(env, ret_type, ret_type_id);
2925 if (err)
2926 return err;
2927 }
2928
2929
2930 if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
2931 btf_verifier_log_type(env, t, "Invalid return type");
2932 return -EINVAL;
2933 }
2934 }
2935
2936 if (!nr_args)
2937 return 0;
2938
2939
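	/* The last arg's type_id may be 0 to mark a vararg; it must then
	 * have no name and is excluded from the per-arg checks below.
	 */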
2940 if (!args[nr_args - 1].type) {
2941 if (args[nr_args - 1].name_off) {
2942 btf_verifier_log_type(env, t, "Invalid arg#%u",
2943 nr_args);
2944 return -EINVAL;
2945 }
2946 nr_args--;
2947 }
2948
2949 err = 0;
2950 for (i = 0; i < nr_args; i++) {
2951 const struct btf_type *arg_type;
2952 u32 arg_type_id;
2953
2954 arg_type_id = args[i].type;
2955 arg_type = btf_type_by_id(btf, arg_type_id);
2956 if (!arg_type) {
2957 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2958 err = -EINVAL;
2959 break;
2960 }
2961
2962 if (args[i].name_off &&
2963 (!btf_name_offset_valid(btf, args[i].name_off) ||
2964 !btf_name_valid_identifier(btf, args[i].name_off))) {
2965 btf_verifier_log_type(env, t,
2966 "Invalid arg#%u", i + 1);
2967 err = -EINVAL;
2968 break;
2969 }
2970
2971 if (btf_type_needs_resolve(arg_type) &&
2972 !env_type_is_resolved(env, arg_type_id)) {
2973 err = btf_resolve(env, arg_type, arg_type_id);
2974 if (err)
2975 break;
2976 }
2977
2978 if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
2979 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
2980 err = -EINVAL;
2981 break;
2982 }
2983 }
2984
2985 return err;
2986}
2987
2988static int btf_func_check(struct btf_verifier_env *env,
2989 const struct btf_type *t)
2990{
2991 const struct btf_type *proto_type;
2992 const struct btf_param *args;
2993 const struct btf *btf;
2994 u16 nr_args, i;
2995
2996 btf = env->btf;
2997 proto_type = btf_type_by_id(btf, t->type);
2998
2999 if (!proto_type || !btf_type_is_func_proto(proto_type)) {
3000 btf_verifier_log_type(env, t, "Invalid type_id");
3001 return -EINVAL;
3002 }
3003
3004 args = (const struct btf_param *)(proto_type + 1);
3005 nr_args = btf_type_vlen(proto_type);
3006 for (i = 0; i < nr_args; i++) {
3007 if (!args[i].name_off && args[i].type) {
3008 btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
3009 return -EINVAL;
3010 }
3011 }
3012
3013 return 0;
3014}
3015
3016static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
3017 [BTF_KIND_INT] = &int_ops,
3018 [BTF_KIND_PTR] = &ptr_ops,
3019 [BTF_KIND_ARRAY] = &array_ops,
3020 [BTF_KIND_STRUCT] = &struct_ops,
3021 [BTF_KIND_UNION] = &struct_ops,
3022 [BTF_KIND_ENUM] = &enum_ops,
3023 [BTF_KIND_FWD] = &fwd_ops,
3024 [BTF_KIND_TYPEDEF] = &modifier_ops,
3025 [BTF_KIND_VOLATILE] = &modifier_ops,
3026 [BTF_KIND_CONST] = &modifier_ops,
3027 [BTF_KIND_RESTRICT] = &modifier_ops,
3028 [BTF_KIND_FUNC] = &func_ops,
3029 [BTF_KIND_FUNC_PROTO] = &func_proto_ops,
3030 [BTF_KIND_VAR] = &var_ops,
3031 [BTF_KIND_DATASEC] = &datasec_ops,
3032};
3033
3034static s32 btf_check_meta(struct btf_verifier_env *env,
3035 const struct btf_type *t,
3036 u32 meta_left)
3037{
3038 u32 saved_meta_left = meta_left;
3039 s32 var_meta_size;
3040
3041 if (meta_left < sizeof(*t)) {
3042 btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
3043 env->log_type_id, meta_left, sizeof(*t));
3044 return -EINVAL;
3045 }
3046 meta_left -= sizeof(*t);
3047
3048 if (t->info & ~BTF_INFO_MASK) {
3049 btf_verifier_log(env, "[%u] Invalid btf_info:%x",
3050 env->log_type_id, t->info);
3051 return -EINVAL;
3052 }
3053
3054 if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
3055 BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
3056 btf_verifier_log(env, "[%u] Invalid kind:%u",
3057 env->log_type_id, BTF_INFO_KIND(t->info));
3058 return -EINVAL;
3059 }
3060
3061 if (!btf_name_offset_valid(env->btf, t->name_off)) {
3062 btf_verifier_log(env, "[%u] Invalid name_offset:%u",
3063 env->log_type_id, t->name_off);
3064 return -EINVAL;
3065 }
3066
3067 var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
3068 if (var_meta_size < 0)
3069 return var_meta_size;
3070
3071 meta_left -= var_meta_size;
3072
3073 return saved_meta_left - meta_left;
3074}
3075
3076static int btf_check_all_metas(struct btf_verifier_env *env)
3077{
3078 struct btf *btf = env->btf;
3079 struct btf_header *hdr;
3080 void *cur, *end;
3081
3082 hdr = &btf->hdr;
3083 cur = btf->nohdr_data + hdr->type_off;
3084 end = cur + hdr->type_len;
3085
3086 env->log_type_id = 1;
3087 while (cur < end) {
3088 struct btf_type *t = cur;
3089 s32 meta_size;
3090
3091 meta_size = btf_check_meta(env, t, end - cur);
3092 if (meta_size < 0)
3093 return meta_size;
3094
3095 btf_add_type(env, t);
3096 cur += meta_size;
3097 env->log_type_id++;
3098 }
3099
3100 return 0;
3101}
3102
3103static bool btf_resolve_valid(struct btf_verifier_env *env,
3104 const struct btf_type *t,
3105 u32 type_id)
3106{
3107 struct btf *btf = env->btf;
3108
3109 if (!env_type_is_resolved(env, type_id))
3110 return false;
3111
3112 if (btf_type_is_struct(t) || btf_type_is_datasec(t))
3113 return !btf->resolved_ids[type_id] &&
3114 !btf->resolved_sizes[type_id];
3115
3116 if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
3117 btf_type_is_var(t)) {
3118 t = btf_type_id_resolve(btf, &type_id);
3119 return t &&
3120 !btf_type_is_modifier(t) &&
3121 !btf_type_is_var(t) &&
3122 !btf_type_is_datasec(t);
3123 }
3124
3125 if (btf_type_is_array(t)) {
3126 const struct btf_array *array = btf_type_array(t);
3127 const struct btf_type *elem_type;
3128 u32 elem_type_id = array->type;
3129 u32 elem_size;
3130
3131 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
3132 return elem_type && !btf_type_is_modifier(elem_type) &&
3133 (array->nelems * elem_size ==
3134 btf->resolved_sizes[type_id]);
3135 }
3136
3137 return false;
3138}
3139
3140static int btf_resolve(struct btf_verifier_env *env,
3141 const struct btf_type *t, u32 type_id)
3142{
3143 u32 save_log_type_id = env->log_type_id;
3144 const struct resolve_vertex *v;
3145 int err = 0;
3146
3147 env->resolve_mode = RESOLVE_TBD;
3148 env_stack_push(env, t, type_id);
3149 while (!err && (v = env_stack_peak(env))) {
3150 env->log_type_id = v->type_id;
3151 err = btf_type_ops(v->t)->resolve(env, v);
3152 }
3153
3154 env->log_type_id = type_id;
3155 if (err == -E2BIG) {
3156 btf_verifier_log_type(env, t,
3157 "Exceeded max resolving depth:%u",
3158 MAX_RESOLVE_DEPTH);
3159 } else if (err == -EEXIST) {
3160 btf_verifier_log_type(env, t, "Loop detected");
3161 }
3162
3163
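	/* Final sanity check on the resolved state */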
3164 if (!err && !btf_resolve_valid(env, t, type_id)) {
3165 btf_verifier_log_type(env, t, "Invalid resolve state");
3166 err = -EINVAL;
3167 }
3168
3169 env->log_type_id = save_log_type_id;
3170 return err;
3171}
3172
3173static int btf_check_all_types(struct btf_verifier_env *env)
3174{
3175 struct btf *btf = env->btf;
3176 u32 type_id;
3177 int err;
3178
3179 err = env_resolve_init(env);
3180 if (err)
3181 return err;
3182
3183 env->phase++;
3184 for (type_id = 1; type_id <= btf->nr_types; type_id++) {
3185 const struct btf_type *t = btf_type_by_id(btf, type_id);
3186
3187 env->log_type_id = type_id;
3188 if (btf_type_needs_resolve(t) &&
3189 !env_type_is_resolved(env, type_id)) {
3190 err = btf_resolve(env, t, type_id);
3191 if (err)
3192 return err;
3193 }
3194
3195 if (btf_type_is_func_proto(t)) {
3196 err = btf_func_proto_check(env, t);
3197 if (err)
3198 return err;
3199 }
3200
3201 if (btf_type_is_func(t)) {
3202 err = btf_func_check(env, t);
3203 if (err)
3204 return err;
3205 }
3206 }
3207
3208 return 0;
3209}
3210
3211static int btf_parse_type_sec(struct btf_verifier_env *env)
3212{
3213 const struct btf_header *hdr = &env->btf->hdr;
3214 int err;
3215
3216
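	/* The type section must be 4-byte aligned */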
3217 if (hdr->type_off & (sizeof(u32) - 1)) {
3218 btf_verifier_log(env, "Unaligned type_off");
3219 return -EINVAL;
3220 }
3221
3222 if (!hdr->type_len) {
3223 btf_verifier_log(env, "No type found");
3224 return -EINVAL;
3225 }
3226
3227 err = btf_check_all_metas(env);
3228 if (err)
3229 return err;
3230
3231 return btf_check_all_types(env);
3232}
3233
3234static int btf_parse_str_sec(struct btf_verifier_env *env)
3235{
3236 const struct btf_header *hdr;
3237 struct btf *btf = env->btf;
3238 const char *start, *end;
3239
3240 hdr = &btf->hdr;
3241 start = btf->nohdr_data + hdr->str_off;
3242 end = start + hdr->str_len;
3243
3244 if (end != btf->data + btf->data_size) {
3245 btf_verifier_log(env, "String section is not at the end");
3246 return -EINVAL;
3247 }
3248
3249 if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
3250 start[0] || end[-1]) {
3251 btf_verifier_log(env, "Invalid string section");
3252 return -EINVAL;
3253 }
3254
3255 btf->strings = start;
3256
3257 return 0;
3258}
3259
3260static const size_t btf_sec_info_offset[] = {
3261 offsetof(struct btf_header, type_off),
3262 offsetof(struct btf_header, str_off),
3263};
3264
3265static int btf_sec_info_cmp(const void *a, const void *b)
3266{
3267 const struct btf_sec_info *x = a;
3268 const struct btf_sec_info *y = b;
3269
3270 return (int)(x->off - y->off) ? : (int)(x->len - y->len);
3271}
3272
3273static int btf_check_sec_info(struct btf_verifier_env *env,
3274 u32 btf_data_size)
3275{
3276 struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
3277 u32 total, expected_total, i;
3278 const struct btf_header *hdr;
3279 const struct btf *btf;
3280
3281 btf = env->btf;
3282 hdr = &btf->hdr;
3283
3284
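	/* Populate secs from the header fields listed in btf_sec_info_offset */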
3285 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
3286 secs[i] = *(struct btf_sec_info *)((void *)hdr +
3287 btf_sec_info_offset[i]);
3288
3289 sort(secs, ARRAY_SIZE(btf_sec_info_offset),
3290 sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);
3291
3292
3293 total = 0;
3294 expected_total = btf_data_size - hdr->hdr_len;
3295 for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
3296 if (expected_total < secs[i].off) {
3297 btf_verifier_log(env, "Invalid section offset");
3298 return -EINVAL;
3299 }
3300 if (total < secs[i].off) {
3301
3302 btf_verifier_log(env, "Unsupported section found");
3303 return -EINVAL;
3304 }
3305 if (total > secs[i].off) {
3306 btf_verifier_log(env, "Section overlap found");
3307 return -EINVAL;
3308 }
3309 if (expected_total - total < secs[i].len) {
3310 btf_verifier_log(env,
3311 "Total section length too long");
3312 return -EINVAL;
3313 }
3314 total += secs[i].len;
3315 }
3316
3317
3318 if (expected_total != total) {
3319 btf_verifier_log(env, "Unsupported section found");
3320 return -EINVAL;
3321 }
3322
3323 return 0;
3324}
3325
3326static int btf_parse_hdr(struct btf_verifier_env *env)
3327{
3328 u32 hdr_len, hdr_copy, btf_data_size;
3329 const struct btf_header *hdr;
3330 struct btf *btf;
3331 int err;
3332
3333 btf = env->btf;
3334 btf_data_size = btf->data_size;
3335
3336 if (btf_data_size <
3337 offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
3338 btf_verifier_log(env, "hdr_len not found");
3339 return -EINVAL;
3340 }
3341
3342 hdr = btf->data;
3343 hdr_len = hdr->hdr_len;
3344 if (btf_data_size < hdr_len) {
3345 btf_verifier_log(env, "btf_header not found");
3346 return -EINVAL;
3347 }
3348
3349
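	/* Bytes of any header extension beyond the known struct btf_header
	 * must be zero.
	 */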
3350 if (hdr_len > sizeof(btf->hdr)) {
3351 u8 *expected_zero = btf->data + sizeof(btf->hdr);
3352 u8 *end = btf->data + hdr_len;
3353
3354 for (; expected_zero < end; expected_zero++) {
3355 if (*expected_zero) {
3356 btf_verifier_log(env, "Unsupported btf_header");
3357 return -E2BIG;
3358 }
3359 }
3360 }
3361
3362 hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
3363 memcpy(&btf->hdr, btf->data, hdr_copy);
3364
3365 hdr = &btf->hdr;
3366
3367 btf_verifier_log_hdr(env, btf_data_size);
3368
3369 if (hdr->magic != BTF_MAGIC) {
3370 btf_verifier_log(env, "Invalid magic");
3371 return -EINVAL;
3372 }
3373
3374 if (hdr->version != BTF_VERSION) {
3375 btf_verifier_log(env, "Unsupported version");
3376 return -ENOTSUPP;
3377 }
3378
3379 if (hdr->flags) {
3380 btf_verifier_log(env, "Unsupported flags");
3381 return -ENOTSUPP;
3382 }
3383
3384 if (btf_data_size == hdr->hdr_len) {
3385 btf_verifier_log(env, "No data");
3386 return -EINVAL;
3387 }
3388
3389 err = btf_check_sec_info(env, btf_data_size);
3390 if (err)
3391 return err;
3392
3393 return 0;
3394}
3395
3396static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
3397 u32 log_level, char __user *log_ubuf, u32 log_size)
3398{
3399 struct btf_verifier_env *env = NULL;
3400 struct bpf_verifier_log *log;
3401 struct btf *btf = NULL;
3402 u8 *data;
3403 int err;
3404
3405 if (btf_data_size > BTF_MAX_SIZE)
3406 return ERR_PTR(-E2BIG);
3407
3408 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
3409 if (!env)
3410 return ERR_PTR(-ENOMEM);
3411
3412 log = &env->log;
3413 if (log_level || log_ubuf || log_size) {
3416
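		/* The user requested a verifier log; copy the attributes
		 * and sanity check them below.
		 */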
3417 log->level = log_level;
3418 log->ubuf = log_ubuf;
3419 log->len_total = log_size;
3420
3421
3422 if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
3423 !log->level || !log->ubuf) {
3424 err = -EINVAL;
3425 goto errout;
3426 }
3427 }
3428
3429 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
3430 if (!btf) {
3431 err = -ENOMEM;
3432 goto errout;
3433 }
3434 env->btf = btf;
3435
3436 data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
3437 if (!data) {
3438 err = -ENOMEM;
3439 goto errout;
3440 }
3441
3442 btf->data = data;
3443 btf->data_size = btf_data_size;
3444
3445 if (copy_from_user(data, btf_data, btf_data_size)) {
3446 err = -EFAULT;
3447 goto errout;
3448 }
3449
3450 err = btf_parse_hdr(env);
3451 if (err)
3452 goto errout;
3453
3454 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
3455
3456 err = btf_parse_str_sec(env);
3457 if (err)
3458 goto errout;
3459
3460 err = btf_parse_type_sec(env);
3461 if (err)
3462 goto errout;
3463
3464 if (log->level && bpf_verifier_log_full(log)) {
3465 err = -ENOSPC;
3466 goto errout;
3467 }
3468
3469 btf_verifier_env_free(env);
3470 refcount_set(&btf->refcnt, 1);
3471 return btf;
3472
3473errout:
3474 btf_verifier_env_free(env);
3475 if (btf)
3476 btf_free(btf);
3477 return ERR_PTR(err);
3478}
3479
3480extern char __weak _binary__btf_vmlinux_bin_start[];
3481extern char __weak _binary__btf_vmlinux_bin_end[];
3482extern struct btf *btf_vmlinux;
3483
3484#define BPF_MAP_TYPE(_id, _ops)
3485static union {
3486 struct bpf_ctx_convert {
3487#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
3488 prog_ctx_type _id##_prog; \
3489 kern_ctx_type _id##_kern;
3490#include <linux/bpf_types.h>
3491#undef BPF_PROG_TYPE
3492 } *__t;
3493
3494 const struct btf_type *t;
3495} bpf_ctx_convert;
3496enum {
3497#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
3498 __ctx_convert##_id,
3499#include <linux/bpf_types.h>
3500#undef BPF_PROG_TYPE
3501 __ctx_convert_unused,
3502};
3503static u8 bpf_ctx_convert_map[] = {
3504#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
3505 [_id] = __ctx_convert##_id,
3506#include <linux/bpf_types.h>
3507#undef BPF_PROG_TYPE
3508 0,
3509};
3510#undef BPF_MAP_TYPE
3511
3512static const struct btf_member *
3513btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
3514 const struct btf_type *t, enum bpf_prog_type prog_type,
3515 int arg)
3516{
3517 const struct btf_type *conv_struct;
3518 const struct btf_type *ctx_struct;
3519 const struct btf_member *ctx_type;
3520 const char *tname, *ctx_tname;
3521
3522 conv_struct = bpf_ctx_convert.t;
3523 if (!conv_struct) {
3524 bpf_log(log, "btf_vmlinux is malformed\n");
3525 return NULL;
3526 }
3527 t = btf_type_by_id(btf, t->type);
3528 while (btf_type_is_modifier(t))
3529 t = btf_type_by_id(btf, t->type);
3530 if (!btf_type_is_struct(t)) {
3535
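		/* Only a pointer to struct is supported as a program ctx type */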
3536 if (log->level & BPF_LOG_LEVEL)
3537 bpf_log(log, "arg#%d type is not a struct\n", arg);
3538 return NULL;
3539 }
3540 tname = btf_name_by_offset(btf, t->name_off);
3541 if (!tname) {
3542 bpf_log(log, "arg#%d struct doesn't have a name\n", arg);
3543 return NULL;
3544 }
3545
3546 ctx_type = btf_type_member(conv_struct) + bpf_ctx_convert_map[prog_type] * 2;
3547
3548
3549
3550 ctx_struct = btf_type_by_id(btf_vmlinux, ctx_type->type);
3551 if (!ctx_struct)
3552
3553 return NULL;
3554 ctx_tname = btf_name_by_offset(btf_vmlinux, ctx_struct->name_off);
3555 if (!ctx_tname) {
3556
3557 bpf_log(log, "Please fix kernel include/linux/bpf_types.h\n");
3558 return NULL;
3559 }
3566
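	/* Only the ctx type names are compared; the program's ctx struct
	 * does not need to match the kernel ctx struct field by field.
	 */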
3567 if (strcmp(ctx_tname, tname))
3568 return NULL;
3569 return ctx_type;
3570}
3571
3572static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
3573 struct btf *btf,
3574 const struct btf_type *t,
3575 enum bpf_prog_type prog_type,
3576 int arg)
3577{
3578 const struct btf_member *prog_ctx_type, *kern_ctx_type;
3579
3580 prog_ctx_type = btf_get_prog_ctx_type(log, btf, t, prog_type, arg);
3581 if (!prog_ctx_type)
3582 return -ENOENT;
3583 kern_ctx_type = prog_ctx_type + 1;
3584 return kern_ctx_type->type;
3585}
3586
3587struct btf *btf_parse_vmlinux(void)
3588{
3589 struct btf_verifier_env *env = NULL;
3590 struct bpf_verifier_log *log;
3591 struct btf *btf = NULL;
3592 int err, i;
3593
3594 env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
3595 if (!env)
3596 return ERR_PTR(-ENOMEM);
3597
3598 log = &env->log;
3599 log->level = BPF_LOG_KERNEL;
3600
3601 btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
3602 if (!btf) {
3603 err = -ENOMEM;
3604 goto errout;
3605 }
3606 env->btf = btf;
3607
3608 btf->data = _binary__btf_vmlinux_bin_start;
3609 btf->data_size = _binary__btf_vmlinux_bin_end -
3610 _binary__btf_vmlinux_bin_start;
3611
3612 err = btf_parse_hdr(env);
3613 if (err)
3614 goto errout;
3615
3616 btf->nohdr_data = btf->data + btf->hdr.hdr_len;
3617
3618 err = btf_parse_str_sec(env);
3619 if (err)
3620 goto errout;
3621
3622 err = btf_check_all_metas(env);
3623 if (err)
3624 goto errout;
3625
3626
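	/* Find the "bpf_ctx_convert" struct in vmlinux BTF; it is used
	 * later to map program ctx types to kernel ctx types.
	 */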
3627 for (i = 1; i <= btf->nr_types; i++) {
3628 const struct btf_type *t;
3629 const char *tname;
3630
3631 t = btf_type_by_id(btf, i);
3632 if (!__btf_type_is_struct(t))
3633 continue;
3634 tname = __btf_name_by_offset(btf, t->name_off);
3635 if (!strcmp(tname, "bpf_ctx_convert")) {
3636
3637 bpf_ctx_convert.t = t;
3638 break;
3639 }
3640 }
3641 if (i > btf->nr_types) {
3642 err = -ENOENT;
3643 goto errout;
3644 }
3645
3646 bpf_struct_ops_init(btf, log);
3647
3648 btf_verifier_env_free(env);
3649 refcount_set(&btf->refcnt, 1);
3650 return btf;
3651
3652errout:
3653 btf_verifier_env_free(env);
3654 if (btf) {
3655 kvfree(btf->types);
3656 kfree(btf);
3657 }
3658 return ERR_PTR(err);
3659}
3660
3661struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
3662{
3663 struct bpf_prog *tgt_prog = prog->aux->linked_prog;
3664
3665 if (tgt_prog) {
3666 return tgt_prog->aux->btf;
3667 } else {
3668 return btf_vmlinux;
3669 }
3670}
3671
3672static bool is_string_ptr(struct btf *btf, const struct btf_type *t)
3673{
3674
3675 t = btf_type_by_id(btf, t->type);
3676
3677
3678 if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST)
3679 t = btf_type_by_id(btf, t->type);
3680
3681
3682 return btf_type_is_int(t) && t->size == 1;
3683}
3684
3685bool btf_ctx_access(int off, int size, enum bpf_access_type type,
3686 const struct bpf_prog *prog,
3687 struct bpf_insn_access_aux *info)
3688{
3689 const struct btf_type *t = prog->aux->attach_func_proto;
3690 struct bpf_prog *tgt_prog = prog->aux->linked_prog;
3691 struct btf *btf = bpf_prog_get_target_btf(prog);
3692 const char *tname = prog->aux->attach_func_name;
3693 struct bpf_verifier_log *log = info->log;
3694 const struct btf_param *args;
3695 u32 nr_args, arg;
3696 int ret;
3697
3698 if (off % 8) {
3699 bpf_log(log, "func '%s' offset %d is not multiple of 8\n",
3700 tname, off);
3701 return false;
3702 }
3703 arg = off / 8;
3704 args = (const struct btf_param *)(t + 1);
3705
3706 nr_args = t ? btf_type_vlen(t) : 5;
3707 if (prog->aux->attach_btf_trace) {
3708
3709 args++;
3710 nr_args--;
3711 }
3712
3713 if (prog->expected_attach_type == BPF_TRACE_FEXIT &&
3714 arg == nr_args) {
3715 if (!t)
3716
3717 return true;
3718
3719 t = btf_type_by_id(btf, t->type);
3720 } else if (arg >= nr_args) {
3721 bpf_log(log, "func '%s' doesn't have %d-th argument\n",
3722 tname, arg + 1);
3723 return false;
3724 } else {
3725 if (!t)
3726
3727 return true;
3728 t = btf_type_by_id(btf, args[arg].type);
3729 }
3730
3731 while (btf_type_is_modifier(t))
3732 t = btf_type_by_id(btf, t->type);
3733 if (btf_type_is_int(t) || btf_type_is_enum(t))
3734
3735 return true;
3736 if (!btf_type_is_ptr(t)) {
3737 bpf_log(log,
3738 "func '%s' arg%d '%s' has type %s. Only pointer access is allowed\n",
3739 tname, arg,
3740 __btf_name_by_offset(btf, t->name_off),
3741 btf_kind_str[BTF_INFO_KIND(t->info)]);
3742 return false;
3743 }
3744 if (t->type == 0)
3748
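		/* Pointer to void: treat it like a scalar; no further
		 * pointer walking is allowed.
		 */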
3749 return true;
3750
3751 if (is_string_ptr(btf, t))
3752 return true;
3753
3754
3755 info->reg_type = PTR_TO_BTF_ID;
3756
3757 if (tgt_prog) {
3758 ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type, arg);
3759 if (ret > 0) {
3760 info->btf_id = ret;
3761 return true;
3762 } else {
3763 return false;
3764 }
3765 }
3766
3767 info->btf_id = t->type;
3768 t = btf_type_by_id(btf, t->type);
3769
3770 while (btf_type_is_modifier(t)) {
3771 info->btf_id = t->type;
3772 t = btf_type_by_id(btf, t->type);
3773 }
3774 if (!btf_type_is_struct(t)) {
3775 bpf_log(log,
3776 "func '%s' arg%d type %s is not a struct\n",
3777 tname, arg, btf_kind_str[BTF_INFO_KIND(t->info)]);
3778 return false;
3779 }
3780 bpf_log(log, "func '%s' arg%d has btf_id %d type %s '%s'\n",
3781 tname, arg, info->btf_id, btf_kind_str[BTF_INFO_KIND(t->info)],
3782 __btf_name_by_offset(btf, t->name_off));
3783 return true;
3784}
3785
3786int btf_struct_access(struct bpf_verifier_log *log,
3787 const struct btf_type *t, int off, int size,
3788 enum bpf_access_type atype,
3789 u32 *next_btf_id)
3790{
3791 u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
3792 const struct btf_type *mtype, *elem_type = NULL;
3793 const struct btf_member *member;
3794 const char *tname, *mname;
3795
3796again:
3797 tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
3798 if (!btf_type_is_struct(t)) {
3799 bpf_log(log, "Type '%s' is not a struct\n", tname);
3800 return -EINVAL;
3801 }
3802
3803 if (off + size > t->size) {
3804 bpf_log(log, "access beyond struct %s at off %u size %u\n",
3805 tname, off, size);
3806 return -EACCES;
3807 }
3808
3809 for_each_member(i, t, member) {
3810
3811 moff = btf_member_bit_offset(t, member) / 8;
3812 if (off + size <= moff)
3813
3814 break;
3815
3816 if (btf_member_bitfield_size(t, member)) {
3817 u32 end_bit = btf_member_bit_offset(t, member) +
3818 btf_member_bitfield_size(t, member);
3827
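			/* "off <= moff" rather than "off == moff" because an
			 * access may start inside a preceding anonymous
			 * bitfield, which has no BTF member of its own.
			 */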
3828 if (off <= moff &&
3829 BITS_ROUNDUP_BYTES(end_bit) <= off + size)
3830 return SCALAR_VALUE;
3843
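			/* Partial or misaligned overlap with this bitfield:
			 * keep scanning; if no later member matches, this
			 * falls through to the "field not found" error.
			 */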
3844 continue;
3845 }
3846
3847

3848 if (off < moff)
3849 break;
3850
3851
3852 mtype = btf_type_by_id(btf_vmlinux, member->type);
3853 mname = __btf_name_by_offset(btf_vmlinux, member->name_off);
3854
3855 mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
3856 &elem_type, &total_nelems);
3857 if (IS_ERR(mtype)) {
3858 bpf_log(log, "field %s doesn't have size\n", mname);
3859 return -EFAULT;
3860 }
3861
3862 mtrue_end = moff + msize;
3863 if (off >= mtrue_end)
3864
3865 continue;
3866
3867 if (btf_type_is_array(mtype)) {
3868 u32 elem_idx;
3910
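			/* btf_resolve_size() above linearized the (possibly
			 * multi-dimensional) array: msize is the total array
			 * size and total_nelems the element count.  Narrow
			 * the walk down to the single element that contains
			 * "off"; a zero-sized array (moff == mtrue_end)
			 * cannot be accessed.
			 */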
3911 if (moff == mtrue_end)
3912 continue;
3913
3914 msize /= total_nelems;
3915 elem_idx = (off - moff) / msize;
3916 moff += elem_idx * msize;
3917 mtype = elem_type;
3918 }
3919
3920
3921
3922
3923 if (btf_type_is_struct(mtype)) {
3924
3925 t = mtype;
3926
3927
3928 off -= moff;
3929 goto again;
3930 }
3931
3932 if (btf_type_is_ptr(mtype)) {
3933 const struct btf_type *stype;
3934 u32 id;
3935
3936 if (msize != size || off != moff) {
3937 bpf_log(log,
3938 "cannot access ptr member %s with moff %u in struct %s with off %u size %u\n",
3939 mname, moff, tname, off, size);
3940 return -EACCES;
3941 }
3942
3943 stype = btf_type_skip_modifiers(btf_vmlinux, mtype->type, &id);
3944 if (btf_type_is_struct(stype)) {
3945 *next_btf_id = id;
3946 return PTR_TO_BTF_ID;
3947 }
3948 }
3949
3955
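		/* The access must not run past the true end of this member */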
3956 if (off + size > mtrue_end) {
3957 bpf_log(log,
3958 "access beyond the end of member %s (mend:%u) in struct %s with off %u size %u\n",
3959 mname, mtrue_end, tname, off, size);
3960 return -EACCES;
3961 }
3962
3963 return SCALAR_VALUE;
3964 }
3965 bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
3966 return -EINVAL;
3967}
3968
3969static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn,
3970 int arg)
3971{
3972 char fnname[KSYM_SYMBOL_LEN + 4] = "btf_";
3973 const struct btf_param *args;
3974 const struct btf_type *t;
3975 const char *tname, *sym;
3976 u32 btf_id, i;
3977
3978 if (IS_ERR(btf_vmlinux)) {
3979 bpf_log(log, "btf_vmlinux is malformed\n");
3980 return -EINVAL;
3981 }
3982
3983 sym = kallsyms_lookup((long)fn, NULL, NULL, NULL, fnname + 4);
3984 if (!sym) {
3985 bpf_log(log, "kernel doesn't have kallsyms\n");
3986 return -EFAULT;
3987 }
3988
3989 for (i = 1; i <= btf_vmlinux->nr_types; i++) {
3990 t = btf_type_by_id(btf_vmlinux, i);
3991 if (BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF)
3992 continue;
3993 tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
3994 if (!strcmp(tname, fnname))
3995 break;
3996 }
3997 if (i > btf_vmlinux->nr_types) {
3998 bpf_log(log, "helper %s type is not found\n", fnname);
3999 return -ENOENT;
4000 }
4001
4002 t = btf_type_by_id(btf_vmlinux, t->type);
4003 if (!btf_type_is_ptr(t))
4004 return -EFAULT;
4005 t = btf_type_by_id(btf_vmlinux, t->type);
4006 if (!btf_type_is_func_proto(t))
4007 return -EFAULT;
4008
4009 args = (const struct btf_param *)(t + 1);
4010 if (arg >= btf_type_vlen(t)) {
4011 bpf_log(log, "bpf helper %s doesn't have %d-th argument\n",
4012 fnname, arg);
4013 return -EINVAL;
4014 }
4015
4016 t = btf_type_by_id(btf_vmlinux, args[arg].type);
4017 if (!btf_type_is_ptr(t) || !t->type) {
4018
4019 bpf_log(log, "ARG_PTR_TO_BTF is misconfigured\n");
4020 return -EFAULT;
4021 }
4022 btf_id = t->type;
4023 t = btf_type_by_id(btf_vmlinux, t->type);
4024
4025 while (btf_type_is_modifier(t)) {
4026 btf_id = t->type;
4027 t = btf_type_by_id(btf_vmlinux, t->type);
4028 }
4029 if (!btf_type_is_struct(t)) {
4030 bpf_log(log, "ARG_PTR_TO_BTF is not a struct\n");
4031 return -EFAULT;
4032 }
4033 bpf_log(log, "helper %s arg%d has btf_id %d struct %s\n", fnname + 4,
4034 arg, btf_id, __btf_name_by_offset(btf_vmlinux, t->name_off));
4035 return btf_id;
4036}
4037
4038int btf_resolve_helper_id(struct bpf_verifier_log *log,
4039 const struct bpf_func_proto *fn, int arg)
4040{
4041 int *btf_id = &fn->btf_id[arg];
4042 int ret;
4043
4044 if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID)
4045 return -EINVAL;
4046
4047 ret = READ_ONCE(*btf_id);
4048 if (ret)
4049 return ret;
4050
4051 ret = __btf_resolve_helper_id(log, fn->func, arg);
4052 if (!ret) {
4053
4054 bpf_log(log, "BTF resolution bug\n");
4055 return -EFAULT;
4056 }
4057 WRITE_ONCE(*btf_id, ret);
4058 return ret;
4059}
4060
4061static int __get_type_size(struct btf *btf, u32 btf_id,
4062 const struct btf_type **bad_type)
4063{
4064 const struct btf_type *t;
4065
4066 if (!btf_id)
4067
4068 return 0;
4069 t = btf_type_by_id(btf, btf_id);
4070 while (t && btf_type_is_modifier(t))
4071 t = btf_type_by_id(btf, t->type);
4072 if (!t) {
4073 *bad_type = btf->types[0];
4074 return -EINVAL;
4075 }
4076 if (btf_type_is_ptr(t))
4077
4078 return sizeof(void *);
4079 if (btf_type_is_int(t) || btf_type_is_enum(t))
4080 return t->size;
4081 *bad_type = t;
4082 return -EINVAL;
4083}
4084
4085int btf_distill_func_proto(struct bpf_verifier_log *log,
4086 struct btf *btf,
4087 const struct btf_type *func,
4088 const char *tname,
4089 struct btf_func_model *m)
4090{
4091 const struct btf_param *args;
4092 const struct btf_type *t;
4093 u32 i, nargs;
4094 int ret;
4095
4096 if (!func) {
4099
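		/* No BTF prototype is available for this function: fall back
		 * to a model of five u64 arguments and a u64 return value.
		 */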
4100 for (i = 0; i < 5; i++)
4101 m->arg_size[i] = 8;
4102 m->ret_size = 8;
4103 m->nr_args = 5;
4104 return 0;
4105 }
4106 args = (const struct btf_param *)(func + 1);
4107 nargs = btf_type_vlen(func);
4108 if (nargs >= MAX_BPF_FUNC_ARGS) {
4109 bpf_log(log,
4110 "The function %s has %d arguments. Too many.\n",
4111 tname, nargs);
4112 return -EINVAL;
4113 }
4114 ret = __get_type_size(btf, func->type, &t);
4115 if (ret < 0) {
4116 bpf_log(log,
4117 "The function %s return type %s is unsupported.\n",
4118 tname, btf_kind_str[BTF_INFO_KIND(t->info)]);
4119 return -EINVAL;
4120 }
4121 m->ret_size = ret;
4122
4123 for (i = 0; i < nargs; i++) {
4124 ret = __get_type_size(btf, args[i].type, &t);
4125 if (ret < 0) {
4126 bpf_log(log,
4127 "The function %s arg%d type %s is unsupported.\n",
4128 tname, i, btf_kind_str[BTF_INFO_KIND(t->info)]);
4129 return -EINVAL;
4130 }
4131 m->arg_size[i] = ret;
4132 }
4133 m->nr_args = nargs;
4134 return 0;
4135}
4144
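/* Compare the BTF signatures of two functions (t1 in btf1, t2 in btf2),
 * allowing only scalars and pointers to context structs as arguments.
 * Returns 0 on match, -EINVAL on mismatch, -EFAULT on malformed BTF.
 */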
4145static int btf_check_func_type_match(struct bpf_verifier_log *log,
4146 struct btf *btf1, const struct btf_type *t1,
4147 struct btf *btf2, const struct btf_type *t2)
4148{
4149 const struct btf_param *args1, *args2;
4150 const char *fn1, *fn2, *s1, *s2;
4151 u32 nargs1, nargs2, i;
4152
4153 fn1 = btf_name_by_offset(btf1, t1->name_off);
4154 fn2 = btf_name_by_offset(btf2, t2->name_off);
4155
4156 if (btf_func_linkage(t1) != BTF_FUNC_GLOBAL) {
4157 bpf_log(log, "%s() is not a global function\n", fn1);
4158 return -EINVAL;
4159 }
4160 if (btf_func_linkage(t2) != BTF_FUNC_GLOBAL) {
4161 bpf_log(log, "%s() is not a global function\n", fn2);
4162 return -EINVAL;
4163 }
4164
4165 t1 = btf_type_by_id(btf1, t1->type);
4166 if (!t1 || !btf_type_is_func_proto(t1))
4167 return -EFAULT;
4168 t2 = btf_type_by_id(btf2, t2->type);
4169 if (!t2 || !btf_type_is_func_proto(t2))
4170 return -EFAULT;
4171
4172 args1 = (const struct btf_param *)(t1 + 1);
4173 nargs1 = btf_type_vlen(t1);
4174 args2 = (const struct btf_param *)(t2 + 1);
4175 nargs2 = btf_type_vlen(t2);
4176
4177 if (nargs1 != nargs2) {
4178 bpf_log(log, "%s() has %d args while %s() has %d args\n",
4179 fn1, nargs1, fn2, nargs2);
4180 return -EINVAL;
4181 }
4182
4183 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
4184 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
4185 if (t1->info != t2->info) {
4186 bpf_log(log,
4187 "Return type %s of %s() doesn't match type %s of %s()\n",
4188 btf_type_str(t1), fn1,
4189 btf_type_str(t2), fn2);
4190 return -EINVAL;
4191 }
4192
4193 for (i = 0; i < nargs1; i++) {
4194 t1 = btf_type_skip_modifiers(btf1, args1[i].type, NULL);
4195 t2 = btf_type_skip_modifiers(btf2, args2[i].type, NULL);
4196
4197 if (t1->info != t2->info) {
4198 bpf_log(log, "arg%d in %s() is %s while %s() has %s\n",
4199 i, fn1, btf_type_str(t1),
4200 fn2, btf_type_str(t2));
4201 return -EINVAL;
4202 }
4203 if (btf_type_has_size(t1) && t1->size != t2->size) {
4204 bpf_log(log,
4205 "arg%d in %s() has size %d while %s() has %d\n",
4206 i, fn1, t1->size,
4207 fn2, t2->size);
4208 return -EINVAL;
4209 }
4210
4211
4212
4213
4214
4215 if (btf_type_is_int(t1) || btf_type_is_enum(t1))
4216 continue;
4217 if (!btf_type_is_ptr(t1)) {
4218 bpf_log(log,
4219 "arg%d in %s() has unrecognized type\n",
4220 i, fn1);
4221 return -EINVAL;
4222 }
4223 t1 = btf_type_skip_modifiers(btf1, t1->type, NULL);
4224 t2 = btf_type_skip_modifiers(btf2, t2->type, NULL);
4225 if (!btf_type_is_struct(t1)) {
4226 bpf_log(log,
4227 "arg%d in %s() is not a pointer to context\n",
4228 i, fn1);
4229 return -EINVAL;
4230 }
4231 if (!btf_type_is_struct(t2)) {
4232 bpf_log(log,
4233 "arg%d in %s() is not a pointer to context\n",
4234 i, fn2);
4235 return -EINVAL;
4236 }
4237
4238
4239
4240
4241
4242
4243 s1 = btf_name_by_offset(btf1, t1->name_off);
4244 s2 = btf_name_by_offset(btf2, t2->name_off);
4245 if (strcmp(s1, s2)) {
4246 bpf_log(log,
4247 "arg%d %s(struct %s *) doesn't match %s(struct %s *)\n",
4248 i, fn1, s1, fn2, s2);
4249 return -EINVAL;
4250 }
4251 }
4252 return 0;
4253}
4254
4255
4256int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
4257 struct btf *btf2, const struct btf_type *t2)
4258{
4259 struct btf *btf1 = prog->aux->btf;
4260 const struct btf_type *t1;
4261 u32 btf_id = 0;
4262
4263 if (!prog->aux->func_info) {
4264 bpf_log(&env->log, "Program extension requires BTF\n");
4265 return -EINVAL;
4266 }
4267
4268 btf_id = prog->aux->func_info[0].type_id;
4269 if (!btf_id)
4270 return -EFAULT;
4271
4272 t1 = btf_type_by_id(btf1, btf_id);
4273 if (!t1 || !btf_type_is_func(t1))
4274 return -EFAULT;
4275
4276 return btf_check_func_type_match(&env->log, btf1, t1, btf2, t2);
4277}
4285
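/* Compare the BTF of subprog's function signature with the actual
 * bpf_reg_state of its arguments.
 * Returns -EFAULT on a verifier bug, -EINVAL on a type mismatch or
 * missing BTF, and 0 when BTF matches what the registers hold
 * (only SCALAR_VALUE and PTR_TO_CTX are recognized).
 */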
4286int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
4287 struct bpf_reg_state *reg)
4288{
4289 struct bpf_verifier_log *log = &env->log;
4290 struct bpf_prog *prog = env->prog;
4291 struct btf *btf = prog->aux->btf;
4292 const struct btf_param *args;
4293 const struct btf_type *t;
4294 u32 i, nargs, btf_id;
4295 const char *tname;
4296
4297 if (!prog->aux->func_info)
4298 return -EINVAL;
4299
4300 btf_id = prog->aux->func_info[subprog].type_id;
4301 if (!btf_id)
4302 return -EFAULT;
4303
4304 if (prog->aux->func_info_aux[subprog].unreliable)
4305 return -EINVAL;
4306
4307 t = btf_type_by_id(btf, btf_id);
4308 if (!t || !btf_type_is_func(t)) {
4309
4310
4311
4312 bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
4313 subprog);
4314 return -EFAULT;
4315 }
4316 tname = btf_name_by_offset(btf, t->name_off);
4317
4318 t = btf_type_by_id(btf, t->type);
4319 if (!t || !btf_type_is_func_proto(t)) {
4320 bpf_log(log, "Invalid BTF of func %s\n", tname);
4321 return -EFAULT;
4322 }
4323 args = (const struct btf_param *)(t + 1);
4324 nargs = btf_type_vlen(t);
4325 if (nargs > 5) {
4326 bpf_log(log, "Function %s has %d > 5 args\n", tname, nargs);
4327 goto out;
4328 }
4329
4330
4331
4332 for (i = 0; i < nargs; i++) {
4333 t = btf_type_by_id(btf, args[i].type);
4334 while (btf_type_is_modifier(t))
4335 t = btf_type_by_id(btf, t->type);
4336 if (btf_type_is_int(t) || btf_type_is_enum(t)) {
4337 if (reg[i + 1].type == SCALAR_VALUE)
4338 continue;
4339 bpf_log(log, "R%d is not a scalar\n", i + 1);
4340 goto out;
4341 }
4342 if (btf_type_is_ptr(t)) {
4343 if (reg[i + 1].type == SCALAR_VALUE) {
4344 bpf_log(log, "R%d is not a pointer\n", i + 1);
4345 goto out;
4346 }
4347
4348
4349
4350 if (btf_get_prog_ctx_type(log, btf, t, prog->type, i)) {
4351 if (reg[i + 1].type != PTR_TO_CTX) {
4352 bpf_log(log,
4353 "arg#%d expected pointer to ctx, but got %s\n",
4354 i, btf_kind_str[BTF_INFO_KIND(t->info)]);
4355 goto out;
4356 }
4357 if (check_ctx_reg(env, &reg[i + 1], i + 1))
4358 goto out;
4359 continue;
4360 }
4361 }
4362 bpf_log(log, "Unrecognized arg#%d type %s\n",
4363 i, btf_kind_str[BTF_INFO_KIND(t->info)]);
4364 goto out;
4365 }
4366 return 0;
4367out:
4371
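	/* BTF info for this subprog cannot be trusted (e.g. the compiler
	 * may have removed or changed its arguments), so mark it
	 * unreliable for later checks.
	 */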
4372 prog->aux->func_info_aux[subprog].unreliable = true;
4373 return -EINVAL;
4374}
4382
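/* Convert the BTF signature of a global function into bpf_reg_state
 * for its arguments, if possible.
 * Returns -EFAULT on a verifier bug, -EINVAL when the BTF cannot be
 * converted, and 0 on success (args become SCALAR_VALUE or PTR_TO_CTX).
 */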
4383int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
4384 struct bpf_reg_state *reg)
4385{
4386 struct bpf_verifier_log *log = &env->log;
4387 struct bpf_prog *prog = env->prog;
4388 enum bpf_prog_type prog_type = prog->type;
4389 struct btf *btf = prog->aux->btf;
4390 const struct btf_param *args;
4391 const struct btf_type *t;
4392 u32 i, nargs, btf_id;
4393 const char *tname;
4394
4395 if (!prog->aux->func_info ||
4396 prog->aux->func_info_aux[subprog].linkage != BTF_FUNC_GLOBAL) {
4397 bpf_log(log, "Verifier bug\n");
4398 return -EFAULT;
4399 }
4400
4401 btf_id = prog->aux->func_info[subprog].type_id;
4402 if (!btf_id) {
4403 bpf_log(log, "Global functions need valid BTF\n");
4404 return -EFAULT;
4405 }
4406
4407 t = btf_type_by_id(btf, btf_id);
4408 if (!t || !btf_type_is_func(t)) {
4409
4410
4411
4412 bpf_log(log, "BTF of func#%d doesn't point to KIND_FUNC\n",
4413 subprog);
4414 return -EFAULT;
4415 }
4416 tname = btf_name_by_offset(btf, t->name_off);
4417
4418 if (log->level & BPF_LOG_LEVEL)
4419 bpf_log(log, "Validating %s() func#%d...\n",
4420 tname, subprog);
4421
4422 if (prog->aux->func_info_aux[subprog].unreliable) {
4423 bpf_log(log, "Verifier bug in function %s()\n", tname);
4424 return -EFAULT;
4425 }
4426 if (prog_type == BPF_PROG_TYPE_EXT)
4427 prog_type = prog->aux->linked_prog->type;
4428
4429 t = btf_type_by_id(btf, t->type);
4430 if (!t || !btf_type_is_func_proto(t)) {
4431 bpf_log(log, "Invalid type of function %s()\n", tname);
4432 return -EFAULT;
4433 }
4434 args = (const struct btf_param *)(t + 1);
4435 nargs = btf_type_vlen(t);
4436 if (nargs > 5) {
4437 bpf_log(log, "Global function %s() with %d > 5 args. Buggy compiler.\n",
4438 tname, nargs);
4439 return -EINVAL;
4440 }
4441
4442 t = btf_type_by_id(btf, t->type);
4443 while (btf_type_is_modifier(t))
4444 t = btf_type_by_id(btf, t->type);
4445 if (!btf_type_is_int(t) && !btf_type_is_enum(t)) {
4446 bpf_log(log,
4447 "Global function %s() doesn't return scalar. Only those are supported.\n",
4448 tname);
4449 return -EINVAL;
4450 }
4451
4452
4453
4454 for (i = 0; i < nargs; i++) {
4455 t = btf_type_by_id(btf, args[i].type);
4456 while (btf_type_is_modifier(t))
4457 t = btf_type_by_id(btf, t->type);
4458 if (btf_type_is_int(t) || btf_type_is_enum(t)) {
4459 reg[i + 1].type = SCALAR_VALUE;
4460 continue;
4461 }
4462 if (btf_type_is_ptr(t) &&
4463 btf_get_prog_ctx_type(log, btf, t, prog_type, i)) {
4464 reg[i + 1].type = PTR_TO_CTX;
4465 continue;
4466 }
4467 bpf_log(log, "Arg#%d type %s in %s() is not supported yet.\n",
4468 i, btf_kind_str[BTF_INFO_KIND(t->info)], tname);
4469 return -EINVAL;
4470 }
4471 return 0;
4472}
4473
4474void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
4475 struct seq_file *m)
4476{
4477 const struct btf_type *t = btf_type_by_id(btf, type_id);
4478
4479 btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
4480}
4481
4482#ifdef CONFIG_PROC_FS
4483static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp)
4484{
4485 const struct btf *btf = filp->private_data;
4486
4487 seq_printf(m, "btf_id:\t%u\n", btf->id);
4488}
4489#endif
4490
4491static int btf_release(struct inode *inode, struct file *filp)
4492{
4493 btf_put(filp->private_data);
4494 return 0;
4495}
4496
4497const struct file_operations btf_fops = {
4498#ifdef CONFIG_PROC_FS
4499 .show_fdinfo = bpf_btf_show_fdinfo,
4500#endif
4501 .release = btf_release,
4502};
4503
4504static int __btf_new_fd(struct btf *btf)
4505{
4506 return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
4507}
4508
4509int btf_new_fd(const union bpf_attr *attr)
4510{
4511 struct btf *btf;
4512 int ret;
4513
4514 btf = btf_parse(u64_to_user_ptr(attr->btf),
4515 attr->btf_size, attr->btf_log_level,
4516 u64_to_user_ptr(attr->btf_log_buf),
4517 attr->btf_log_size);
4518 if (IS_ERR(btf))
4519 return PTR_ERR(btf);
4520
4521 ret = btf_alloc_id(btf);
4522 if (ret) {
4523 btf_free(btf);
4524 return ret;
4525 }
4532
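	/* The btf ID is now visible to userspace, so from this point the
	 * object must only be freed via btf_put().
	 */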
4533 ret = __btf_new_fd(btf);
4534 if (ret < 0)
4535 btf_put(btf);
4536
4537 return ret;
4538}
4539
4540struct btf *btf_get_by_fd(int fd)
4541{
4542 struct btf *btf;
4543 struct fd f;
4544
4545 f = fdget(fd);
4546
4547 if (!f.file)
4548 return ERR_PTR(-EBADF);
4549
4550 if (f.file->f_op != &btf_fops) {
4551 fdput(f);
4552 return ERR_PTR(-EINVAL);
4553 }
4554
4555 btf = f.file->private_data;
4556 refcount_inc(&btf->refcnt);
4557 fdput(f);
4558
4559 return btf;
4560}
4561
4562int btf_get_info_by_fd(const struct btf *btf,
4563 const union bpf_attr *attr,
4564 union bpf_attr __user *uattr)
4565{
4566 struct bpf_btf_info __user *uinfo;
4567 struct bpf_btf_info info;
4568 u32 info_copy, btf_copy;
4569 void __user *ubtf;
4570 u32 uinfo_len;
4571
4572 uinfo = u64_to_user_ptr(attr->info.info);
4573 uinfo_len = attr->info.info_len;
4574
4575 info_copy = min_t(u32, uinfo_len, sizeof(info));
4576 memset(&info, 0, sizeof(info));
4577 if (copy_from_user(&info, uinfo, info_copy))
4578 return -EFAULT;
4579
4580 info.id = btf->id;
4581 ubtf = u64_to_user_ptr(info.btf);
4582 btf_copy = min_t(u32, btf->data_size, info.btf_size);
4583 if (copy_to_user(ubtf, btf->data, btf_copy))
4584 return -EFAULT;
4585 info.btf_size = btf->data_size;
4586
4587 if (copy_to_user(uinfo, &info, info_copy) ||
4588 put_user(info_copy, &uattr->info.info_len))
4589 return -EFAULT;
4590
4591 return 0;
4592}
4593
4594int btf_get_fd_by_id(u32 id)
4595{
4596 struct btf *btf;
4597 int fd;
4598
4599 rcu_read_lock();
4600 btf = idr_find(&btf_idr, id);
4601 if (!btf || !refcount_inc_not_zero(&btf->refcnt))
4602 btf = ERR_PTR(-ENOENT);
4603 rcu_read_unlock();
4604
4605 if (IS_ERR(btf))
4606 return PTR_ERR(btf);
4607
4608 fd = __btf_new_fd(btf);
4609 if (fd < 0)
4610 btf_put(btf);
4611
4612 return fd;
4613}
4614
4615u32 btf_id(const struct btf *btf)
4616{
4617 return btf->id;
4618}
4619