#include <uapi/linux/btf.h>
#include <uapi/linux/types.h>
#include <linux/seq_file.h>
#include <linux/compiler.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/idr.h>
#include <linux/sort.h>
#include <linux/bpf_verifier.h>
#include <linux/btf.h>

/* BTF (BPF Type Format) is the meta data that describes the data types
 * used by BPF programs and maps.  The verifier below sanity checks a
 * BTF blob in two phases:
 *
 *   CHECK_META: walk the type section once and validate the meta data
 *               of every btf_type (kind, vlen, kind_flag, name offsets,
 *               size/type fields, ...).
 *   CHECK_TYPE: resolve the types that refer to other types (ptr,
 *               typedef/const/volatile/restrict, struct/union members,
 *               array element and index types).  Resolution is done
 *               with an explicit stack (struct resolve_vertex) bounded
 *               by MAX_RESOLVE_DEPTH instead of recursion.  Each type
 *               moves from NOT_VISITED to VISITED to RESOLVED, which
 *               also catches reference loops.
 *
 * Type names live in a separate string section.  A name_off of 0 means
 * "anonymous"; otherwise it must be a valid offset into that section.
 */
#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2)
#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1)
#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK)
#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3)
#define BITS_ROUNDUP_BYTES(bits) \
	(BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits))
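
/* Example: a 12-bit field starting at bit offset 20 has
 * BITS_ROUNDDOWN_BYTES(20) == 2 (whole bytes before it),
 * BITS_PER_BYTE_MASKED(20) == 4 (bits into that byte), and needs
 * BITS_ROUNDUP_BYTES(12 + 4) == 2 bytes to be copied out.
 */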

#define BTF_INFO_MASK 0x8f00ffff
#define BTF_INT_MASK 0x0fffffff
#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE)
#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET)

/* Hard limit on the size of the BTF blob accepted by the kernel */
#define BTF_MAX_SIZE (16 * 1024 * 1024)
177
178#define for_each_member(i, struct_type, member) \
179 for (i = 0, member = btf_type_member(struct_type); \
180 i < btf_type_vlen(struct_type); \
181 i++, member++)
182
183#define for_each_member_from(i, from, struct_type, member) \
184 for (i = from, member = btf_type_member(struct_type) + from; \
185 i < btf_type_vlen(struct_type); \
186 i++, member++)
187
188static DEFINE_IDR(btf_idr);
189static DEFINE_SPINLOCK(btf_idr_lock);
190
191struct btf {
192 void *data;
193 struct btf_type **types;
194 u32 *resolved_ids;
195 u32 *resolved_sizes;
196 const char *strings;
197 void *nohdr_data;
198 struct btf_header hdr;
199 u32 nr_types;
200 u32 types_size;
201 u32 data_size;
202 refcount_t refcnt;
203 u32 id;
204 struct rcu_head rcu;
205};
206
207enum verifier_phase {
208 CHECK_META,
209 CHECK_TYPE,
210};
211
212struct resolve_vertex {
213 const struct btf_type *t;
214 u32 type_id;
215 u16 next_member;
216};
217
218enum visit_state {
219 NOT_VISITED,
220 VISITED,
221 RESOLVED,
222};
223
enum resolve_mode {
	RESOLVE_TBD,			/* To Be Determined */
	RESOLVE_PTR,			/* Resolving for Pointer */
	RESOLVE_STRUCT_OR_ARRAY,	/* Resolving for struct/union or array */
};
231
232#define MAX_RESOLVE_DEPTH 32
233
234struct btf_sec_info {
235 u32 off;
236 u32 len;
237};
238
239struct btf_verifier_env {
240 struct btf *btf;
241 u8 *visit_states;
242 struct resolve_vertex stack[MAX_RESOLVE_DEPTH];
243 struct bpf_verifier_log log;
244 u32 log_type_id;
245 u32 top_stack;
246 enum verifier_phase phase;
247 enum resolve_mode resolve_mode;
248};
249
250static const char * const btf_kind_str[NR_BTF_KINDS] = {
251 [BTF_KIND_UNKN] = "UNKNOWN",
252 [BTF_KIND_INT] = "INT",
253 [BTF_KIND_PTR] = "PTR",
254 [BTF_KIND_ARRAY] = "ARRAY",
255 [BTF_KIND_STRUCT] = "STRUCT",
256 [BTF_KIND_UNION] = "UNION",
257 [BTF_KIND_ENUM] = "ENUM",
258 [BTF_KIND_FWD] = "FWD",
259 [BTF_KIND_TYPEDEF] = "TYPEDEF",
260 [BTF_KIND_VOLATILE] = "VOLATILE",
261 [BTF_KIND_CONST] = "CONST",
262 [BTF_KIND_RESTRICT] = "RESTRICT",
263 [BTF_KIND_FUNC] = "FUNC",
264 [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
265};
266
267struct btf_kind_operations {
268 s32 (*check_meta)(struct btf_verifier_env *env,
269 const struct btf_type *t,
270 u32 meta_left);
271 int (*resolve)(struct btf_verifier_env *env,
272 const struct resolve_vertex *v);
273 int (*check_member)(struct btf_verifier_env *env,
274 const struct btf_type *struct_type,
275 const struct btf_member *member,
276 const struct btf_type *member_type);
277 int (*check_kflag_member)(struct btf_verifier_env *env,
278 const struct btf_type *struct_type,
279 const struct btf_member *member,
280 const struct btf_type *member_type);
281 void (*log_details)(struct btf_verifier_env *env,
282 const struct btf_type *t);
283 void (*seq_show)(const struct btf *btf, const struct btf_type *t,
284 u32 type_id, void *data, u8 bits_offsets,
285 struct seq_file *m);
286};
287
288static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
289static struct btf_type btf_void;
290
291static int btf_resolve(struct btf_verifier_env *env,
292 const struct btf_type *t, u32 type_id);
293
static bool btf_type_is_modifier(const struct btf_type *t)
{
	/* Some of these are not strictly C modifiers, but they are grouped
	 * into the same bucket for BTF's purposes: each refers to another
	 * type through t->type and its size cannot be determined without
	 * following that reference.
	 *
	 * PTR is not in this bucket because its size is always
	 * sizeof(void *).
	 */
	switch (BTF_INFO_KIND(t->info)) {
	case BTF_KIND_TYPEDEF:
	case BTF_KIND_VOLATILE:
	case BTF_KIND_CONST:
	case BTF_KIND_RESTRICT:
		return true;
	}

	return false;
}
316
317static bool btf_type_is_void(const struct btf_type *t)
318{
319 return t == &btf_void;
320}
321
322static bool btf_type_is_fwd(const struct btf_type *t)
323{
324 return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
325}
326
327static bool btf_type_is_func(const struct btf_type *t)
328{
329 return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
330}
331
332static bool btf_type_is_func_proto(const struct btf_type *t)
333{
334 return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
335}
336
337static bool btf_type_nosize(const struct btf_type *t)
338{
339 return btf_type_is_void(t) || btf_type_is_fwd(t) ||
340 btf_type_is_func(t) || btf_type_is_func_proto(t);
341}
342
343static bool btf_type_nosize_or_null(const struct btf_type *t)
344{
345 return !t || btf_type_nosize(t);
346}
347
/* union is a special case of struct: all its members are at offset 0 */
351static bool btf_type_is_struct(const struct btf_type *t)
352{
353 u8 kind = BTF_INFO_KIND(t->info);
354
355 return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
356}
357
358static bool __btf_type_is_struct(const struct btf_type *t)
359{
360 return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
361}
362
363static bool btf_type_is_array(const struct btf_type *t)
364{
365 return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
366}
367
368static bool btf_type_is_ptr(const struct btf_type *t)
369{
370 return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
371}
372
373static bool btf_type_is_int(const struct btf_type *t)
374{
375 return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
376}
377
/* What types need to be resolved?
 *
 * btf_type_is_modifier() is an obvious one.
 *
 * btf_type_is_struct() because its members refer to other types
 * (through member->type).
 *
 * btf_type_is_array() because its element type (array->type) and its
 * index type (array->index_type) refer to other types.
 *
 * btf_type_is_ptr() so that the pointed-to type id gets recorded.
 */
390static bool btf_type_needs_resolve(const struct btf_type *t)
391{
392 return btf_type_is_modifier(t) ||
393 btf_type_is_ptr(t) ||
394 btf_type_is_struct(t) ||
395 btf_type_is_array(t);
396}
397
/* kinds for which t->size can be used directly */
399static bool btf_type_has_size(const struct btf_type *t)
400{
401 switch (BTF_INFO_KIND(t->info)) {
402 case BTF_KIND_INT:
403 case BTF_KIND_STRUCT:
404 case BTF_KIND_UNION:
405 case BTF_KIND_ENUM:
406 return true;
407 }
408
409 return false;
410}
411
412static const char *btf_int_encoding_str(u8 encoding)
413{
414 if (encoding == 0)
415 return "(none)";
416 else if (encoding == BTF_INT_SIGNED)
417 return "SIGNED";
418 else if (encoding == BTF_INT_CHAR)
419 return "CHAR";
420 else if (encoding == BTF_INT_BOOL)
421 return "BOOL";
422 else
423 return "UNKN";
424}
425
426static u16 btf_type_vlen(const struct btf_type *t)
427{
428 return BTF_INFO_VLEN(t->info);
429}
430
431static bool btf_type_kflag(const struct btf_type *t)
432{
433 return BTF_INFO_KFLAG(t->info);
434}
435
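/* When the struct/union has kind_flag set, member->offset packs both
 * the bitfield size (upper 8 bits) and the bit offset (lower 24 bits).
 * Without kind_flag, member->offset is the plain bit offset and the
 * bitfield size is taken to be 0 (i.e. not a bitfield).
 */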
436static u32 btf_member_bit_offset(const struct btf_type *struct_type,
437 const struct btf_member *member)
438{
439 return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
440 : member->offset;
441}
442
443static u32 btf_member_bitfield_size(const struct btf_type *struct_type,
444 const struct btf_member *member)
445{
446 return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
447 : 0;
448}
449
450static u32 btf_type_int(const struct btf_type *t)
451{
452 return *(u32 *)(t + 1);
453}
454
455static const struct btf_array *btf_type_array(const struct btf_type *t)
456{
457 return (const struct btf_array *)(t + 1);
458}
459
460static const struct btf_member *btf_type_member(const struct btf_type *t)
461{
462 return (const struct btf_member *)(t + 1);
463}
464
465static const struct btf_enum *btf_type_enum(const struct btf_type *t)
466{
467 return (const struct btf_enum *)(t + 1);
468}
469
470static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
471{
472 return kind_ops[BTF_INFO_KIND(t->info)];
473}
474
475static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
476{
477 return BTF_STR_OFFSET_VALID(offset) &&
478 offset < btf->hdr.str_len;
479}
480
/* Only C-style identifiers (alnum and '_', not starting with a digit)
 * are permitted as names.
 */
484static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
485{
486
487 const char *src = &btf->strings[offset];
488 const char *src_limit;
489
490 if (!isalpha(*src) && *src != '_')
491 return false;
492
493
494 src_limit = src + KSYM_NAME_LEN;
495 src++;
496 while (*src && src < src_limit) {
497 if (!isalnum(*src) && *src != '_')
498 return false;
499 src++;
500 }
501
502 return !*src;
503}
504
505static const char *__btf_name_by_offset(const struct btf *btf, u32 offset)
506{
507 if (!offset)
508 return "(anon)";
509 else if (offset < btf->hdr.str_len)
510 return &btf->strings[offset];
511 else
512 return "(invalid-name-offset)";
513}
514
515const char *btf_name_by_offset(const struct btf *btf, u32 offset)
516{
517 if (offset < btf->hdr.str_len)
518 return &btf->strings[offset];
519
520 return NULL;
521}
522
523const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
524{
525 if (type_id > btf->nr_types)
526 return NULL;
527
528 return btf->types[type_id];
529}

/* Regular int is not a bit field and it must be either
 * u8/u16/u32/u64 or __int128.
 */
535static bool btf_type_int_is_regular(const struct btf_type *t)
536{
537 u8 nr_bits, nr_bytes;
538 u32 int_data;
539
540 int_data = btf_type_int(t);
541 nr_bits = BTF_INT_BITS(int_data);
542 nr_bytes = BITS_ROUNDUP_BYTES(nr_bits);
543 if (BITS_PER_BYTE_MASKED(nr_bits) ||
544 BTF_INT_OFFSET(int_data) ||
545 (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) &&
546 nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) &&
547 nr_bytes != (2 * sizeof(u64)))) {
548 return false;
549 }
550
551 return true;
552}

/* Check that the given struct member is a regular int with the
 * expected offset and size.
 */
558bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
559 const struct btf_member *m,
560 u32 expected_offset, u32 expected_size)
561{
562 const struct btf_type *t;
563 u32 id, int_data;
564 u8 nr_bits;
565
566 id = m->type;
567 t = btf_type_id_size(btf, &id, NULL);
568 if (!t || !btf_type_is_int(t))
569 return false;
570
571 int_data = btf_type_int(t);
572 nr_bits = BTF_INT_BITS(int_data);
573 if (btf_type_kflag(s)) {
574 u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset);
575 u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset);
576
		/* if kflag is set, the int must be a regular int and the
		 * bit offset must be at a byte boundary
		 */
580 return !bitfield_size &&
581 BITS_ROUNDUP_BYTES(bit_offset) == expected_offset &&
582 BITS_ROUNDUP_BYTES(nr_bits) == expected_size;
583 }
584
585 if (BTF_INT_OFFSET(int_data) ||
586 BITS_PER_BYTE_MASKED(m->offset) ||
587 BITS_ROUNDUP_BYTES(m->offset) != expected_offset ||
588 BITS_PER_BYTE_MASKED(nr_bits) ||
589 BITS_ROUNDUP_BYTES(nr_bits) != expected_size)
590 return false;
591
592 return true;
593}
594
595__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
596 const char *fmt, ...)
597{
598 va_list args;
599
600 va_start(args, fmt);
601 bpf_verifier_vlog(log, fmt, args);
602 va_end(args);
603}
604
605__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env,
606 const char *fmt, ...)
607{
608 struct bpf_verifier_log *log = &env->log;
609 va_list args;
610
611 if (!bpf_verifier_log_needed(log))
612 return;
613
614 va_start(args, fmt);
615 bpf_verifier_vlog(log, fmt, args);
616 va_end(args);
617}
618
619__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env,
620 const struct btf_type *t,
621 bool log_details,
622 const char *fmt, ...)
623{
624 struct bpf_verifier_log *log = &env->log;
625 u8 kind = BTF_INFO_KIND(t->info);
626 struct btf *btf = env->btf;
627 va_list args;
628
629 if (!bpf_verifier_log_needed(log))
630 return;
631
632 __btf_verifier_log(log, "[%u] %s %s%s",
633 env->log_type_id,
634 btf_kind_str[kind],
635 __btf_name_by_offset(btf, t->name_off),
636 log_details ? " " : "");
637
638 if (log_details)
639 btf_type_ops(t)->log_details(env, t);
640
641 if (fmt && *fmt) {
642 __btf_verifier_log(log, " ");
643 va_start(args, fmt);
644 bpf_verifier_vlog(log, fmt, args);
645 va_end(args);
646 }
647
648 __btf_verifier_log(log, "\n");
649}
650
651#define btf_verifier_log_type(env, t, ...) \
652 __btf_verifier_log_type((env), (t), true, __VA_ARGS__)
653#define btf_verifier_log_basic(env, t, ...) \
654 __btf_verifier_log_type((env), (t), false, __VA_ARGS__)
655
656__printf(4, 5)
657static void btf_verifier_log_member(struct btf_verifier_env *env,
658 const struct btf_type *struct_type,
659 const struct btf_member *member,
660 const char *fmt, ...)
661{
662 struct bpf_verifier_log *log = &env->log;
663 struct btf *btf = env->btf;
664 va_list args;
665
666 if (!bpf_verifier_log_needed(log))
667 return;
668
	/* The CHECK_META phase already did a btf dump.
	 *
	 * If a member is logged again, it must have hit an error while
	 * parsing this member.  It is useful to print out which struct
	 * this member belongs to.
	 */
675 if (env->phase != CHECK_META)
676 btf_verifier_log_type(env, struct_type, NULL);
677
678 if (btf_type_kflag(struct_type))
679 __btf_verifier_log(log,
680 "\t%s type_id=%u bitfield_size=%u bits_offset=%u",
681 __btf_name_by_offset(btf, member->name_off),
682 member->type,
683 BTF_MEMBER_BITFIELD_SIZE(member->offset),
684 BTF_MEMBER_BIT_OFFSET(member->offset));
685 else
686 __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u",
687 __btf_name_by_offset(btf, member->name_off),
688 member->type, member->offset);
689
690 if (fmt && *fmt) {
691 __btf_verifier_log(log, " ");
692 va_start(args, fmt);
693 bpf_verifier_vlog(log, fmt, args);
694 va_end(args);
695 }
696
697 __btf_verifier_log(log, "\n");
698}
699
700static void btf_verifier_log_hdr(struct btf_verifier_env *env,
701 u32 btf_data_size)
702{
703 struct bpf_verifier_log *log = &env->log;
704 const struct btf *btf = env->btf;
705 const struct btf_header *hdr;
706
707 if (!bpf_verifier_log_needed(log))
708 return;
709
710 hdr = &btf->hdr;
711 __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic);
712 __btf_verifier_log(log, "version: %u\n", hdr->version);
713 __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags);
714 __btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len);
715 __btf_verifier_log(log, "type_off: %u\n", hdr->type_off);
716 __btf_verifier_log(log, "type_len: %u\n", hdr->type_len);
717 __btf_verifier_log(log, "str_off: %u\n", hdr->str_off);
718 __btf_verifier_log(log, "str_len: %u\n", hdr->str_len);
719 __btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size);
720}
721
722static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t)
723{
724 struct btf *btf = env->btf;
725
	/* btf_void always occupies btf->types[0] and is not counted in
	 * btf->nr_types because it does not come from the BTF file,
	 * hence "< 2" to leave room for both it and the new type.
	 */
	if (btf->types_size - btf->nr_types < 2) {
		/* Expand 'types' array */
733 struct btf_type **new_types;
734 u32 expand_by, new_size;
735
736 if (btf->types_size == BTF_MAX_TYPE) {
737 btf_verifier_log(env, "Exceeded max num of types");
738 return -E2BIG;
739 }
740
741 expand_by = max_t(u32, btf->types_size >> 2, 16);
742 new_size = min_t(u32, BTF_MAX_TYPE,
743 btf->types_size + expand_by);
744
745 new_types = kvcalloc(new_size, sizeof(*new_types),
746 GFP_KERNEL | __GFP_NOWARN);
747 if (!new_types)
748 return -ENOMEM;
749
750 if (btf->nr_types == 0)
751 new_types[0] = &btf_void;
752 else
753 memcpy(new_types, btf->types,
754 sizeof(*btf->types) * (btf->nr_types + 1));
755
756 kvfree(btf->types);
757 btf->types = new_types;
758 btf->types_size = new_size;
759 }
760
761 btf->types[++(btf->nr_types)] = t;
762
763 return 0;
764}
765
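/* Allocate a BTF id from btf_idr.  Ids are handed out cyclically
 * starting at 1, so an id of 0 should never be returned and is
 * warned about.
 */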
766static int btf_alloc_id(struct btf *btf)
767{
768 int id;
769
770 idr_preload(GFP_KERNEL);
771 spin_lock_bh(&btf_idr_lock);
772 id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC);
773 if (id > 0)
774 btf->id = id;
775 spin_unlock_bh(&btf_idr_lock);
776 idr_preload_end();
777
778 if (WARN_ON_ONCE(!id))
779 return -ENOSPC;
780
781 return id > 0 ? 0 : id;
782}
783
784static void btf_free_id(struct btf *btf)
785{
786 unsigned long flags;
787
	/* In map-in-map, calling map_delete_elem() on the outer map can
	 * eventually call btf_free_id() on an inner map.  Some of the
	 * map_delete_elem() implementations may run with IRQs disabled,
	 * so use the _irqsave() variant instead of the _bh() one.
	 */
797 spin_lock_irqsave(&btf_idr_lock, flags);
798 idr_remove(&btf_idr, btf->id);
799 spin_unlock_irqrestore(&btf_idr_lock, flags);
800}
801
802static void btf_free(struct btf *btf)
803{
804 kvfree(btf->types);
805 kvfree(btf->resolved_sizes);
806 kvfree(btf->resolved_ids);
807 kvfree(btf->data);
808 kfree(btf);
809}
810
811static void btf_free_rcu(struct rcu_head *rcu)
812{
813 struct btf *btf = container_of(rcu, struct btf, rcu);
814
815 btf_free(btf);
816}
817
818void btf_put(struct btf *btf)
819{
820 if (btf && refcount_dec_and_test(&btf->refcnt)) {
821 btf_free_id(btf);
822 call_rcu(&btf->rcu, btf_free_rcu);
823 }
824}
825
826static int env_resolve_init(struct btf_verifier_env *env)
827{
828 struct btf *btf = env->btf;
829 u32 nr_types = btf->nr_types;
830 u32 *resolved_sizes = NULL;
831 u32 *resolved_ids = NULL;
832 u8 *visit_states = NULL;

	/* +1 so that type_id 0 (void) has a slot too */
835 resolved_sizes = kvcalloc(nr_types + 1, sizeof(*resolved_sizes),
836 GFP_KERNEL | __GFP_NOWARN);
837 if (!resolved_sizes)
838 goto nomem;
839
840 resolved_ids = kvcalloc(nr_types + 1, sizeof(*resolved_ids),
841 GFP_KERNEL | __GFP_NOWARN);
842 if (!resolved_ids)
843 goto nomem;
844
845 visit_states = kvcalloc(nr_types + 1, sizeof(*visit_states),
846 GFP_KERNEL | __GFP_NOWARN);
847 if (!visit_states)
848 goto nomem;
849
850 btf->resolved_sizes = resolved_sizes;
851 btf->resolved_ids = resolved_ids;
852 env->visit_states = visit_states;
853
854 return 0;
855
856nomem:
857 kvfree(resolved_sizes);
858 kvfree(resolved_ids);
859 kvfree(visit_states);
860 return -ENOMEM;
861}
862
863static void btf_verifier_env_free(struct btf_verifier_env *env)
864{
865 kvfree(env->visit_states);
866 kfree(env);
867}
868
869static bool env_type_is_resolve_sink(const struct btf_verifier_env *env,
870 const struct btf_type *next_type)
871{
	switch (env->resolve_mode) {
	case RESOLVE_TBD:
		/* int, enum or void is a sink */
		return !btf_type_needs_resolve(next_type);
	case RESOLVE_PTR:
		/* int, enum, void, struct, array, func or func_proto is a
		 * sink for ptr
		 */
		return !btf_type_is_modifier(next_type) &&
			!btf_type_is_ptr(next_type);
	case RESOLVE_STRUCT_OR_ARRAY:
		/* int, enum, void, ptr, func or func_proto is a sink
		 * for struct and array
		 */
		return !btf_type_is_modifier(next_type) &&
			!btf_type_is_array(next_type) &&
			!btf_type_is_struct(next_type);
	default:
		BUG();
	}
892}
893
894static bool env_type_is_resolved(const struct btf_verifier_env *env,
895 u32 type_id)
896{
897 return env->visit_states[type_id] == RESOLVED;
898}
899
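/* Push @t onto the resolve stack.  Returns -E2BIG if the chain of
 * types being resolved is deeper than MAX_RESOLVE_DEPTH, and -EEXIST
 * if @type_id has already been visited (e.g. a reference loop or a
 * type that is already pending further down the stack).
 */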
900static int env_stack_push(struct btf_verifier_env *env,
901 const struct btf_type *t, u32 type_id)
902{
903 struct resolve_vertex *v;
904
905 if (env->top_stack == MAX_RESOLVE_DEPTH)
906 return -E2BIG;
907
908 if (env->visit_states[type_id] != NOT_VISITED)
909 return -EEXIST;
910
911 env->visit_states[type_id] = VISITED;
912
913 v = &env->stack[env->top_stack++];
914 v->t = t;
915 v->type_id = type_id;
916 v->next_member = 0;
917
918 if (env->resolve_mode == RESOLVE_TBD) {
919 if (btf_type_is_ptr(t))
920 env->resolve_mode = RESOLVE_PTR;
921 else if (btf_type_is_struct(t) || btf_type_is_array(t))
922 env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY;
923 }
924
925 return 0;
926}
927
928static void env_stack_set_next_member(struct btf_verifier_env *env,
929 u16 next_member)
930{
931 env->stack[env->top_stack - 1].next_member = next_member;
932}
933
934static void env_stack_pop_resolved(struct btf_verifier_env *env,
935 u32 resolved_type_id,
936 u32 resolved_size)
937{
938 u32 type_id = env->stack[--(env->top_stack)].type_id;
939 struct btf *btf = env->btf;
940
941 btf->resolved_sizes[type_id] = resolved_size;
942 btf->resolved_ids[type_id] = resolved_type_id;
943 env->visit_states[type_id] = RESOLVED;
944}
945
946static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
947{
948 return env->top_stack ? &env->stack[env->top_stack - 1] : NULL;
949}

/* The input param "type_id" must point to a needs_resolve type */
952static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
953 u32 *type_id)
954{
955 *type_id = btf->resolved_ids[*type_id];
956 return btf_type_by_id(btf, *type_id);
957}
958
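/* Resolve *type_id to a type with a usable size: int/struct/union/enum
 * use t->size, arrays use their pre-computed resolved size, pointers
 * use sizeof(void *), and modifiers are followed to their resolved
 * type.  On success *type_id is updated and *ret_size (if non-NULL)
 * receives the size.  Returns NULL for void, fwd, func and func_proto,
 * which have no size.
 */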
959const struct btf_type *btf_type_id_size(const struct btf *btf,
960 u32 *type_id, u32 *ret_size)
961{
962 const struct btf_type *size_type;
963 u32 size_type_id = *type_id;
964 u32 size = 0;
965
966 size_type = btf_type_by_id(btf, size_type_id);
967 if (btf_type_nosize_or_null(size_type))
968 return NULL;
969
970 if (btf_type_has_size(size_type)) {
971 size = size_type->size;
972 } else if (btf_type_is_array(size_type)) {
973 size = btf->resolved_sizes[size_type_id];
974 } else if (btf_type_is_ptr(size_type)) {
975 size = sizeof(void *);
976 } else {
977 if (WARN_ON_ONCE(!btf_type_is_modifier(size_type)))
978 return NULL;
979
980 size = btf->resolved_sizes[size_type_id];
981 size_type_id = btf->resolved_ids[size_type_id];
982 size_type = btf_type_by_id(btf, size_type_id);
983 if (btf_type_nosize_or_null(size_type))
984 return NULL;
985 }
986
987 *type_id = size_type_id;
988 if (ret_size)
989 *ret_size = size;
990
991 return size_type;
992}
993
994static int btf_df_check_member(struct btf_verifier_env *env,
995 const struct btf_type *struct_type,
996 const struct btf_member *member,
997 const struct btf_type *member_type)
998{
999 btf_verifier_log_basic(env, struct_type,
1000 "Unsupported check_member");
1001 return -EINVAL;
1002}
1003
1004static int btf_df_check_kflag_member(struct btf_verifier_env *env,
1005 const struct btf_type *struct_type,
1006 const struct btf_member *member,
1007 const struct btf_type *member_type)
1008{
1009 btf_verifier_log_basic(env, struct_type,
1010 "Unsupported check_kflag_member");
1011 return -EINVAL;
1012}
1013
/* Used for ptr, array and struct/union type members.
 * int, enum and modifier types have their specific callback functions.
 */
1017static int btf_generic_check_kflag_member(struct btf_verifier_env *env,
1018 const struct btf_type *struct_type,
1019 const struct btf_member *member,
1020 const struct btf_type *member_type)
1021{
1022 if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) {
1023 btf_verifier_log_member(env, struct_type, member,
1024 "Invalid member bitfield_size");
1025 return -EINVAL;
1026 }
1027
	/* bitfield size is 0, so member->offset represents the bit offset
	 * only.  It is safe to call the non-kflag check_member variant.
	 */
1031 return btf_type_ops(member_type)->check_member(env, struct_type,
1032 member,
1033 member_type);
1034}
1035
1036static int btf_df_resolve(struct btf_verifier_env *env,
1037 const struct resolve_vertex *v)
1038{
1039 btf_verifier_log_basic(env, v->t, "Unsupported resolve");
1040 return -EINVAL;
1041}
1042
1043static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
1044 u32 type_id, void *data, u8 bits_offsets,
1045 struct seq_file *m)
1046{
1047 seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
1048}
1049
1050static int btf_int_check_member(struct btf_verifier_env *env,
1051 const struct btf_type *struct_type,
1052 const struct btf_member *member,
1053 const struct btf_type *member_type)
1054{
1055 u32 int_data = btf_type_int(member_type);
1056 u32 struct_bits_off = member->offset;
1057 u32 struct_size = struct_type->size;
1058 u32 nr_copy_bits;
1059 u32 bytes_offset;
1060
1061 if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) {
1062 btf_verifier_log_member(env, struct_type, member,
1063 "bits_offset exceeds U32_MAX");
1064 return -EINVAL;
1065 }
1066
1067 struct_bits_off += BTF_INT_OFFSET(int_data);
1068 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1069 nr_copy_bits = BTF_INT_BITS(int_data) +
1070 BITS_PER_BYTE_MASKED(struct_bits_off);
1071
1072 if (nr_copy_bits > BITS_PER_U128) {
1073 btf_verifier_log_member(env, struct_type, member,
1074 "nr_copy_bits exceeds 128");
1075 return -EINVAL;
1076 }
1077
1078 if (struct_size < bytes_offset ||
1079 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1080 btf_verifier_log_member(env, struct_type, member,
1081 "Member exceeds struct_size");
1082 return -EINVAL;
1083 }
1084
1085 return 0;
1086}
1087
1088static int btf_int_check_kflag_member(struct btf_verifier_env *env,
1089 const struct btf_type *struct_type,
1090 const struct btf_member *member,
1091 const struct btf_type *member_type)
1092{
1093 u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset;
1094 u32 int_data = btf_type_int(member_type);
1095 u32 struct_size = struct_type->size;
1096 u32 nr_copy_bits;
1097
1098
1099 if (!btf_type_int_is_regular(member_type)) {
1100 btf_verifier_log_member(env, struct_type, member,
1101 "Invalid member base type");
1102 return -EINVAL;
1103 }
1104
1105
1106 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
1107 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
1108 nr_int_data_bits = BTF_INT_BITS(int_data);
1109 if (!nr_bits) {
1110
1111
1112
1113 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1114 btf_verifier_log_member(env, struct_type, member,
1115 "Invalid member offset");
1116 return -EINVAL;
1117 }
1118
1119 nr_bits = nr_int_data_bits;
1120 } else if (nr_bits > nr_int_data_bits) {
1121 btf_verifier_log_member(env, struct_type, member,
1122 "Invalid member bitfield_size");
1123 return -EINVAL;
1124 }
1125
1126 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1127 nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off);
1128 if (nr_copy_bits > BITS_PER_U128) {
1129 btf_verifier_log_member(env, struct_type, member,
1130 "nr_copy_bits exceeds 128");
1131 return -EINVAL;
1132 }
1133
1134 if (struct_size < bytes_offset ||
1135 struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) {
1136 btf_verifier_log_member(env, struct_type, member,
1137 "Member exceeds struct_size");
1138 return -EINVAL;
1139 }
1140
1141 return 0;
1142}
1143
1144static s32 btf_int_check_meta(struct btf_verifier_env *env,
1145 const struct btf_type *t,
1146 u32 meta_left)
1147{
1148 u32 int_data, nr_bits, meta_needed = sizeof(int_data);
1149 u16 encoding;
1150
1151 if (meta_left < meta_needed) {
1152 btf_verifier_log_basic(env, t,
1153 "meta_left:%u meta_needed:%u",
1154 meta_left, meta_needed);
1155 return -EINVAL;
1156 }
1157
1158 if (btf_type_vlen(t)) {
1159 btf_verifier_log_type(env, t, "vlen != 0");
1160 return -EINVAL;
1161 }
1162
1163 if (btf_type_kflag(t)) {
1164 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1165 return -EINVAL;
1166 }
1167
1168 int_data = btf_type_int(t);
1169 if (int_data & ~BTF_INT_MASK) {
1170 btf_verifier_log_basic(env, t, "Invalid int_data:%x",
1171 int_data);
1172 return -EINVAL;
1173 }
1174
1175 nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data);
1176
1177 if (nr_bits > BITS_PER_U128) {
1178 btf_verifier_log_type(env, t, "nr_bits exceeds %zu",
1179 BITS_PER_U128);
1180 return -EINVAL;
1181 }
1182
1183 if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) {
1184 btf_verifier_log_type(env, t, "nr_bits exceeds type_size");
1185 return -EINVAL;
1186 }
	/* Only one of the encoding bits is allowed and it should be
	 * sufficient for the pretty print purpose (i.e. decoding).
	 * Multiple bits can be allowed later if it is found to be
	 * insufficient.
	 */
1194 encoding = BTF_INT_ENCODING(int_data);
1195 if (encoding &&
1196 encoding != BTF_INT_SIGNED &&
1197 encoding != BTF_INT_CHAR &&
1198 encoding != BTF_INT_BOOL) {
1199 btf_verifier_log_type(env, t, "Unsupported encoding");
1200 return -ENOTSUPP;
1201 }
1202
1203 btf_verifier_log_type(env, t, NULL);
1204
1205 return meta_needed;
1206}
1207
1208static void btf_int_log(struct btf_verifier_env *env,
1209 const struct btf_type *t)
1210{
1211 int int_data = btf_type_int(t);
1212
1213 btf_verifier_log(env,
1214 "size=%u bits_offset=%u nr_bits=%u encoding=%s",
1215 t->size, BTF_INT_OFFSET(int_data),
1216 BTF_INT_BITS(int_data),
1217 btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
1218}
1219
1220static void btf_int128_print(struct seq_file *m, void *data)
1221{
	/* data points to a __int128 number.
	 * Suppose
	 *     int128_num = *(__int128 *)data;
	 * The formulas below show what upper_num and lower_num represent:
	 *     upper_num = int128_num >> 64;
	 *     lower_num = int128_num & 0xffffffffFFFFFFFF;
	 */
1229 u64 upper_num, lower_num;
1230
1231#ifdef __BIG_ENDIAN_BITFIELD
1232 upper_num = *(u64 *)data;
1233 lower_num = *(u64 *)(data + 8);
1234#else
1235 upper_num = *(u64 *)(data + 8);
1236 lower_num = *(u64 *)data;
1237#endif
1238 if (upper_num == 0)
1239 seq_printf(m, "0x%llx", lower_num);
1240 else
1241 seq_printf(m, "0x%llx%016llx", upper_num, lower_num);
1242}
1243
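/* Shift the 128-bit value stored in print_num[] left and then right so
 * that only the bits of interest remain, independent of endianness.
 */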
1244static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
1245 u16 right_shift_bits)
1246{
1247 u64 upper_num, lower_num;
1248
1249#ifdef __BIG_ENDIAN_BITFIELD
1250 upper_num = print_num[0];
1251 lower_num = print_num[1];
1252#else
1253 upper_num = print_num[1];
1254 lower_num = print_num[0];
1255#endif

	/* shake out unneeded bits by shift/or operations */
1258 if (left_shift_bits >= 64) {
1259 upper_num = lower_num << (left_shift_bits - 64);
1260 lower_num = 0;
1261 } else {
1262 upper_num = (upper_num << left_shift_bits) |
1263 (lower_num >> (64 - left_shift_bits));
1264 lower_num = lower_num << left_shift_bits;
1265 }
1266
1267 if (right_shift_bits >= 64) {
1268 lower_num = upper_num >> (right_shift_bits - 64);
1269 upper_num = 0;
1270 } else {
1271 lower_num = (lower_num >> right_shift_bits) |
1272 (upper_num << (64 - right_shift_bits));
1273 upper_num = upper_num >> right_shift_bits;
1274 }
1275
1276#ifdef __BIG_ENDIAN_BITFIELD
1277 print_num[0] = upper_num;
1278 print_num[1] = lower_num;
1279#else
1280 print_num[0] = lower_num;
1281 print_num[1] = upper_num;
1282#endif
1283}
1284
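/* Print a bitfield that is @nr_bits wide and starts @bits_offset bits
 * into @data.  At most 128 bits (16 bytes) are copied into print_num[]
 * and isolated with btf_int128_shift() before printing.
 */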
1285static void btf_bitfield_seq_show(void *data, u8 bits_offset,
1286 u8 nr_bits, struct seq_file *m)
1287{
1288 u16 left_shift_bits, right_shift_bits;
1289 u8 nr_copy_bytes;
1290 u8 nr_copy_bits;
1291 u64 print_num[2] = {};
1292
1293 nr_copy_bits = nr_bits + bits_offset;
1294 nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
1295
1296 memcpy(print_num, data, nr_copy_bytes);
1297
1298#ifdef __BIG_ENDIAN_BITFIELD
1299 left_shift_bits = bits_offset;
1300#else
1301 left_shift_bits = BITS_PER_U128 - nr_copy_bits;
1302#endif
1303 right_shift_bits = BITS_PER_U128 - nr_bits;
1304
1305 btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
1306 btf_int128_print(m, print_num);
1307}
1308
1309
1310static void btf_int_bits_seq_show(const struct btf *btf,
1311 const struct btf_type *t,
1312 void *data, u8 bits_offset,
1313 struct seq_file *m)
1314{
1315 u32 int_data = btf_type_int(t);
1316 u8 nr_bits = BTF_INT_BITS(int_data);
1317 u8 total_bits_offset;

	/*
	 * bits_offset is at most 7.  btf_int_check_meta() guarantees that
	 * BTF_INT_OFFSET() cannot exceed 128 bits.
	 */
1323 total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
1324 data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
1325 bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
1326 btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
1327}
1328
1329static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
1330 u32 type_id, void *data, u8 bits_offset,
1331 struct seq_file *m)
1332{
1333 u32 int_data = btf_type_int(t);
1334 u8 encoding = BTF_INT_ENCODING(int_data);
1335 bool sign = encoding & BTF_INT_SIGNED;
1336 u8 nr_bits = BTF_INT_BITS(int_data);
1337
1338 if (bits_offset || BTF_INT_OFFSET(int_data) ||
1339 BITS_PER_BYTE_MASKED(nr_bits)) {
1340 btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1341 return;
1342 }
1343
1344 switch (nr_bits) {
1345 case 128:
1346 btf_int128_print(m, data);
1347 break;
1348 case 64:
1349 if (sign)
1350 seq_printf(m, "%lld", *(s64 *)data);
1351 else
1352 seq_printf(m, "%llu", *(u64 *)data);
1353 break;
1354 case 32:
1355 if (sign)
1356 seq_printf(m, "%d", *(s32 *)data);
1357 else
1358 seq_printf(m, "%u", *(u32 *)data);
1359 break;
1360 case 16:
1361 if (sign)
1362 seq_printf(m, "%d", *(s16 *)data);
1363 else
1364 seq_printf(m, "%u", *(u16 *)data);
1365 break;
1366 case 8:
1367 if (sign)
1368 seq_printf(m, "%d", *(s8 *)data);
1369 else
1370 seq_printf(m, "%u", *(u8 *)data);
1371 break;
1372 default:
1373 btf_int_bits_seq_show(btf, t, data, bits_offset, m);
1374 }
1375}
1376
1377static const struct btf_kind_operations int_ops = {
1378 .check_meta = btf_int_check_meta,
1379 .resolve = btf_df_resolve,
1380 .check_member = btf_int_check_member,
1381 .check_kflag_member = btf_int_check_kflag_member,
1382 .log_details = btf_int_log,
1383 .seq_show = btf_int_seq_show,
1384};
1385
1386static int btf_modifier_check_member(struct btf_verifier_env *env,
1387 const struct btf_type *struct_type,
1388 const struct btf_member *member,
1389 const struct btf_type *member_type)
1390{
1391 const struct btf_type *resolved_type;
1392 u32 resolved_type_id = member->type;
1393 struct btf_member resolved_member;
1394 struct btf *btf = env->btf;
1395
1396 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1397 if (!resolved_type) {
1398 btf_verifier_log_member(env, struct_type, member,
1399 "Invalid member");
1400 return -EINVAL;
1401 }
1402
1403 resolved_member = *member;
1404 resolved_member.type = resolved_type_id;
1405
1406 return btf_type_ops(resolved_type)->check_member(env, struct_type,
1407 &resolved_member,
1408 resolved_type);
1409}
1410
1411static int btf_modifier_check_kflag_member(struct btf_verifier_env *env,
1412 const struct btf_type *struct_type,
1413 const struct btf_member *member,
1414 const struct btf_type *member_type)
1415{
1416 const struct btf_type *resolved_type;
1417 u32 resolved_type_id = member->type;
1418 struct btf_member resolved_member;
1419 struct btf *btf = env->btf;
1420
1421 resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL);
1422 if (!resolved_type) {
1423 btf_verifier_log_member(env, struct_type, member,
1424 "Invalid member");
1425 return -EINVAL;
1426 }
1427
1428 resolved_member = *member;
1429 resolved_member.type = resolved_type_id;
1430
1431 return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type,
1432 &resolved_member,
1433 resolved_type);
1434}
1435
1436static int btf_ptr_check_member(struct btf_verifier_env *env,
1437 const struct btf_type *struct_type,
1438 const struct btf_member *member,
1439 const struct btf_type *member_type)
1440{
1441 u32 struct_size, struct_bits_off, bytes_offset;
1442
1443 struct_size = struct_type->size;
1444 struct_bits_off = member->offset;
1445 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1446
1447 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1448 btf_verifier_log_member(env, struct_type, member,
1449 "Member is not byte aligned");
1450 return -EINVAL;
1451 }
1452
1453 if (struct_size - bytes_offset < sizeof(void *)) {
1454 btf_verifier_log_member(env, struct_type, member,
1455 "Member exceeds struct_size");
1456 return -EINVAL;
1457 }
1458
1459 return 0;
1460}
1461
1462static int btf_ref_type_check_meta(struct btf_verifier_env *env,
1463 const struct btf_type *t,
1464 u32 meta_left)
1465{
1466 if (btf_type_vlen(t)) {
1467 btf_verifier_log_type(env, t, "vlen != 0");
1468 return -EINVAL;
1469 }
1470
1471 if (btf_type_kflag(t)) {
1472 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1473 return -EINVAL;
1474 }
1475
1476 if (!BTF_TYPE_ID_VALID(t->type)) {
1477 btf_verifier_log_type(env, t, "Invalid type_id");
1478 return -EINVAL;
1479 }
1480
	/* typedef type must have a valid name, and the other ref types,
	 * volatile, const and restrict, must have a null name.
	 */
1484 if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
1485 if (!t->name_off ||
1486 !btf_name_valid_identifier(env->btf, t->name_off)) {
1487 btf_verifier_log_type(env, t, "Invalid name");
1488 return -EINVAL;
1489 }
1490 } else {
1491 if (t->name_off) {
1492 btf_verifier_log_type(env, t, "Invalid name");
1493 return -EINVAL;
1494 }
1495 }
1496
1497 btf_verifier_log_type(env, t, NULL);
1498
1499 return 0;
1500}
1501
1502static int btf_modifier_resolve(struct btf_verifier_env *env,
1503 const struct resolve_vertex *v)
1504{
1505 const struct btf_type *t = v->t;
1506 const struct btf_type *next_type;
1507 u32 next_type_id = t->type;
1508 struct btf *btf = env->btf;
1509 u32 next_type_size = 0;
1510
1511 next_type = btf_type_by_id(btf, next_type_id);
1512 if (!next_type) {
1513 btf_verifier_log_type(env, v->t, "Invalid type_id");
1514 return -EINVAL;
1515 }
1516
1517 if (!env_type_is_resolve_sink(env, next_type) &&
1518 !env_type_is_resolved(env, next_type_id))
1519 return env_stack_push(env, next_type, next_type_id);

	/* Figure out the resolved next_type_id with size.
	 * They will be stored in the current modifier's
	 * resolved_ids and resolved_sizes such that it can
	 * save us a few type-followings when we use it later (e.g. in
	 * pretty print).
	 */
	if (!btf_type_id_size(btf, &next_type_id, &next_type_size)) {
		if (env_type_is_resolved(env, next_type_id))
			next_type = btf_type_id_resolve(btf, &next_type_id);

		/* "typedef void new_void", "const void"...etc */
1532 if (!btf_type_is_void(next_type) &&
1533 !btf_type_is_fwd(next_type) &&
1534 !btf_type_is_func_proto(next_type)) {
1535 btf_verifier_log_type(env, v->t, "Invalid type_id");
1536 return -EINVAL;
1537 }
1538 }
1539
1540 env_stack_pop_resolved(env, next_type_id, next_type_size);
1541
1542 return 0;
1543}
1544
1545static int btf_ptr_resolve(struct btf_verifier_env *env,
1546 const struct resolve_vertex *v)
1547{
1548 const struct btf_type *next_type;
1549 const struct btf_type *t = v->t;
1550 u32 next_type_id = t->type;
1551 struct btf *btf = env->btf;
1552
1553 next_type = btf_type_by_id(btf, next_type_id);
1554 if (!next_type) {
1555 btf_verifier_log_type(env, v->t, "Invalid type_id");
1556 return -EINVAL;
1557 }
1558
1559 if (!env_type_is_resolve_sink(env, next_type) &&
1560 !env_type_is_resolved(env, next_type_id))
1561 return env_stack_push(env, next_type, next_type_id);

	/* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY,
	 * the modifier may have stopped resolving when it was resolved
	 * to a ptr (last-resolved-ptr).
	 *
	 * We now need to continue from the last-resolved-ptr to
	 * ensure the last-resolved-ptr will not refer back to the
	 * current ptr (t).
	 */
1571 if (btf_type_is_modifier(next_type)) {
1572 const struct btf_type *resolved_type;
1573 u32 resolved_type_id;
1574
1575 resolved_type_id = next_type_id;
1576 resolved_type = btf_type_id_resolve(btf, &resolved_type_id);
1577
1578 if (btf_type_is_ptr(resolved_type) &&
1579 !env_type_is_resolve_sink(env, resolved_type) &&
1580 !env_type_is_resolved(env, resolved_type_id))
1581 return env_stack_push(env, resolved_type,
1582 resolved_type_id);
1583 }
1584
1585 if (!btf_type_id_size(btf, &next_type_id, NULL)) {
1586 if (env_type_is_resolved(env, next_type_id))
1587 next_type = btf_type_id_resolve(btf, &next_type_id);
1588
1589 if (!btf_type_is_void(next_type) &&
1590 !btf_type_is_fwd(next_type) &&
1591 !btf_type_is_func_proto(next_type)) {
1592 btf_verifier_log_type(env, v->t, "Invalid type_id");
1593 return -EINVAL;
1594 }
1595 }
1596
1597 env_stack_pop_resolved(env, next_type_id, 0);
1598
1599 return 0;
1600}
1601
1602static void btf_modifier_seq_show(const struct btf *btf,
1603 const struct btf_type *t,
1604 u32 type_id, void *data,
1605 u8 bits_offset, struct seq_file *m)
1606{
1607 t = btf_type_id_resolve(btf, &type_id);
1608
1609 btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
1610}
1611
1612static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
1613 u32 type_id, void *data, u8 bits_offset,
1614 struct seq_file *m)
1615{
	/* %p prints a hashed pointer value */
1617 seq_printf(m, "%p", *(void **)data);
1618}
1619
1620static void btf_ref_type_log(struct btf_verifier_env *env,
1621 const struct btf_type *t)
1622{
1623 btf_verifier_log(env, "type_id=%u", t->type);
1624}
1625
1626static struct btf_kind_operations modifier_ops = {
1627 .check_meta = btf_ref_type_check_meta,
1628 .resolve = btf_modifier_resolve,
1629 .check_member = btf_modifier_check_member,
1630 .check_kflag_member = btf_modifier_check_kflag_member,
1631 .log_details = btf_ref_type_log,
1632 .seq_show = btf_modifier_seq_show,
1633};
1634
1635static struct btf_kind_operations ptr_ops = {
1636 .check_meta = btf_ref_type_check_meta,
1637 .resolve = btf_ptr_resolve,
1638 .check_member = btf_ptr_check_member,
1639 .check_kflag_member = btf_generic_check_kflag_member,
1640 .log_details = btf_ref_type_log,
1641 .seq_show = btf_ptr_seq_show,
1642};
1643
1644static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
1645 const struct btf_type *t,
1646 u32 meta_left)
1647{
1648 if (btf_type_vlen(t)) {
1649 btf_verifier_log_type(env, t, "vlen != 0");
1650 return -EINVAL;
1651 }
1652
1653 if (t->type) {
1654 btf_verifier_log_type(env, t, "type != 0");
1655 return -EINVAL;
1656 }
1657
	/* fwd type must have a valid name */
1659 if (!t->name_off ||
1660 !btf_name_valid_identifier(env->btf, t->name_off)) {
1661 btf_verifier_log_type(env, t, "Invalid name");
1662 return -EINVAL;
1663 }
1664
1665 btf_verifier_log_type(env, t, NULL);
1666
1667 return 0;
1668}
1669
1670static void btf_fwd_type_log(struct btf_verifier_env *env,
1671 const struct btf_type *t)
1672{
1673 btf_verifier_log(env, "%s", btf_type_kflag(t) ? "union" : "struct");
1674}
1675
1676static struct btf_kind_operations fwd_ops = {
1677 .check_meta = btf_fwd_check_meta,
1678 .resolve = btf_df_resolve,
1679 .check_member = btf_df_check_member,
1680 .check_kflag_member = btf_df_check_kflag_member,
1681 .log_details = btf_fwd_type_log,
1682 .seq_show = btf_df_seq_show,
1683};
1684
1685static int btf_array_check_member(struct btf_verifier_env *env,
1686 const struct btf_type *struct_type,
1687 const struct btf_member *member,
1688 const struct btf_type *member_type)
1689{
1690 u32 struct_bits_off = member->offset;
1691 u32 struct_size, bytes_offset;
1692 u32 array_type_id, array_size;
1693 struct btf *btf = env->btf;
1694
1695 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1696 btf_verifier_log_member(env, struct_type, member,
1697 "Member is not byte aligned");
1698 return -EINVAL;
1699 }
1700
1701 array_type_id = member->type;
1702 btf_type_id_size(btf, &array_type_id, &array_size);
1703 struct_size = struct_type->size;
1704 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1705 if (struct_size - bytes_offset < array_size) {
1706 btf_verifier_log_member(env, struct_type, member,
1707 "Member exceeds struct_size");
1708 return -EINVAL;
1709 }
1710
1711 return 0;
1712}
1713
1714static s32 btf_array_check_meta(struct btf_verifier_env *env,
1715 const struct btf_type *t,
1716 u32 meta_left)
1717{
1718 const struct btf_array *array = btf_type_array(t);
1719 u32 meta_needed = sizeof(*array);
1720
1721 if (meta_left < meta_needed) {
1722 btf_verifier_log_basic(env, t,
1723 "meta_left:%u meta_needed:%u",
1724 meta_left, meta_needed);
1725 return -EINVAL;
1726 }
1727
	/* array type should not have a name */
1729 if (t->name_off) {
1730 btf_verifier_log_type(env, t, "Invalid name");
1731 return -EINVAL;
1732 }
1733
1734 if (btf_type_vlen(t)) {
1735 btf_verifier_log_type(env, t, "vlen != 0");
1736 return -EINVAL;
1737 }
1738
1739 if (btf_type_kflag(t)) {
1740 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
1741 return -EINVAL;
1742 }
1743
1744 if (t->size) {
1745 btf_verifier_log_type(env, t, "size != 0");
1746 return -EINVAL;
1747 }
1748
	/* Array elem type and index type cannot be in type void,
	 * so !array->type and !array->index_type are not allowed.
	 */
1752 if (!array->type || !BTF_TYPE_ID_VALID(array->type)) {
1753 btf_verifier_log_type(env, t, "Invalid elem");
1754 return -EINVAL;
1755 }
1756
1757 if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) {
1758 btf_verifier_log_type(env, t, "Invalid index");
1759 return -EINVAL;
1760 }
1761
1762 btf_verifier_log_type(env, t, NULL);
1763
1764 return meta_needed;
1765}
1766
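/* Resolve an array: the index type must resolve to a regular int and
 * the element type must resolve to something with a size.  The array's
 * own resolved size is recorded as elem_size * array->nelems, with an
 * overflow check against U32_MAX.
 */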
1767static int btf_array_resolve(struct btf_verifier_env *env,
1768 const struct resolve_vertex *v)
1769{
1770 const struct btf_array *array = btf_type_array(v->t);
1771 const struct btf_type *elem_type, *index_type;
1772 u32 elem_type_id, index_type_id;
1773 struct btf *btf = env->btf;
1774 u32 elem_size;
1775
	/* Check array->index_type */
1777 index_type_id = array->index_type;
1778 index_type = btf_type_by_id(btf, index_type_id);
1779 if (btf_type_nosize_or_null(index_type)) {
1780 btf_verifier_log_type(env, v->t, "Invalid index");
1781 return -EINVAL;
1782 }
1783
1784 if (!env_type_is_resolve_sink(env, index_type) &&
1785 !env_type_is_resolved(env, index_type_id))
1786 return env_stack_push(env, index_type, index_type_id);
1787
1788 index_type = btf_type_id_size(btf, &index_type_id, NULL);
1789 if (!index_type || !btf_type_is_int(index_type) ||
1790 !btf_type_int_is_regular(index_type)) {
1791 btf_verifier_log_type(env, v->t, "Invalid index");
1792 return -EINVAL;
1793 }

	/* Check array->type */
1796 elem_type_id = array->type;
1797 elem_type = btf_type_by_id(btf, elem_type_id);
1798 if (btf_type_nosize_or_null(elem_type)) {
1799 btf_verifier_log_type(env, v->t,
1800 "Invalid elem");
1801 return -EINVAL;
1802 }
1803
1804 if (!env_type_is_resolve_sink(env, elem_type) &&
1805 !env_type_is_resolved(env, elem_type_id))
1806 return env_stack_push(env, elem_type, elem_type_id);
1807
1808 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1809 if (!elem_type) {
1810 btf_verifier_log_type(env, v->t, "Invalid elem");
1811 return -EINVAL;
1812 }
1813
1814 if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) {
1815 btf_verifier_log_type(env, v->t, "Invalid array of int");
1816 return -EINVAL;
1817 }
1818
1819 if (array->nelems && elem_size > U32_MAX / array->nelems) {
1820 btf_verifier_log_type(env, v->t,
1821 "Array size overflows U32_MAX");
1822 return -EINVAL;
1823 }
1824
1825 env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems);
1826
1827 return 0;
1828}
1829
1830static void btf_array_log(struct btf_verifier_env *env,
1831 const struct btf_type *t)
1832{
1833 const struct btf_array *array = btf_type_array(t);
1834
1835 btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u",
1836 array->type, array->index_type, array->nelems);
1837}
1838
1839static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
1840 u32 type_id, void *data, u8 bits_offset,
1841 struct seq_file *m)
1842{
1843 const struct btf_array *array = btf_type_array(t);
1844 const struct btf_kind_operations *elem_ops;
1845 const struct btf_type *elem_type;
1846 u32 i, elem_size, elem_type_id;
1847
1848 elem_type_id = array->type;
1849 elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
1850 elem_ops = btf_type_ops(elem_type);
1851 seq_puts(m, "[");
1852 for (i = 0; i < array->nelems; i++) {
1853 if (i)
1854 seq_puts(m, ",");
1855
1856 elem_ops->seq_show(btf, elem_type, elem_type_id, data,
1857 bits_offset, m);
1858 data += elem_size;
1859 }
1860 seq_puts(m, "]");
1861}
1862
1863static struct btf_kind_operations array_ops = {
1864 .check_meta = btf_array_check_meta,
1865 .resolve = btf_array_resolve,
1866 .check_member = btf_array_check_member,
1867 .check_kflag_member = btf_generic_check_kflag_member,
1868 .log_details = btf_array_log,
1869 .seq_show = btf_array_seq_show,
1870};
1871
1872static int btf_struct_check_member(struct btf_verifier_env *env,
1873 const struct btf_type *struct_type,
1874 const struct btf_member *member,
1875 const struct btf_type *member_type)
1876{
1877 u32 struct_bits_off = member->offset;
1878 u32 struct_size, bytes_offset;
1879
1880 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
1881 btf_verifier_log_member(env, struct_type, member,
1882 "Member is not byte aligned");
1883 return -EINVAL;
1884 }
1885
1886 struct_size = struct_type->size;
1887 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
1888 if (struct_size - bytes_offset < member_type->size) {
1889 btf_verifier_log_member(env, struct_type, member,
1890 "Member exceeds struct_size");
1891 return -EINVAL;
1892 }
1893
1894 return 0;
1895}
1896
1897static s32 btf_struct_check_meta(struct btf_verifier_env *env,
1898 const struct btf_type *t,
1899 u32 meta_left)
1900{
1901 bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION;
1902 const struct btf_member *member;
1903 u32 meta_needed, last_offset;
1904 struct btf *btf = env->btf;
1905 u32 struct_size = t->size;
1906 u32 offset;
1907 u16 i;
1908
1909 meta_needed = btf_type_vlen(t) * sizeof(*member);
1910 if (meta_left < meta_needed) {
1911 btf_verifier_log_basic(env, t,
1912 "meta_left:%u meta_needed:%u",
1913 meta_left, meta_needed);
1914 return -EINVAL;
1915 }

	/* struct type either no name or a valid one */
1918 if (t->name_off &&
1919 !btf_name_valid_identifier(env->btf, t->name_off)) {
1920 btf_verifier_log_type(env, t, "Invalid name");
1921 return -EINVAL;
1922 }
1923
1924 btf_verifier_log_type(env, t, NULL);
1925
1926 last_offset = 0;
1927 for_each_member(i, t, member) {
1928 if (!btf_name_offset_valid(btf, member->name_off)) {
1929 btf_verifier_log_member(env, t, member,
1930 "Invalid member name_offset:%u",
1931 member->name_off);
1932 return -EINVAL;
1933 }

		/* struct member either no name or a valid one */
1936 if (member->name_off &&
1937 !btf_name_valid_identifier(btf, member->name_off)) {
1938 btf_verifier_log_member(env, t, member, "Invalid name");
1939 return -EINVAL;
1940 }
1941
1942 if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
1943 btf_verifier_log_member(env, t, member,
1944 "Invalid type_id");
1945 return -EINVAL;
1946 }
1947
1948 offset = btf_member_bit_offset(t, member);
1949 if (is_union && offset) {
1950 btf_verifier_log_member(env, t, member,
1951 "Invalid member bits_offset");
1952 return -EINVAL;
1953 }

		/*
		 * ">" instead of ">=" because the last member could be
		 * "char a[0];"
		 */
1959 if (last_offset > offset) {
1960 btf_verifier_log_member(env, t, member,
1961 "Invalid member bits_offset");
1962 return -EINVAL;
1963 }
1964
1965 if (BITS_ROUNDUP_BYTES(offset) > struct_size) {
1966 btf_verifier_log_member(env, t, member,
1967 "Member bits_offset exceeds its struct size");
1968 return -EINVAL;
1969 }
1970
1971 btf_verifier_log_member(env, t, member, NULL);
1972 last_offset = offset;
1973 }
1974
1975 return meta_needed;
1976}
1977
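/* Resolve a struct/union by walking its members from v->next_member.
 * When a member's type is not yet resolved, it is pushed onto the
 * resolve stack and this vertex is revisited later; on re-entry the
 * previously pending member is re-checked now that it has size info.
 */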
1978static int btf_struct_resolve(struct btf_verifier_env *env,
1979 const struct resolve_vertex *v)
1980{
1981 const struct btf_member *member;
1982 int err;
1983 u16 i;
1984
	/* Before continuing with the next member, make sure the last
	 * member was indeed resolved to a type with size info.
	 */
1989 if (v->next_member) {
1990 const struct btf_type *last_member_type;
1991 const struct btf_member *last_member;
1992 u16 last_member_type_id;
1993
1994 last_member = btf_type_member(v->t) + v->next_member - 1;
1995 last_member_type_id = last_member->type;
1996 if (WARN_ON_ONCE(!env_type_is_resolved(env,
1997 last_member_type_id)))
1998 return -EINVAL;
1999
2000 last_member_type = btf_type_by_id(env->btf,
2001 last_member_type_id);
2002 if (btf_type_kflag(v->t))
2003 err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t,
2004 last_member,
2005 last_member_type);
2006 else
2007 err = btf_type_ops(last_member_type)->check_member(env, v->t,
2008 last_member,
2009 last_member_type);
2010 if (err)
2011 return err;
2012 }
2013
2014 for_each_member_from(i, v->next_member, v->t, member) {
2015 u32 member_type_id = member->type;
2016 const struct btf_type *member_type = btf_type_by_id(env->btf,
2017 member_type_id);
2018
2019 if (btf_type_nosize_or_null(member_type)) {
2020 btf_verifier_log_member(env, v->t, member,
2021 "Invalid member");
2022 return -EINVAL;
2023 }
2024
2025 if (!env_type_is_resolve_sink(env, member_type) &&
2026 !env_type_is_resolved(env, member_type_id)) {
2027 env_stack_set_next_member(env, i + 1);
2028 return env_stack_push(env, member_type, member_type_id);
2029 }
2030
2031 if (btf_type_kflag(v->t))
2032 err = btf_type_ops(member_type)->check_kflag_member(env, v->t,
2033 member,
2034 member_type);
2035 else
2036 err = btf_type_ops(member_type)->check_member(env, v->t,
2037 member,
2038 member_type);
2039 if (err)
2040 return err;
2041 }
2042
2043 env_stack_pop_resolved(env, 0, 0);
2044
2045 return 0;
2046}
2047
2048static void btf_struct_log(struct btf_verifier_env *env,
2049 const struct btf_type *t)
2050{
2051 btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
2052}
2053
/* find 'struct bpf_spin_lock' in map value.
 * return >= 0 offset if found
 * and < 0 in case of error
 */
2058int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
2059{
2060 const struct btf_member *member;
2061 u32 i, off = -ENOENT;
2062
2063 if (!__btf_type_is_struct(t))
2064 return -EINVAL;
2065
2066 for_each_member(i, t, member) {
2067 const struct btf_type *member_type = btf_type_by_id(btf,
2068 member->type);
2069 if (!__btf_type_is_struct(member_type))
2070 continue;
2071 if (member_type->size != sizeof(struct bpf_spin_lock))
2072 continue;
2073 if (strcmp(__btf_name_by_offset(btf, member_type->name_off),
2074 "bpf_spin_lock"))
2075 continue;
		if (off != -ENOENT)
			/* only one 'struct bpf_spin_lock' is allowed */
			return -E2BIG;
		off = btf_member_bit_offset(t, member);
		if (off % 8)
			/* valid C code cannot generate such BTF */
			return -EINVAL;
		off /= 8;
		if (off % __alignof__(struct bpf_spin_lock))
			/* valid struct bpf_spin_lock is 4 byte aligned */
			return -EINVAL;
2087 }
2088 return off;
2089}
2090
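/* Pretty print a struct/union as "{v1,v2,...}" (unions use '|' as the
 * separator).  Bitfield members are printed directly from their bits;
 * other members are delegated to their kind's seq_show().
 */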
2091static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
2092 u32 type_id, void *data, u8 bits_offset,
2093 struct seq_file *m)
2094{
2095 const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
2096 const struct btf_member *member;
2097 u32 i;
2098
2099 seq_puts(m, "{");
2100 for_each_member(i, t, member) {
2101 const struct btf_type *member_type = btf_type_by_id(btf,
2102 member->type);
2103 const struct btf_kind_operations *ops;
2104 u32 member_offset, bitfield_size;
2105 u32 bytes_offset;
2106 u8 bits8_offset;
2107
2108 if (i)
2109 seq_puts(m, seq);
2110
2111 member_offset = btf_member_bit_offset(t, member);
2112 bitfield_size = btf_member_bitfield_size(t, member);
2113 bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
2114 bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
2115 if (bitfield_size) {
2116 btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
2117 bitfield_size, m);
2118 } else {
2119 ops = btf_type_ops(member_type);
2120 ops->seq_show(btf, member_type, member->type,
2121 data + bytes_offset, bits8_offset, m);
2122 }
2123 }
2124 seq_puts(m, "}");
2125}
2126
2127static struct btf_kind_operations struct_ops = {
2128 .check_meta = btf_struct_check_meta,
2129 .resolve = btf_struct_resolve,
2130 .check_member = btf_struct_check_member,
2131 .check_kflag_member = btf_generic_check_kflag_member,
2132 .log_details = btf_struct_log,
2133 .seq_show = btf_struct_seq_show,
2134};
2135
2136static int btf_enum_check_member(struct btf_verifier_env *env,
2137 const struct btf_type *struct_type,
2138 const struct btf_member *member,
2139 const struct btf_type *member_type)
2140{
2141 u32 struct_bits_off = member->offset;
2142 u32 struct_size, bytes_offset;
2143
2144 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2145 btf_verifier_log_member(env, struct_type, member,
2146 "Member is not byte aligned");
2147 return -EINVAL;
2148 }
2149
2150 struct_size = struct_type->size;
2151 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
2152 if (struct_size - bytes_offset < sizeof(int)) {
2153 btf_verifier_log_member(env, struct_type, member,
2154 "Member exceeds struct_size");
2155 return -EINVAL;
2156 }
2157
2158 return 0;
2159}
2160
2161static int btf_enum_check_kflag_member(struct btf_verifier_env *env,
2162 const struct btf_type *struct_type,
2163 const struct btf_member *member,
2164 const struct btf_type *member_type)
2165{
2166 u32 struct_bits_off, nr_bits, bytes_end, struct_size;
2167 u32 int_bitsize = sizeof(int) * BITS_PER_BYTE;
2168
2169 struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset);
2170 nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset);
2171 if (!nr_bits) {
2172 if (BITS_PER_BYTE_MASKED(struct_bits_off)) {
2173 btf_verifier_log_member(env, struct_type, member,
2174 "Member is not byte aligned");
2175 return -EINVAL;
2176 }
2177
2178 nr_bits = int_bitsize;
2179 } else if (nr_bits > int_bitsize) {
2180 btf_verifier_log_member(env, struct_type, member,
2181 "Invalid member bitfield_size");
2182 return -EINVAL;
2183 }
2184
2185 struct_size = struct_type->size;
2186 bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits);
2187 if (struct_size < bytes_end) {
2188 btf_verifier_log_member(env, struct_type, member,
2189 "Member exceeds struct_size");
2190 return -EINVAL;
2191 }
2192
2193 return 0;
2194}
2195
2196static s32 btf_enum_check_meta(struct btf_verifier_env *env,
2197 const struct btf_type *t,
2198 u32 meta_left)
2199{
2200 const struct btf_enum *enums = btf_type_enum(t);
2201 struct btf *btf = env->btf;
2202 u16 i, nr_enums;
2203 u32 meta_needed;
2204
2205 nr_enums = btf_type_vlen(t);
2206 meta_needed = nr_enums * sizeof(*enums);
2207
2208 if (meta_left < meta_needed) {
2209 btf_verifier_log_basic(env, t,
2210 "meta_left:%u meta_needed:%u",
2211 meta_left, meta_needed);
2212 return -EINVAL;
2213 }
2214
2215 if (btf_type_kflag(t)) {
2216 btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
2217 return -EINVAL;
2218 }
2219
2220 if (t->size != sizeof(int)) {
2221 btf_verifier_log_type(env, t, "Expected size:%zu",
2222 sizeof(int));
2223 return -EINVAL;
2224 }
2225
	/* enum type either no name or a valid one */
2227 if (t->name_off &&
2228 !btf_name_valid_identifier(env->btf, t->name_off)) {
2229 btf_verifier_log_type(env, t, "Invalid name");
2230 return -EINVAL;
2231 }
2232
2233 btf_verifier_log_type(env, t, NULL);
2234
2235 for (i = 0; i < nr_enums; i++) {
2236 if (!btf_name_offset_valid(btf, enums[i].name_off)) {
2237 btf_verifier_log(env, "\tInvalid name_offset:%u",
2238 enums[i].name_off);
2239 return -EINVAL;
2240 }
2241
		/* enum member must have a valid name */
2243 if (!enums[i].name_off ||
2244 !btf_name_valid_identifier(btf, enums[i].name_off)) {
2245 btf_verifier_log_type(env, t, "Invalid name");
2246 return -EINVAL;
2247 }
2248
2249
2250 btf_verifier_log(env, "\t%s val=%d\n",
2251 __btf_name_by_offset(btf, enums[i].name_off),
2252 enums[i].val);
2253 }
2254
2255 return meta_needed;
2256}
2257
static void btf_enum_log(struct btf_verifier_env *env,
			 const struct btf_type *t)
{
	btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
}

static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
			      u32 type_id, void *data, u8 bits_offset,
			      struct seq_file *m)
{
	const struct btf_enum *enums = btf_type_enum(t);
	u32 i, nr_enums = btf_type_vlen(t);
	int v = *(int *)data;

	for (i = 0; i < nr_enums; i++) {
		if (v == enums[i].val) {
			seq_printf(m, "%s",
				   __btf_name_by_offset(btf,
							enums[i].name_off));
			return;
		}
	}

	seq_printf(m, "%d", v);
}

static struct btf_kind_operations enum_ops = {
	.check_meta = btf_enum_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_enum_check_member,
	.check_kflag_member = btf_enum_check_kflag_member,
	.log_details = btf_enum_log,
	.seq_show = btf_enum_seq_show,
};

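/* A FUNC_PROTO's variable-length data is an array of struct btf_param
 * (one per argument) that immediately follows the btf_type; vlen is the
 * argument count and t->type is the return type.
 */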
static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
				     const struct btf_type *t,
				     u32 meta_left)
{
	u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param);

	if (meta_left < meta_needed) {
		btf_verifier_log_basic(env, t,
				       "meta_left:%u meta_needed:%u",
				       meta_left, meta_needed);
		return -EINVAL;
	}

	if (t->name_off) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return meta_needed;
}

static void btf_func_proto_log(struct btf_verifier_env *env,
			       const struct btf_type *t)
{
	const struct btf_param *args = (const struct btf_param *)(t + 1);
	u16 nr_args = btf_type_vlen(t), i;

	btf_verifier_log(env, "return=%u args=(", t->type);
	if (!nr_args) {
		btf_verifier_log(env, "void");
		goto done;
	}

	if (nr_args == 1 && !args[0].type) {
		/* Only one vararg */
		btf_verifier_log(env, "vararg");
		goto done;
	}

	btf_verifier_log(env, "%u %s", args[0].type,
			 __btf_name_by_offset(env->btf, args[0].name_off));
	for (i = 1; i < nr_args - 1; i++)
		btf_verifier_log(env, ", %u %s", args[i].type,
				 __btf_name_by_offset(env->btf,
						      args[i].name_off));

	if (nr_args > 1) {
		const struct btf_param *last_arg = &args[nr_args - 1];

		if (last_arg->type)
			btf_verifier_log(env, ", %u %s", last_arg->type,
					 __btf_name_by_offset(env->btf,
							      last_arg->name_off));
		else
			btf_verifier_log(env, ", vararg");
	}

done:
	btf_verifier_log(env, ")");
}

static struct btf_kind_operations func_proto_ops = {
	.check_meta = btf_func_proto_check_meta,
	.resolve = btf_df_resolve,
	/*
	 * BTF_KIND_FUNC_PROTO cannot be directly referred to by
	 * a struct's member.
	 *
	 * It should be a function pointer instead.
	 * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO)
	 *
	 * Hence, there is no btf_func_proto_check_member() and the
	 * default (rejecting) member checks are used here.
	 */
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_func_proto_log,
	.seq_show = btf_df_seq_show,
};

static s32 btf_func_check_meta(struct btf_verifier_env *env,
			       const struct btf_type *t,
			       u32 meta_left)
{
	if (!t->name_off ||
	    !btf_name_valid_identifier(env->btf, t->name_off)) {
		btf_verifier_log_type(env, t, "Invalid name");
		return -EINVAL;
	}

	if (btf_type_vlen(t)) {
		btf_verifier_log_type(env, t, "vlen != 0");
		return -EINVAL;
	}

	if (btf_type_kflag(t)) {
		btf_verifier_log_type(env, t, "Invalid btf_info kind_flag");
		return -EINVAL;
	}

	btf_verifier_log_type(env, t, NULL);

	return 0;
}

static struct btf_kind_operations func_ops = {
	.check_meta = btf_func_check_meta,
	.resolve = btf_df_resolve,
	.check_member = btf_df_check_member,
	.check_kflag_member = btf_df_check_kflag_member,
	.log_details = btf_ref_type_log,
	.seq_show = btf_df_seq_show,
};

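/* Validate a FUNC_PROTO after all type metas have been loaded: the return
 * type (0 means void) and every argument type must resolve to a type with
 * a known size, and only the last argument may have type 0 (an unnamed
 * vararg marker).
 */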
static int btf_func_proto_check(struct btf_verifier_env *env,
				const struct btf_type *t)
{
	const struct btf_type *ret_type;
	const struct btf_param *args;
	const struct btf *btf;
	u16 nr_args, i;
	int err;

	btf = env->btf;
	args = (const struct btf_param *)(t + 1);
	nr_args = btf_type_vlen(t);

	/* Check the return type, which may be void (t->type == 0) */
	if (t->type) {
		u32 ret_type_id = t->type;

		ret_type = btf_type_by_id(btf, ret_type_id);
		if (!ret_type) {
			btf_verifier_log_type(env, t, "Invalid return type");
			return -EINVAL;
		}

		if (btf_type_needs_resolve(ret_type) &&
		    !env_type_is_resolved(env, ret_type_id)) {
			err = btf_resolve(env, ret_type, ret_type_id);
			if (err)
				return err;
		}

		/* The return type must resolve to a type with a size */
		if (!btf_type_id_size(btf, &ret_type_id, NULL)) {
			btf_verifier_log_type(env, t, "Invalid return type");
			return -EINVAL;
		}
	}

	if (!nr_args)
		return 0;

	/* The last arg's type_id may be 0 to mark a vararg */
	if (!args[nr_args - 1].type) {
		if (args[nr_args - 1].name_off) {
			btf_verifier_log_type(env, t, "Invalid arg#%u",
					      nr_args);
			return -EINVAL;
		}
		nr_args--;
	}

	err = 0;
	for (i = 0; i < nr_args; i++) {
		const struct btf_type *arg_type;
		u32 arg_type_id;

		arg_type_id = args[i].type;
		arg_type = btf_type_by_id(btf, arg_type_id);
		if (!arg_type) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}

		if (args[i].name_off &&
		    (!btf_name_offset_valid(btf, args[i].name_off) ||
		     !btf_name_valid_identifier(btf, args[i].name_off))) {
			btf_verifier_log_type(env, t,
					      "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}

		if (btf_type_needs_resolve(arg_type) &&
		    !env_type_is_resolved(env, arg_type_id)) {
			err = btf_resolve(env, arg_type, arg_type_id);
			if (err)
				break;
		}

		if (!btf_type_id_size(btf, &arg_type_id, NULL)) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			err = -EINVAL;
			break;
		}
	}

	return err;
}

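/* A FUNC must point at a FUNC_PROTO, and every non-vararg argument of
 * that prototype must carry a name.
 */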
static int btf_func_check(struct btf_verifier_env *env,
			  const struct btf_type *t)
{
	const struct btf_type *proto_type;
	const struct btf_param *args;
	const struct btf *btf;
	u16 nr_args, i;

	btf = env->btf;
	proto_type = btf_type_by_id(btf, t->type);

	if (!proto_type || !btf_type_is_func_proto(proto_type)) {
		btf_verifier_log_type(env, t, "Invalid type_id");
		return -EINVAL;
	}

	args = (const struct btf_param *)(proto_type + 1);
	nr_args = btf_type_vlen(proto_type);
	for (i = 0; i < nr_args; i++) {
		if (!args[i].name_off && args[i].type) {
			btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1);
			return -EINVAL;
		}
	}

	return 0;
}

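/* Per-kind verifier/show callbacks, indexed by BTF_KIND_*.  STRUCT and
 * UNION share struct_ops; TYPEDEF and the cv-qualifier kinds share
 * modifier_ops.
 */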
static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
	[BTF_KIND_INT] = &int_ops,
	[BTF_KIND_PTR] = &ptr_ops,
	[BTF_KIND_ARRAY] = &array_ops,
	[BTF_KIND_STRUCT] = &struct_ops,
	[BTF_KIND_UNION] = &struct_ops,
	[BTF_KIND_ENUM] = &enum_ops,
	[BTF_KIND_FWD] = &fwd_ops,
	[BTF_KIND_TYPEDEF] = &modifier_ops,
	[BTF_KIND_VOLATILE] = &modifier_ops,
	[BTF_KIND_CONST] = &modifier_ops,
	[BTF_KIND_RESTRICT] = &modifier_ops,
	[BTF_KIND_FUNC] = &func_ops,
	[BTF_KIND_FUNC_PROTO] = &func_proto_ops,
};

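/* Validate the fixed part of one btf_type (info bits, kind, name offset),
 * then hand off to the kind-specific check_meta() for the variable-length
 * data that follows it.  Returns the number of bytes consumed, or a
 * negative error.
 */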
static s32 btf_check_meta(struct btf_verifier_env *env,
			  const struct btf_type *t,
			  u32 meta_left)
{
	u32 saved_meta_left = meta_left;
	s32 var_meta_size;

	if (meta_left < sizeof(*t)) {
		btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu",
				 env->log_type_id, meta_left, sizeof(*t));
		return -EINVAL;
	}
	meta_left -= sizeof(*t);

	if (t->info & ~BTF_INFO_MASK) {
		btf_verifier_log(env, "[%u] Invalid btf_info:%x",
				 env->log_type_id, t->info);
		return -EINVAL;
	}

	if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX ||
	    BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) {
		btf_verifier_log(env, "[%u] Invalid kind:%u",
				 env->log_type_id, BTF_INFO_KIND(t->info));
		return -EINVAL;
	}

	if (!btf_name_offset_valid(env->btf, t->name_off)) {
		btf_verifier_log(env, "[%u] Invalid name_offset:%u",
				 env->log_type_id, t->name_off);
		return -EINVAL;
	}

	var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left);
	if (var_meta_size < 0)
		return var_meta_size;

	meta_left -= var_meta_size;

	return saved_meta_left - meta_left;
}

static int btf_check_all_metas(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	struct btf_header *hdr;
	void *cur, *end;

	hdr = &btf->hdr;
	cur = btf->nohdr_data + hdr->type_off;
	end = cur + hdr->type_len;

	env->log_type_id = 1;
	while (cur < end) {
		struct btf_type *t = cur;
		s32 meta_size;

		meta_size = btf_check_meta(env, t, end - cur);
		if (meta_size < 0)
			return meta_size;

		btf_add_type(env, t);
		cur += meta_size;
		env->log_type_id++;
	}

	return 0;
}

static bool btf_resolve_valid(struct btf_verifier_env *env,
			      const struct btf_type *t,
			      u32 type_id)
{
	struct btf *btf = env->btf;

	if (!env_type_is_resolved(env, type_id))
		return false;

	if (btf_type_is_struct(t))
		return !btf->resolved_ids[type_id] &&
		       !btf->resolved_sizes[type_id];

	if (btf_type_is_modifier(t) || btf_type_is_ptr(t)) {
		t = btf_type_id_resolve(btf, &type_id);
		return t && !btf_type_is_modifier(t);
	}

	if (btf_type_is_array(t)) {
		const struct btf_array *array = btf_type_array(t);
		const struct btf_type *elem_type;
		u32 elem_type_id = array->type;
		u32 elem_size;

		elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
		return elem_type && !btf_type_is_modifier(elem_type) &&
		       (array->nelems * elem_size ==
			btf->resolved_sizes[type_id]);
	}

	return false;
}

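/* Resolve one type using an explicit stack rather than recursion:
 * env_stack_push()/env_stack_peak() drive the walk, -E2BIG means the
 * MAX_RESOLVE_DEPTH limit was hit, and -EEXIST means a loop was detected.
 * A final btf_resolve_valid() check guards the result.
 */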
static int btf_resolve(struct btf_verifier_env *env,
		       const struct btf_type *t, u32 type_id)
{
	u32 save_log_type_id = env->log_type_id;
	const struct resolve_vertex *v;
	int err = 0;

	env->resolve_mode = RESOLVE_TBD;
	env_stack_push(env, t, type_id);
	while (!err && (v = env_stack_peak(env))) {
		env->log_type_id = v->type_id;
		err = btf_type_ops(v->t)->resolve(env, v);
	}

	env->log_type_id = type_id;
	if (err == -E2BIG) {
		btf_verifier_log_type(env, t,
				      "Exceeded max resolving depth:%u",
				      MAX_RESOLVE_DEPTH);
	} else if (err == -EEXIST) {
		btf_verifier_log_type(env, t, "Loop detected");
	}

	/* Final sanity check */
	if (!err && !btf_resolve_valid(env, t, type_id)) {
		btf_verifier_log_type(env, t, "Invalid resolve state");
		err = -EINVAL;
	}

	env->log_type_id = save_log_type_id;
	return err;
}

static int btf_check_all_types(struct btf_verifier_env *env)
{
	struct btf *btf = env->btf;
	u32 type_id;
	int err;

	err = env_resolve_init(env);
	if (err)
		return err;

	env->phase++;
	for (type_id = 1; type_id <= btf->nr_types; type_id++) {
		const struct btf_type *t = btf_type_by_id(btf, type_id);

		env->log_type_id = type_id;
		if (btf_type_needs_resolve(t) &&
		    !env_type_is_resolved(env, type_id)) {
			err = btf_resolve(env, t, type_id);
			if (err)
				return err;
		}

		if (btf_type_is_func_proto(t)) {
			err = btf_func_proto_check(env, t);
			if (err)
				return err;
		}

		if (btf_type_is_func(t)) {
			err = btf_func_check(env, t);
			if (err)
				return err;
		}
	}

	return 0;
}

static int btf_parse_type_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr = &env->btf->hdr;
	int err;

	/* The type section must be 4-byte aligned */
	if (hdr->type_off & (sizeof(u32) - 1)) {
		btf_verifier_log(env, "Unaligned type_off");
		return -EINVAL;
	}

	if (!hdr->type_len) {
		btf_verifier_log(env, "No type found");
		return -EINVAL;
	}

	err = btf_check_all_metas(env);
	if (err)
		return err;

	return btf_check_all_types(env);
}

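/* The string section must be the last section in the BTF data and must
 * both start and end with a '\0' byte.
 */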
static int btf_parse_str_sec(struct btf_verifier_env *env)
{
	const struct btf_header *hdr;
	struct btf *btf = env->btf;
	const char *start, *end;

	hdr = &btf->hdr;
	start = btf->nohdr_data + hdr->str_off;
	end = start + hdr->str_len;

	if (end != btf->data + btf->data_size) {
		btf_verifier_log(env, "String section is not at the end");
		return -EINVAL;
	}

	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
	    start[0] || end[-1]) {
		btf_verifier_log(env, "Invalid string section");
		return -EINVAL;
	}

	btf->strings = start;

	return 0;
}

static const size_t btf_sec_info_offset[] = {
	offsetof(struct btf_header, type_off),
	offsetof(struct btf_header, str_off),
};

static int btf_sec_info_cmp(const void *a, const void *b)
{
	const struct btf_sec_info *x = a;
	const struct btf_sec_info *y = b;

	return (int)(x->off - y->off) ? : (int)(x->len - y->len);
}

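/* The known sections (type and string), sorted by offset, must sit back
 * to back with no gaps or overlap and must together account for every
 * byte that follows the header.
 */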
static int btf_check_sec_info(struct btf_verifier_env *env,
			      u32 btf_data_size)
{
	struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)];
	u32 total, expected_total, i;
	const struct btf_header *hdr;
	const struct btf *btf;

	btf = env->btf;
	hdr = &btf->hdr;

	/* Populate btf_sec_info with the type and string section bounds */
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++)
		secs[i] = *(struct btf_sec_info *)((void *)hdr +
						   btf_sec_info_offset[i]);

	sort(secs, ARRAY_SIZE(btf_sec_info_offset),
	     sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL);

	/* Check for gaps and overlaps among the sections */
	total = 0;
	expected_total = btf_data_size - hdr->hdr_len;
	for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) {
		if (expected_total < secs[i].off) {
			btf_verifier_log(env, "Invalid section offset");
			return -EINVAL;
		}
		if (total < secs[i].off) {
			/* gap */
			btf_verifier_log(env, "Unsupported section found");
			return -EINVAL;
		}
		if (total > secs[i].off) {
			btf_verifier_log(env, "Section overlap found");
			return -EINVAL;
		}
		if (expected_total - total < secs[i].len) {
			btf_verifier_log(env,
					 "Total section length too long");
			return -EINVAL;
		}
		total += secs[i].len;
	}

	/* There must be no data other than the header and known sections */
	if (expected_total != total) {
		btf_verifier_log(env, "Unsupported section found");
		return -EINVAL;
	}

	return 0;
}

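/* Validate the header: magic, version, flags, and section layout.
 * hdr_len allows the header to grow; any trailing header bytes this
 * kernel does not know about must be zero.
 */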
static int btf_parse_hdr(struct btf_verifier_env *env)
{
	u32 hdr_len, hdr_copy, btf_data_size;
	const struct btf_header *hdr;
	struct btf *btf;
	int err;

	btf = env->btf;
	btf_data_size = btf->data_size;

	if (btf_data_size <
	    offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) {
		btf_verifier_log(env, "hdr_len not found");
		return -EINVAL;
	}

	hdr = btf->data;
	hdr_len = hdr->hdr_len;
	if (btf_data_size < hdr_len) {
		btf_verifier_log(env, "btf_header not found");
		return -EINVAL;
	}

	/* Header bytes beyond what this kernel understands must be zero */
	if (hdr_len > sizeof(btf->hdr)) {
		u8 *expected_zero = btf->data + sizeof(btf->hdr);
		u8 *end = btf->data + hdr_len;

		for (; expected_zero < end; expected_zero++) {
			if (*expected_zero) {
				btf_verifier_log(env, "Unsupported btf_header");
				return -E2BIG;
			}
		}
	}

	hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr));
	memcpy(&btf->hdr, btf->data, hdr_copy);

	hdr = &btf->hdr;

	btf_verifier_log_hdr(env, btf_data_size);

	if (hdr->magic != BTF_MAGIC) {
		btf_verifier_log(env, "Invalid magic");
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		btf_verifier_log(env, "Unsupported version");
		return -ENOTSUPP;
	}

	if (hdr->flags) {
		btf_verifier_log(env, "Unsupported flags");
		return -ENOTSUPP;
	}

	if (btf_data_size == hdr->hdr_len) {
		btf_verifier_log(env, "No data");
		return -EINVAL;
	}

	err = btf_check_sec_info(env, btf_data_size);
	if (err)
		return err;

	return 0;
}

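/* Parse user-supplied BTF: copy the blob in, validate the header, then
 * the string section, then the type section.  On success the returned
 * btf starts with a refcount of 1.
 */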
static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
			     u32 log_level, char __user *log_ubuf, u32 log_size)
{
	struct btf_verifier_env *env = NULL;
	struct bpf_verifier_log *log;
	struct btf *btf = NULL;
	u8 *data;
	int err;

	if (btf_data_size > BTF_MAX_SIZE)
		return ERR_PTR(-E2BIG);

	env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
	if (!env)
		return ERR_PTR(-ENOMEM);

	log = &env->log;
	if (log_level || log_ubuf || log_size) {
		/* user requested verbose verifier output
		 * and supplied a buffer to store the verification trace
		 */
		log->level = log_level;
		log->ubuf = log_ubuf;
		log->len_total = log_size;

		/* log attributes have to be sane */
		if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 ||
		    !log->level || !log->ubuf) {
			err = -EINVAL;
			goto errout;
		}
	}

	btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
	if (!btf) {
		err = -ENOMEM;
		goto errout;
	}
	env->btf = btf;

	data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
	if (!data) {
		err = -ENOMEM;
		goto errout;
	}

	btf->data = data;
	btf->data_size = btf_data_size;

	if (copy_from_user(data, btf_data, btf_data_size)) {
		err = -EFAULT;
		goto errout;
	}

	err = btf_parse_hdr(env);
	if (err)
		goto errout;

	btf->nohdr_data = btf->data + btf->hdr.hdr_len;

	err = btf_parse_str_sec(env);
	if (err)
		goto errout;

	err = btf_parse_type_sec(env);
	if (err)
		goto errout;

	if (log->level && bpf_verifier_log_full(log)) {
		err = -ENOSPC;
		goto errout;
	}

	btf_verifier_env_free(env);
	refcount_set(&btf->refcnt, 1);
	return btf;

errout:
	btf_verifier_env_free(env);
	if (btf)
		btf_free(btf);
	return ERR_PTR(err);
}

void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
		       struct seq_file *m)
{
	const struct btf_type *t = btf_type_by_id(btf, type_id);

	btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
}

static int btf_release(struct inode *inode, struct file *filp)
{
	btf_put(filp->private_data);
	return 0;
}

const struct file_operations btf_fops = {
	.release = btf_release,
};

static int __btf_new_fd(struct btf *btf)
{
	return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
}

int btf_new_fd(const union bpf_attr *attr)
{
	struct btf *btf;
	int ret;

	btf = btf_parse(u64_to_user_ptr(attr->btf),
			attr->btf_size, attr->btf_log_level,
			u64_to_user_ptr(attr->btf_log_buf),
			attr->btf_log_size);
	if (IS_ERR(btf))
		return PTR_ERR(btf);

	ret = btf_alloc_id(btf);
	if (ret) {
		btf_free(btf);
		return ret;
	}

	/*
	 * The BTF ID is now published to userspace.
	 * From this point on the btf must be freed by dropping the
	 * reference with btf_put() instead of calling btf_free() directly.
	 */
	ret = __btf_new_fd(btf);
	if (ret < 0)
		btf_put(btf);

	return ret;
}

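/* Look up the struct btf behind a BTF fd and take a reference on it;
 * the caller is expected to drop that reference with btf_put().
 */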
struct btf *btf_get_by_fd(int fd)
{
	struct btf *btf;
	struct fd f;

	f = fdget(fd);

	if (!f.file)
		return ERR_PTR(-EBADF);

	if (f.file->f_op != &btf_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	btf = f.file->private_data;
	refcount_inc(&btf->refcnt);
	fdput(f);

	return btf;
}

int btf_get_info_by_fd(const struct btf *btf,
		       const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	struct bpf_btf_info __user *uinfo;
	struct bpf_btf_info info = {};
	u32 info_copy, btf_copy;
	void __user *ubtf;
	u32 uinfo_len;

	uinfo = u64_to_user_ptr(attr->info.info);
	uinfo_len = attr->info.info_len;

	info_copy = min_t(u32, uinfo_len, sizeof(info));
	if (copy_from_user(&info, uinfo, info_copy))
		return -EFAULT;

	info.id = btf->id;
	ubtf = u64_to_user_ptr(info.btf);
	btf_copy = min_t(u32, btf->data_size, info.btf_size);
	if (copy_to_user(ubtf, btf->data, btf_copy))
		return -EFAULT;
	info.btf_size = btf->data_size;

	if (copy_to_user(uinfo, &info, info_copy) ||
	    put_user(info_copy, &uattr->info.info_len))
		return -EFAULT;

	return 0;
}

int btf_get_fd_by_id(u32 id)
{
	struct btf *btf;
	int fd;

	rcu_read_lock();
	btf = idr_find(&btf_idr, id);
	if (!btf || !refcount_inc_not_zero(&btf->refcnt))
		btf = ERR_PTR(-ENOENT);
	rcu_read_unlock();

	if (IS_ERR(btf))
		return PTR_ERR(btf);

	fd = __btf_new_fd(btf);
	if (fd < 0)
		btf_put(btf);

	return fd;
}

u32 btf_id(const struct btf *btf)
{
	return btf->id;
}