1
2
3
4
5
6
7
8
9
10
11
12
13#include <endian.h>
14#include <asm/types.h>
15#include <linux/types.h>
16#include <stdint.h>
17#include <stdio.h>
18#include <stdlib.h>
19#include <unistd.h>
20#include <errno.h>
21#include <string.h>
22#include <stddef.h>
23#include <stdbool.h>
24#include <sched.h>
25#include <limits.h>
26#include <assert.h>
27
28#include <sys/capability.h>
29
30#include <linux/unistd.h>
31#include <linux/filter.h>
32#include <linux/bpf_perf_event.h>
33#include <linux/bpf.h>
34#include <linux/if_ether.h>
35#include <linux/btf.h>
36
37#include <bpf/bpf.h>
38#include <bpf/libbpf.h>
39
40#ifdef HAVE_GENHDR
41# include "autoconf.h"
42#else
43# if defined(__i386) || defined(__x86_64) || defined(__s390x__) || defined(__aarch64__)
44# define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1
45# endif
46#endif
47#include "bpf_rlimit.h"
48#include "bpf_rand.h"
49#include "bpf_util.h"
50#include "test_btf.h"
51#include "../../../include/linux/filter.h"
52
/* Harness-wide limits. */
#define MAX_INSNS BPF_MAXINSNS
#define MAX_TEST_INSNS 1000000
#define MAX_FIXUPS 8
#define MAX_NR_MAPS 20
#define MAX_TEST_RUNS 8
/* Sentinel retval: "any pointer-like value accepted" (0xcafe4a with LL suffix). */
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64

/* Bits for struct bpf_test::flags. */
#define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0)
#define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1)

/* sysctl that disables bpf() for unprivileged users. */
#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
static bool unpriv_disabled = false;	/* set from the sysctl at startup */
static int skips;			/* running count of skipped (sub)tests */
static bool verbose = false;		/* -v: always dump the verifier log */
68
/* One verifier test case; instances come from verifier/tests.h. */
struct bpf_test {
	const char *descr;			/* human-readable test name */
	struct bpf_insn insns[MAX_INSNS];	/* static program body */
	struct bpf_insn *fill_insns;		/* generated body (see fill_helper) */
	/* Zero-terminated lists of instruction indices whose imm field is
	 * patched with the fd of the corresponding map (index 0 is unusable
	 * because 0 terminates the list).
	 */
	int fixup_map_hash_8b[MAX_FIXUPS];
	int fixup_map_hash_48b[MAX_FIXUPS];
	int fixup_map_hash_16b[MAX_FIXUPS];
	int fixup_map_array_48b[MAX_FIXUPS];
	int fixup_map_sockmap[MAX_FIXUPS];
	int fixup_map_sockhash[MAX_FIXUPS];
	int fixup_map_xskmap[MAX_FIXUPS];
	int fixup_map_stacktrace[MAX_FIXUPS];
	int fixup_prog1[MAX_FIXUPS];
	int fixup_prog2[MAX_FIXUPS];
	int fixup_map_in_map[MAX_FIXUPS];
	int fixup_cgroup_storage[MAX_FIXUPS];
	int fixup_percpu_cgroup_storage[MAX_FIXUPS];
	int fixup_map_spin_lock[MAX_FIXUPS];
	int fixup_map_array_ro[MAX_FIXUPS];
	int fixup_map_array_wo[MAX_FIXUPS];
	int fixup_map_array_small[MAX_FIXUPS];
	int fixup_sk_storage_map[MAX_FIXUPS];
	int fixup_map_event_output[MAX_FIXUPS];
	int fixup_map_reuseport_array[MAX_FIXUPS];
	const char *errstr;		/* expected verifier message (privileged) */
	const char *errstr_unpriv;	/* expected message when unprivileged */
	uint32_t insn_processed;	/* expected "processed N insns", 0 = don't check */
	int prog_len;			/* length of fill_insns program */
	enum {
		UNDEF,			/* fall back to `result` for unpriv */
		ACCEPT,
		REJECT,
		VERBOSE_ACCEPT,		/* accept AND match errstr against the log */
	} result, result_unpriv;
	enum bpf_prog_type prog_type;	/* 0 defaults to SOCKET_FILTER */
	uint8_t flags;			/* F_* bits; high bits pass through to prog_flags */
	void (*fill_helper)(struct bpf_test *self);	/* generates fill_insns */
	uint8_t runs;			/* number of test_run rounds; 0 means 1 */
#define bpf_testdata_struct_t \
	struct { \
		uint32_t retval, retval_unpriv; \
		union { \
			__u8 data[TEST_DATA_LEN]; \
			__u64 data64[TEST_DATA_LEN / 8]; \
		}; \
	}
	/* Single-run tests use the anonymous member; multi-run tests index
	 * retvals[] — both views alias the same storage.
	 */
	union {
		bpf_testdata_struct_t;
		bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
	};
	enum bpf_attach_type expected_attach_type;
};
121
122
123
124
#define MAX_ENTRIES 11

/* 48-byte value used by the *_48b map fixups. */
struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

/* 16-byte value used by the *_16b map fixups. */
struct other_val {
	long long foo;
	long long bar;
};
136
/* Generate a ~32k-instruction program interleaving BPF_LD_ABS with
 * skb_vlan_push/pop helper calls; each conditional jump targets the common
 * error epilogue near the end of the program (insn[len - 2]).
 */
static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{

#define PUSH_CNT 51

	/* Reserve headroom below 1 << 15 so every forward jump offset
	 * (len - i - 3) stays within range.
	 */
	unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, j, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);	/* save ctx */
loop:
	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		/* bail out to insn[len - 2] unless first byte == 0x34 */
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_MOV64_IMM(BPF_REG_2, 1);	/* vlan_proto */
		insn[i++] = BPF_MOV64_IMM(BPF_REG_3, 2);	/* vlan_tci */
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_push),
		/* NOTE(review): trailing ',' above is a comma operator; a
		 * sequence point separates the two stores, so it behaves
		 * exactly like ';' here.
		 */
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}

	for (j = 0; j < PUSH_CNT; j++) {
		insn[i++] = BPF_LD_ABS(BPF_B, 0);
		insn[i] = BPF_JMP32_IMM(BPF_JNE, BPF_REG_0, 0x34, len - i - 3);
		i++;
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_skb_vlan_pop),
		insn[i] = BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, len - i - 3);
		i++;
	}
	if (++k < 5)
		goto loop;

	/* Pad up to the epilogue, then: skip-error jump, R0 = 0, exit. */
	for (; i < len - 3; i++)
		insn[i] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0xbef);
	insn[len - 3] = BPF_JMP_A(1);

	insn[len - 2] = BPF_MOV32_IMM(BPF_REG_0, 0);
	insn[len - 1] = BPF_EXIT_INSN();
	self->prog_len = len;
}
183
/* Generate a long run of BPF_LD_ABS insns guarded by one conditional jump
 * over all of them, to exercise jump-offset adjustment when ld_abs gets
 * rewritten by the kernel.
 */
static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;

	/* len divided by 7: presumably each ld_abs expands to several insns
	 * during rewriting (1 original + 6 added), keeping the adjusted jump
	 * offset inside the 16-bit range — NOTE(review): confirm the factor
	 * against the kernel's ld_abs conversion.
	 */
	unsigned int len = (1 << 15) / 7;
	int i = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	insn[i++] = BPF_LD_ABS(BPF_B, 0);
	/* jump over the remaining ld_abs insns straight to the exit */
	insn[i] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 10, len - i - 2);
	i++;
	while (i < len - 1)
		insn[i++] = BPF_LD_ABS(BPF_B, 1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
}
205
/* Generate a chain of 64-bit immediate loads XORed into R0.  On entry
 * self->retval holds the approximate number of instructions to emit; on
 * return it is replaced with the expected 32-bit folded checksum that the
 * program computes, so the runner can verify it.
 */
static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	uint64_t res = 0;
	int i = 0;

	insn[i++] = BPF_MOV32_IMM(BPF_REG_0, 0);
	while (i < self->retval) {
		uint64_t val = bpf_semi_rand_get();
		struct bpf_insn tmp[2] = { BPF_LD_IMM64(BPF_REG_1, val) };

		res ^= val;
		insn[i++] = tmp[0];	/* ld_imm64 occupies two insn slots */
		insn[i++] = tmp[1];
		insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	}
	/* Fold the 64-bit accumulator into 32 bits: R0 ^= R0 >> 32. */
	insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_0);
	insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
	insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	res ^= (res >> 32);	/* mirror the in-program fold on the host side */
	self->retval = (uint32_t)res;
}
230
#define MAX_JMP_SEQ 8192

/* Generate MAX_JMP_SEQ four-insn sequences (helper call, conditional jump,
 * stack store) to stress verifier state tracking, then pad with R0 = 42
 * up to the MAX_TEST_INSNS budget.  Expected run result is 42.
 */
static void bpf_fill_scale1(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);

	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		/* store at a varying stack slot within the 512-byte frame */
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % 64 + 1));
	}

	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	self->retval = 42;
}
258
259
/* Like bpf_fill_scale1() but nested inside FUNC_NEST levels of bpf-to-bpf
 * calls; the stack-slot stride shrinks to stay inside the smaller frames.
 * Expected run result is 42.
 */
static void bpf_fill_scale2(struct bpf_test *self)
{
	struct bpf_insn *insn = self->fill_insns;
	int i = 0, k = 0;

#define FUNC_NEST 7
	/* Each iteration emits "call +1; exit", chaining into the next. */
	for (k = 0; k < FUNC_NEST; k++) {
		insn[i++] = BPF_CALL_REL(1);
		insn[i++] = BPF_EXIT_INSN();
	}
	insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);

	k = 0;
	while (k++ < MAX_JMP_SEQ) {
		insn[i++] = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
					 BPF_FUNC_get_prandom_u32);
		insn[i++] = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, bpf_semi_rand_get(), 2);
		insn[i++] = BPF_MOV64_REG(BPF_REG_1, BPF_REG_10);
		insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
					-8 * (k % (64 - 4 * FUNC_NEST) + 1));
	}
	while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
		insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
	insn[i] = BPF_EXIT_INSN();
	self->prog_len = i + 1;
	self->retval = 42;
}
287
288static void bpf_fill_scale(struct bpf_test *self)
289{
290 switch (self->retval) {
291 case 1:
292 return bpf_fill_scale1(self);
293 case 2:
294 return bpf_fill_scale2(self);
295 default:
296 self->prog_len = 0;
297 break;
298 }
299}
300
301
/* Build a zeroed struct bpf_sock_tuple on the stack at r10-48 and invoke
 * the given socket-lookup helper as func(ctx, tuple, sizeof(tuple), 0, 0).
 */
#define BPF_SK_LOOKUP(func) \
	\
	BPF_MOV64_IMM(BPF_REG_2, 0), \
	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -8), \
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -16), \
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -24), \
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -32), \
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -40), \
	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -48), \
	\
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -48), \
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct bpf_sock_tuple)), \
	BPF_MOV64_IMM(BPF_REG_4, 0), \
	BPF_MOV64_IMM(BPF_REG_5, 0), \
	BPF_EMIT_CALL(BPF_FUNC_ ## func)

/* Load skb->data into r2 and skb->data_end into r3, then exit early with
 * r0 = 0 unless at least 8 bytes of packet data are available.
 */
#define BPF_DIRECT_PKT_R2 \
	BPF_MOV64_IMM(BPF_REG_0, 0), \
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
		    offsetof(struct __sk_buff, data)), \
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
		    offsetof(struct __sk_buff, data_end)), \
	BPF_MOV64_REG(BPF_REG_4, BPF_REG_2), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 8), \
	BPF_JMP_REG(BPF_JLE, BPF_REG_4, BPF_REG_3, 1), \
	BPF_EXIT_INSN()

/* Put a random value in r7 with the upper 33 bits cleared
 * (lsh 33 then rsh 33 zero-extends the low 31 bits).
 */
#define BPF_RAND_UEXT_R7 \
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
		     BPF_FUNC_get_prandom_u32), \
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 33), \
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 33)

/* Put a random value in r7 with bit 31 forced on and then sign-extended,
 * so the upper 32 bits end up all-ones.
 */
#define BPF_RAND_SEXT_R7 \
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, \
		     BPF_FUNC_get_prandom_u32), \
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), \
	BPF_ALU64_IMM(BPF_OR, BPF_REG_7, 0x80000000), \
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 32), \
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_7, 32)
354
/* The test-case table itself lives in the generated verifier/tests.h;
 * defining FILL_ARRAY selects that header's initializer portion.
 */
static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};
360
361static int probe_filter_length(const struct bpf_insn *fp)
362{
363 int len;
364
365 for (len = MAX_INSNS - 1; len > 0; --len)
366 if (fp[len].code != 0 || fp[len].imm != 0)
367 break;
368 return len + 1;
369}
370
371static bool skip_unsupported_map(enum bpf_map_type map_type)
372{
373 if (!bpf_probe_map_type(map_type, 0)) {
374 printf("SKIP (unsupported map type %d)\n", map_type);
375 skips++;
376 return true;
377 }
378 return false;
379}
380
381static int __create_map(uint32_t type, uint32_t size_key,
382 uint32_t size_value, uint32_t max_elem,
383 uint32_t extra_flags)
384{
385 int fd;
386
387 fd = bpf_create_map(type, size_key, size_value, max_elem,
388 (type == BPF_MAP_TYPE_HASH ?
389 BPF_F_NO_PREALLOC : 0) | extra_flags);
390 if (fd < 0) {
391 if (skip_unsupported_map(type))
392 return -1;
393 printf("Failed to create hash map '%s'!\n", strerror(errno));
394 }
395
396 return fd;
397}
398
/* Convenience wrapper around __create_map() with no extra flags. */
static int create_map(uint32_t type, uint32_t size_key,
		      uint32_t size_value, uint32_t max_elem)
{
	const uint32_t no_extra_flags = 0;

	return __create_map(type, size_key, size_value, max_elem,
			    no_extra_flags);
}
404
405static void update_map(int fd, int index)
406{
407 struct test_val value = {
408 .index = (6 + 1) * sizeof(int),
409 .foo[6] = 0xabcdef12,
410 };
411
412 assert(!bpf_map_update_elem(fd, &index, &value, 0));
413}
414
415static int create_prog_dummy_simple(enum bpf_prog_type prog_type, int ret)
416{
417 struct bpf_insn prog[] = {
418 BPF_MOV64_IMM(BPF_REG_0, ret),
419 BPF_EXIT_INSN(),
420 };
421
422 return bpf_load_program(prog_type, prog,
423 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
424}
425
426static int create_prog_dummy_loop(enum bpf_prog_type prog_type, int mfd,
427 int idx, int ret)
428{
429 struct bpf_insn prog[] = {
430 BPF_MOV64_IMM(BPF_REG_3, idx),
431 BPF_LD_MAP_FD(BPF_REG_2, mfd),
432 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
433 BPF_FUNC_tail_call),
434 BPF_MOV64_IMM(BPF_REG_0, ret),
435 BPF_EXIT_INSN(),
436 };
437
438 return bpf_load_program(prog_type, prog,
439 ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
440}
441
/* Create a prog-array map and populate three slots with dummy programs
 * (slot p2key gets a self-tail-calling program).  Returns the map fd or -1.
 * The dummy program fds are always closed before returning: the map keeps
 * its own references to the inserted programs.
 */
static int create_prog_array(enum bpf_prog_type prog_type, uint32_t max_elem,
			     int p1key, int p2key, int p3key)
{
	int mfd, p1fd, p2fd, p3fd;

	mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int),
			     sizeof(int), max_elem, 0);
	if (mfd < 0) {
		if (skip_unsupported_map(BPF_MAP_TYPE_PROG_ARRAY))
			return -1;
		printf("Failed to create prog array '%s'!\n", strerror(errno));
		return -1;
	}

	p1fd = create_prog_dummy_simple(prog_type, 42);
	p2fd = create_prog_dummy_loop(prog_type, mfd, p2key, 41);
	p3fd = create_prog_dummy_simple(prog_type, 24);
	if (p1fd < 0 || p2fd < 0 || p3fd < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0)
		goto err;
	if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0)
		goto err;
	/* The err: label sits inside this if so that falling through from the
	 * final failed update shares the same cleanup as the gotos above.
	 */
	if (bpf_map_update_elem(mfd, &p3key, &p3fd, BPF_ANY) < 0) {
err:
		close(mfd);
		mfd = -1;
	}
	/* close(-1) on a failed prog fd is harmless (EBADF is ignored) */
	close(p3fd);
	close(p2fd);
	close(p1fd);
	return mfd;
}
475
476static int create_map_in_map(void)
477{
478 int inner_map_fd, outer_map_fd;
479
480 inner_map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
481 sizeof(int), 1, 0);
482 if (inner_map_fd < 0) {
483 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY))
484 return -1;
485 printf("Failed to create array '%s'!\n", strerror(errno));
486 return inner_map_fd;
487 }
488
489 outer_map_fd = bpf_create_map_in_map(BPF_MAP_TYPE_ARRAY_OF_MAPS, NULL,
490 sizeof(int), inner_map_fd, 1, 0);
491 if (outer_map_fd < 0) {
492 if (skip_unsupported_map(BPF_MAP_TYPE_ARRAY_OF_MAPS))
493 return -1;
494 printf("Failed to create array of maps '%s'!\n",
495 strerror(errno));
496 }
497
498 close(inner_map_fd);
499
500 return outer_map_fd;
501}
502
503static int create_cgroup_storage(bool percpu)
504{
505 enum bpf_map_type type = percpu ? BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE :
506 BPF_MAP_TYPE_CGROUP_STORAGE;
507 int fd;
508
509 fd = bpf_create_map(type, sizeof(struct bpf_cgroup_storage_key),
510 TEST_DATA_LEN, 0, 0);
511 if (fd < 0) {
512 if (skip_unsupported_map(type))
513 return -1;
514 printf("Failed to create cgroup storage '%s'!\n",
515 strerror(errno));
516 }
517
518 return fd;
519}
520
521
522
523
524
525
526
527
528
/* String section offsets: 1="bpf_spin_lock", 15="val", 19="cnt", 23="l". */
static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l";
static __u32 btf_raw_types[] = {
	/* type 1: signed 32-bit int */
	BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
	/* type 2: struct bpf_spin_lock { int val; }  (size 4) */
	BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),
	BTF_MEMBER_ENC(15, 1, 0),	/* int val */
	/* type 3: struct val { int cnt; struct bpf_spin_lock l; }  (size 8) */
	BTF_TYPE_ENC(15, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 2), 8),
	BTF_MEMBER_ENC(19, 1, 0),	/* int cnt, bit offset 0 */
	BTF_MEMBER_ENC(23, 2, 32),	/* struct bpf_spin_lock l, bit offset 32 */
};
541
542static int load_btf(void)
543{
544 struct btf_header hdr = {
545 .magic = BTF_MAGIC,
546 .version = BTF_VERSION,
547 .hdr_len = sizeof(struct btf_header),
548 .type_len = sizeof(btf_raw_types),
549 .str_off = sizeof(btf_raw_types),
550 .str_len = sizeof(btf_str_sec),
551 };
552 void *ptr, *raw_btf;
553 int btf_fd;
554
555 ptr = raw_btf = malloc(sizeof(hdr) + sizeof(btf_raw_types) +
556 sizeof(btf_str_sec));
557
558 memcpy(ptr, &hdr, sizeof(hdr));
559 ptr += sizeof(hdr);
560 memcpy(ptr, btf_raw_types, hdr.type_len);
561 ptr += hdr.type_len;
562 memcpy(ptr, btf_str_sec, hdr.str_len);
563 ptr += hdr.str_len;
564
565 btf_fd = bpf_load_btf(raw_btf, ptr - raw_btf, 0, 0, 0);
566 free(raw_btf);
567 if (btf_fd < 0)
568 return -1;
569 return btf_fd;
570}
571
572static int create_map_spin_lock(void)
573{
574 struct bpf_create_map_attr attr = {
575 .name = "test_map",
576 .map_type = BPF_MAP_TYPE_ARRAY,
577 .key_size = 4,
578 .value_size = 8,
579 .max_entries = 1,
580 .btf_key_type_id = 1,
581 .btf_value_type_id = 3,
582 };
583 int fd, btf_fd;
584
585 btf_fd = load_btf();
586 if (btf_fd < 0)
587 return -1;
588 attr.btf_fd = btf_fd;
589 fd = bpf_create_map_xattr(&attr);
590 if (fd < 0)
591 printf("Failed to create map with spin_lock\n");
592 return fd;
593}
594
595static int create_sk_storage_map(void)
596{
597 struct bpf_create_map_attr attr = {
598 .name = "test_map",
599 .map_type = BPF_MAP_TYPE_SK_STORAGE,
600 .key_size = 4,
601 .value_size = 8,
602 .max_entries = 0,
603 .map_flags = BPF_F_NO_PREALLOC,
604 .btf_key_type_id = 1,
605 .btf_value_type_id = 3,
606 };
607 int fd, btf_fd;
608
609 btf_fd = load_btf();
610 if (btf_fd < 0)
611 return -1;
612 attr.btf_fd = btf_fd;
613 fd = bpf_create_map_xattr(&attr);
614 close(attr.btf_fd);
615 if (fd < 0)
616 printf("Failed to create sk_storage_map\n");
617 return fd;
618}
619
/* Shared verifier-log buffer: UINT_MAX >> 8 bytes (~16 MiB). */
static char bpf_vlog[UINT_MAX >> 8];
621
/* Resolve a test's fixups before loading: run its fill_helper (if any) to
 * generate the instruction stream, then create each referenced map/prog
 * array and patch the fds into the listed instructions' imm fields.
 * Each fixup array is a zero-terminated list of instruction indices, and
 * each map kind has a dedicated slot in map_fds[0..19] so the caller can
 * close everything afterwards.
 */
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
			  struct bpf_insn *prog, int *map_fds)
{
	int *fixup_map_hash_8b = test->fixup_map_hash_8b;
	int *fixup_map_hash_48b = test->fixup_map_hash_48b;
	int *fixup_map_hash_16b = test->fixup_map_hash_16b;
	int *fixup_map_array_48b = test->fixup_map_array_48b;
	int *fixup_map_sockmap = test->fixup_map_sockmap;
	int *fixup_map_sockhash = test->fixup_map_sockhash;
	int *fixup_map_xskmap = test->fixup_map_xskmap;
	int *fixup_map_stacktrace = test->fixup_map_stacktrace;
	int *fixup_prog1 = test->fixup_prog1;
	int *fixup_prog2 = test->fixup_prog2;
	int *fixup_map_in_map = test->fixup_map_in_map;
	int *fixup_cgroup_storage = test->fixup_cgroup_storage;
	int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
	int *fixup_map_spin_lock = test->fixup_map_spin_lock;
	int *fixup_map_array_ro = test->fixup_map_array_ro;
	int *fixup_map_array_wo = test->fixup_map_array_wo;
	int *fixup_map_array_small = test->fixup_map_array_small;
	int *fixup_sk_storage_map = test->fixup_sk_storage_map;
	int *fixup_map_event_output = test->fixup_map_event_output;
	int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;

	if (test->fill_helper) {
		/* NOTE(review): calloc() result is not checked; an OOM here
		 * would crash in the fill helper.
		 */
		test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
		test->fill_helper(test);
	}

	/* Allocate each map type only when at least one instruction refers
	 * to it, then patch every listed instruction with the new fd.
	 */
	if (*fixup_map_hash_8b) {
		map_fds[0] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(long long), 1);
		do {
			prog[*fixup_map_hash_8b].imm = map_fds[0];
			fixup_map_hash_8b++;
		} while (*fixup_map_hash_8b);
	}

	if (*fixup_map_hash_48b) {
		map_fds[1] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct test_val), 1);
		do {
			prog[*fixup_map_hash_48b].imm = map_fds[1];
			fixup_map_hash_48b++;
		} while (*fixup_map_hash_48b);
	}

	if (*fixup_map_hash_16b) {
		map_fds[2] = create_map(BPF_MAP_TYPE_HASH, sizeof(long long),
					sizeof(struct other_val), 1);
		do {
			prog[*fixup_map_hash_16b].imm = map_fds[2];
			fixup_map_hash_16b++;
		} while (*fixup_map_hash_16b);
	}

	if (*fixup_map_array_48b) {
		map_fds[3] = create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					sizeof(struct test_val), 1);
		update_map(map_fds[3], 0);	/* pre-populate element 0 */
		do {
			prog[*fixup_map_array_48b].imm = map_fds[3];
			fixup_map_array_48b++;
		} while (*fixup_map_array_48b);
	}

	if (*fixup_prog1) {
		map_fds[4] = create_prog_array(prog_type, 4, 0, 1, 2);
		do {
			prog[*fixup_prog1].imm = map_fds[4];
			fixup_prog1++;
		} while (*fixup_prog1);
	}

	if (*fixup_prog2) {
		map_fds[5] = create_prog_array(prog_type, 8, 7, 1, 2);
		do {
			prog[*fixup_prog2].imm = map_fds[5];
			fixup_prog2++;
		} while (*fixup_prog2);
	}

	if (*fixup_map_in_map) {
		map_fds[6] = create_map_in_map();
		do {
			prog[*fixup_map_in_map].imm = map_fds[6];
			fixup_map_in_map++;
		} while (*fixup_map_in_map);
	}

	if (*fixup_cgroup_storage) {
		map_fds[7] = create_cgroup_storage(false);
		do {
			prog[*fixup_cgroup_storage].imm = map_fds[7];
			fixup_cgroup_storage++;
		} while (*fixup_cgroup_storage);
	}

	if (*fixup_percpu_cgroup_storage) {
		map_fds[8] = create_cgroup_storage(true);
		do {
			prog[*fixup_percpu_cgroup_storage].imm = map_fds[8];
			fixup_percpu_cgroup_storage++;
		} while (*fixup_percpu_cgroup_storage);
	}
	if (*fixup_map_sockmap) {
		map_fds[9] = create_map(BPF_MAP_TYPE_SOCKMAP, sizeof(int),
					sizeof(int), 1);
		do {
			prog[*fixup_map_sockmap].imm = map_fds[9];
			fixup_map_sockmap++;
		} while (*fixup_map_sockmap);
	}
	if (*fixup_map_sockhash) {
		map_fds[10] = create_map(BPF_MAP_TYPE_SOCKHASH, sizeof(int),
					 sizeof(int), 1);
		do {
			prog[*fixup_map_sockhash].imm = map_fds[10];
			fixup_map_sockhash++;
		} while (*fixup_map_sockhash);
	}
	if (*fixup_map_xskmap) {
		map_fds[11] = create_map(BPF_MAP_TYPE_XSKMAP, sizeof(int),
					 sizeof(int), 1);
		do {
			prog[*fixup_map_xskmap].imm = map_fds[11];
			fixup_map_xskmap++;
		} while (*fixup_map_xskmap);
	}
	if (*fixup_map_stacktrace) {
		map_fds[12] = create_map(BPF_MAP_TYPE_STACK_TRACE, sizeof(u32),
					 sizeof(u64), 1);
		do {
			prog[*fixup_map_stacktrace].imm = map_fds[12];
			fixup_map_stacktrace++;
		} while (*fixup_map_stacktrace);
	}
	if (*fixup_map_spin_lock) {
		map_fds[13] = create_map_spin_lock();
		do {
			prog[*fixup_map_spin_lock].imm = map_fds[13];
			fixup_map_spin_lock++;
		} while (*fixup_map_spin_lock);
	}
	if (*fixup_map_array_ro) {
		map_fds[14] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_RDONLY_PROG);
		update_map(map_fds[14], 0);
		do {
			prog[*fixup_map_array_ro].imm = map_fds[14];
			fixup_map_array_ro++;
		} while (*fixup_map_array_ro);
	}
	if (*fixup_map_array_wo) {
		map_fds[15] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   sizeof(struct test_val), 1,
					   BPF_F_WRONLY_PROG);
		update_map(map_fds[15], 0);
		do {
			prog[*fixup_map_array_wo].imm = map_fds[15];
			fixup_map_array_wo++;
		} while (*fixup_map_array_wo);
	}
	if (*fixup_map_array_small) {
		/* 1-byte value: used to test out-of-bounds value access */
		map_fds[16] = __create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
					   1, 1, 0);
		update_map(map_fds[16], 0);
		do {
			prog[*fixup_map_array_small].imm = map_fds[16];
			fixup_map_array_small++;
		} while (*fixup_map_array_small);
	}
	if (*fixup_sk_storage_map) {
		map_fds[17] = create_sk_storage_map();
		do {
			prog[*fixup_sk_storage_map].imm = map_fds[17];
			fixup_sk_storage_map++;
		} while (*fixup_sk_storage_map);
	}
	if (*fixup_map_event_output) {
		map_fds[18] = __create_map(BPF_MAP_TYPE_PERF_EVENT_ARRAY,
					   sizeof(int), sizeof(int), 1, 0);
		do {
			prog[*fixup_map_event_output].imm = map_fds[18];
			fixup_map_event_output++;
		} while (*fixup_map_event_output);
	}
	if (*fixup_map_reuseport_array) {
		map_fds[19] = __create_map(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
					   sizeof(u32), sizeof(u64), 1, 0);
		do {
			prog[*fixup_map_reuseport_array].imm = map_fds[19];
			fixup_map_reuseport_array++;
		} while (*fixup_map_reuseport_array);
	}
}
823
/* Raw view of libcap's cap_t so capability bits above what this libcap
 * version knows about (data[1] covers caps 32..63) can be poked directly.
 */
struct libcap {
	struct __user_cap_header_struct hdr;
	struct __user_cap_data_struct data[2];
};
828
/* Raise (admin=true) or drop (admin=false) the capabilities the harness
 * toggles between privileged and unprivileged runs: CAP_NET_ADMIN via the
 * libcap API, plus raw bits 38 and 39 — per is_admin() these correspond to
 * CAP_PERFMON and CAP_BPF, hard-coded because older libcap headers lack
 * them.  CAP_SYS_ADMIN is always cleared.  Returns 0 on success, -1 on
 * error.
 */
static int set_admin(bool admin)
{
	cap_t caps;

	const cap_value_t cap_net_admin = CAP_NET_ADMIN;
	const cap_value_t cap_sys_admin = CAP_SYS_ADMIN;
	struct libcap *cap;
	int ret = -1;

	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return -1;
	}
	cap = (struct libcap *)caps;
	/* CAP_SYS_ADMIN is dropped unconditionally, even in admin mode. */
	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_sys_admin, CAP_CLEAR)) {
		perror("cap_set_flag clear admin");
		goto out;
	}
	if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_net_admin,
				admin ? CAP_SET : CAP_CLEAR)) {
		perror("cap_set_flag set_or_clear net");
		goto out;
	}

	/* Toggle bits 38 (CAP_PERFMON) and 39 (CAP_BPF) in the raw
	 * effective set; data[1] holds capabilities 32..63.
	 */
	if (admin) {
		cap->data[1].effective |= 1 << (38 - 32);
		cap->data[1].effective |= 1 << (39 - 32);
	} else {
		cap->data[1].effective &= ~(1 << (38 - 32));
		cap->data[1].effective &= ~(1 << (39 - 32));
	}
	if (cap_set_proc(caps)) {
		perror("cap_set_proc");
		goto out;
	}
	ret = 0;
out:
	if (cap_free(caps))
		perror("cap_free");
	return ret;
}
873
/* Execute the loaded program once via BPF_PROG_TEST_RUN and compare the
 * return value against expected_val (POINTER_VALUE means "don't compare").
 * Unprivileged callers are temporarily re-elevated around the run because
 * BPF_PROG_TEST_RUN itself requires privilege.  Returns 0 on pass,
 * non-zero on failure.
 */
static int do_prog_test_run(int fd_prog, bool unpriv, uint32_t expected_val,
			    void *data, size_t size_data)
{
	__u8 tmp[TEST_DATA_LEN << 2];
	__u32 size_tmp = sizeof(tmp);
	uint32_t retval;
	int err;

	if (unpriv)
		set_admin(true);
	err = bpf_prog_test_run(fd_prog, 1, data, size_data,
				tmp, &size_tmp, &retval, NULL);
	if (unpriv)
		set_admin(false);
	/* 524 is ENOTSUPP (kernel-internal, not in userspace errno.h):
	 * treat "test_run not supported" and EPERM as non-failures.
	 */
	if (err && errno != 524 && errno != EPERM) {
		printf("Unexpected bpf_prog_test_run error ");
		return err;
	}
	if (!err && retval != expected_val &&
	    expected_val != POINTER_VALUE) {
		printf("FAIL retval %d != %d ", retval, expected_val);
		return 1;
	}

	return 0;
}
900
/* Check that every tab-separated fragment of `exp` appears in `log`, in
 * order, each match starting after the previous one.  Fragments must be
 * non-empty and shorter than the needle buffer.  Returns true when the
 * whole sequence is found.
 */
static bool cmp_str_seq(const char *log, const char *exp)
{
	char needle[80];
	const char *sep, *hit;
	size_t frag_len;

	for (;;) {
		sep = strchr(exp, '\t');
		if (!sep)
			sep = exp + strlen(exp);

		frag_len = sep - exp;
		if (frag_len >= sizeof(needle) || !frag_len) {
			printf("FAIL\nTestcase bug\n");
			return false;
		}
		memcpy(needle, exp, frag_len);
		needle[frag_len] = '\0';

		hit = strstr(log, needle);
		if (!hit) {
			printf("FAIL\nUnexpected verifier log in successful load!\n"
			       "EXP: %s\nRES:\n", needle);
			return false;
		}
		log = hit + frag_len;
		if (!*sep)
			break;
		exp = sep + 1;
	}
	return true;
}
930
/* Run one test case in one privilege mode: apply fixups, load the program,
 * compare the verifier verdict/log against expectations, optionally execute
 * it via test_run, and update the pass/error counters.
 */
static void do_test_single(struct bpf_test *test, bool unpriv,
			   int *passes, int *errors)
{
	int fd_prog, expected_ret, alignment_prevented_execution;
	int prog_len, prog_type = test->prog_type;
	struct bpf_insn *prog = test->insns;
	struct bpf_load_program_attr attr;
	int run_errs, run_successes;
	int map_fds[MAX_NR_MAPS];
	const char *expected_err;
	int fixup_skips;
	__u32 pflags;
	int i, err;

	for (i = 0; i < MAX_NR_MAPS; i++)
		map_fds[i] = -1;

	if (!prog_type)
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	fixup_skips = skips;
	do_test_fixup(test, prog_type, prog, map_fds);
	if (test->fill_insns) {
		prog = test->fill_insns;
		prog_len = test->prog_len;
	} else {
		prog_len = probe_filter_length(prog);
	}

	/* If the fixup stage skipped a map (unsupported type), the program
	 * would load maps with fd -1 — skip the whole test instead.
	 * NOTE(review): this early return leaks any maps already created
	 * and a fill_insns allocation, if present.
	 */
	if (fixup_skips != skips)
		return;

	pflags = BPF_F_TEST_RND_HI32;
	if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
		pflags |= BPF_F_STRICT_ALIGNMENT;
	if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
		pflags |= BPF_F_ANY_ALIGNMENT;
	/* any flag bits beyond the two alignment bits pass straight through */
	if (test->flags & ~3)
		pflags |= test->flags;

	/* Pick privileged or unprivileged expectations for this run. */
	expected_ret = unpriv && test->result_unpriv != UNDEF ?
		       test->result_unpriv : test->result;
	expected_err = unpriv && test->errstr_unpriv ?
		       test->errstr_unpriv : test->errstr;
	memset(&attr, 0, sizeof(attr));
	attr.prog_type = prog_type;
	attr.expected_attach_type = test->expected_attach_type;
	attr.insns = prog;
	attr.insns_cnt = prog_len;
	attr.license = "GPL";
	if (verbose)
		attr.log_level = 1;
	else if (expected_ret == VERBOSE_ACCEPT)
		attr.log_level = 2;	/* need full log to match errstr */
	else
		attr.log_level = 4;
	attr.prog_flags = pflags;

	fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));
	if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
		printf("SKIP (unsupported program type %d)\n", prog_type);
		skips++;
		goto close_fds;
	}

	alignment_prevented_execution = 0;

	if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) {
		if (fd_prog < 0) {
			printf("FAIL\nFailed to load prog '%s'!\n",
			       strerror(errno));
			goto fail_log;
		}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		/* Loaded with BPF_F_ANY_ALIGNMENT on an arch that cannot
		 * actually do unaligned access: don't execute it.
		 */
		if (fd_prog >= 0 &&
		    (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS))
			alignment_prevented_execution = 1;
#endif
		if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err)) {
			goto fail_log;
		}
	} else {
		if (fd_prog >= 0) {
			printf("FAIL\nUnexpected success to load!\n");
			goto fail_log;
		}
		if (!expected_err || !strstr(bpf_vlog, expected_err)) {
			printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
			       expected_err, bpf_vlog);
			goto fail_log;
		}
	}

	if (test->insn_processed) {
		uint32_t insn_processed;
		char *proc;

		/* Parse "processed N insns" from the verifier log. */
		proc = strstr(bpf_vlog, "processed ");
		insn_processed = atoi(proc + 10);
		if (test->insn_processed != insn_processed) {
			printf("FAIL\nUnexpected insn_processed %u vs %u\n",
			       insn_processed, test->insn_processed);
			goto fail_log;
		}
	}

	if (verbose)
		printf(", verifier log:\n%s", bpf_vlog);

	run_errs = 0;
	run_successes = 0;
	if (!alignment_prevented_execution && fd_prog >= 0) {
		uint32_t expected_val;
		int i;

		if (!test->runs)
			test->runs = 1;

		for (i = 0; i < test->runs; i++) {
			if (unpriv && test->retvals[i].retval_unpriv)
				expected_val = test->retvals[i].retval_unpriv;
			else
				expected_val = test->retvals[i].retval;

			err = do_prog_test_run(fd_prog, unpriv, expected_val,
					       test->retvals[i].data,
					       sizeof(test->retvals[i].data));
			if (err) {
				printf("(run %d/%d) ", i + 1, test->runs);
				run_errs++;
			} else {
				run_successes++;
			}
		}
	}

	if (!run_errs) {
		(*passes)++;
		if (run_successes > 1)
			printf("%d cases ", run_successes);
		printf("OK");
		if (alignment_prevented_execution)
			printf(" (NOTE: not executed due to unknown alignment)");
		printf("\n");
	} else {
		printf("\n");
		goto fail_log;
	}
close_fds:
	if (test->fill_insns)
		free(test->fill_insns);
	/* close(-1) when the load failed is harmless (EBADF ignored) */
	close(fd_prog);
	for (i = 0; i < MAX_NR_MAPS; i++)
		close(map_fds[i]);
	sched_yield();	/* let kernel-side prog/map teardown proceed */
	return;
fail_log:
	(*errors)++;
	printf("%s", bpf_vlog);
	goto close_fds;
}
1093
/* Return true when the process holds CAP_NET_ADMIN plus raw capability
 * bits 39 (CAP_BPF) and 38 (CAP_PERFMON); the latter two are read straight
 * from the raw effective set because older libcap headers do not define
 * them.
 */
static bool is_admin(void)
{
	cap_flag_value_t net_priv = CAP_CLEAR;
	bool perfmon_priv = false;
	bool bpf_priv = false;
	struct libcap *cap;
	cap_t caps;

#ifdef CAP_IS_SUPPORTED
	/* Probe that the kernel/libcap combination supports capability
	 * queries at all before relying on them.
	 */
	if (!CAP_IS_SUPPORTED(CAP_SETFCAP)) {
		perror("cap_get_flag");
		return false;
	}
#endif
	caps = cap_get_proc();
	if (!caps) {
		perror("cap_get_proc");
		return false;
	}
	cap = (struct libcap *)caps;
	bpf_priv = cap->data[1].effective & (1 << (39 - 32));	/* CAP_BPF */
	perfmon_priv = cap->data[1].effective & (1 << (38 - 32));	/* CAP_PERFMON */
	if (cap_get_flag(caps, CAP_NET_ADMIN, CAP_EFFECTIVE, &net_priv))
		perror("cap_get_flag NET");
	if (cap_free(caps))
		perror("cap_free");
	return bpf_priv && perfmon_priv && net_priv == CAP_SET;
}
1122
1123static void get_unpriv_disabled()
1124{
1125 char buf[2];
1126 FILE *fd;
1127
1128 fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r");
1129 if (!fd) {
1130 perror("fopen /proc/sys/"UNPRIV_SYSCTL);
1131 unpriv_disabled = true;
1132 return;
1133 }
1134 if (fgets(buf, 2, fd) == buf && atoi(buf))
1135 unpriv_disabled = true;
1136 fclose(fd);
1137}
1138
1139static bool test_as_unpriv(struct bpf_test *test)
1140{
1141 return !test->prog_type ||
1142 test->prog_type == BPF_PROG_TYPE_SOCKET_FILTER ||
1143 test->prog_type == BPF_PROG_TYPE_CGROUP_SKB;
1144}
1145
/* Run tests[from..to) — each eligible test once unprivileged ("/u", with
 * capabilities dropped via set_admin(false) when we actually have them)
 * and once privileged ("/p") — then print the summary.  Returns
 * EXIT_FAILURE when any test failed.
 */
static int do_test(bool unpriv, unsigned int from, unsigned int to)
{
	int i, passes = 0, errors = 0;

	for (i = from; i < to; i++) {
		struct bpf_test *test = &tests[i];

		/* Unprivileged pass: skipped entirely when the sysctl forbids
		 * unprivileged bpf(), otherwise run with caps dropped.
		 */
		if (test_as_unpriv(test) && unpriv_disabled) {
			printf("#%d/u %s SKIP\n", i, test->descr);
			skips++;
		} else if (test_as_unpriv(test)) {
			if (!unpriv)
				set_admin(false);
			printf("#%d/u %s ", i, test->descr);
			do_test_single(test, true, &passes, &errors);
			if (!unpriv)
				set_admin(true);
		}

		/* Privileged pass: impossible when running unprivileged. */
		if (unpriv) {
			printf("#%d/p %s SKIP\n", i, test->descr);
			skips++;
		} else {
			printf("#%d/p %s ", i, test->descr);
			do_test_single(test, false, &passes, &errors);
		}
	}

	printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes,
	       skips, errors);
	return errors ? EXIT_FAILURE : EXIT_SUCCESS;
}
1181
/* Usage: test_verifier [-v] [from [to]]
 * With two indices, run tests [from, to]; with one, run just that test;
 * otherwise run them all.  Out-of-range indices silently fall back to the
 * full range.
 */
int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);
	bool unpriv = !is_admin();
	int arg = 1;

	if (argc > 1 && strcmp(argv[1], "-v") == 0) {
		arg++;
		verbose = true;
		argc--;	/* so the index checks below see only the numbers */
	}

	if (argc == 3) {
		unsigned int l = atoi(argv[arg]);
		unsigned int u = atoi(argv[arg + 1]);

		if (l < to && u < to) {
			from = l;
			to = u + 1;	/* inclusive upper bound */
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[arg]);

		if (t < to) {
			from = t;
			to = t + 1;
		}
	}

	get_unpriv_disabled();
	if (unpriv && unpriv_disabled) {
		printf("Cannot run as unprivileged user with sysctl %s.\n",
		       UNPRIV_SYSCTL);
		return EXIT_FAILURE;
	}

	/* Seed the deterministic PRNG used by the fill helpers. */
	bpf_semi_rand_init();
	return do_test(unpriv, from, to);
}
1221