#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include <time.h>

#include <linux/types.h>
typedef __u16 __sum16;
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/unistd.h>

#include <sys/ioctl.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <fcntl.h>

#include <linux/bpf.h>
#include <linux/err.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "test_iptunnel_common.h"
#include "bpf_util.h"
#include "bpf_endian.h"
#include "bpf_rlimit.h"
#include "trace_helpers.h"

static int error_cnt, pass_cnt;
static bool jit_enabled;

#define MAGIC_BYTES 123
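
/* ipv4 test vector */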
static struct {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed pkt_v4 = {
	.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
	.iph.ihl = 5,
	.iph.protocol = 6,
	.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};
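
/* ipv6 test vector */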
static struct {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed pkt_v6 = {
	.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
	.iph.nexthdr = 6,
	.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};

#define _CHECK(condition, tag, duration, format...) ({			\
	int __ret = !!(condition);					\
	if (__ret) {							\
		error_cnt++;						\
		printf("%s:FAIL:%s ", __func__, tag);			\
		printf(format);						\
	} else {							\
		pass_cnt++;						\
		printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
	}								\
	__ret;								\
})

#define CHECK(condition, tag, format...) \
	_CHECK(condition, tag, duration, format)
#define CHECK_ATTR(condition, tag, format...) \
	_CHECK(condition, tag, tattr.duration, format)

static int bpf_find_map(const char *test, struct bpf_object *obj,
			const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		printf("%s:FAIL:map '%s' not found\n", test, name);
		error_cnt++;
		return -1;
	}
	return bpf_map__fd(map);
}

static void test_pkt_access(void)
{
	const char *file = "./test_pkt_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "ipv4",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "ipv6",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);
	bpf_object__close(obj);
}

static void test_prog_run_xattr(void)
{
	const char *file = "./test_pkt_access.o";
	struct bpf_object *obj;
	char buf[10];
	int err;
	struct bpf_prog_test_run_attr tattr = {
		.repeat = 1,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.data_out = buf,
		.data_size_out = 5,
	};

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &tattr.prog_fd);
	if (CHECK_ATTR(err, "load", "err %d errno %d\n", err, errno))
		return;

	memset(buf, 0, sizeof(buf));

	err = bpf_prog_test_run_xattr(&tattr);
	CHECK_ATTR(err != -1 || errno != ENOSPC || tattr.retval, "run",
		   "err %d errno %d retval %d\n", err, errno, tattr.retval);

	CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out",
		   "incorrect output size, want %zu have %u\n",
		   sizeof(pkt_v4), tattr.data_size_out);

	CHECK_ATTR(buf[5] != 0, "overflow",
		   "BPF_PROG_TEST_RUN ignored size hint\n");

	tattr.data_out = NULL;
	tattr.data_size_out = 0;
	errno = 0;

	err = bpf_prog_test_run_xattr(&tattr);
	CHECK_ATTR(err || errno || tattr.retval, "run_no_output",
		   "err %d errno %d retval %d\n", err, errno, tattr.retval);

	tattr.data_size_out = 1;
	err = bpf_prog_test_run_xattr(&tattr);
	CHECK_ATTR(err != -EINVAL, "run_wrong_size_out", "err %d\n", err);

	bpf_object__close(obj);
}

static void test_xdp(void)
{
	struct vip key4 = {.protocol = 6, .family = AF_INET};
	struct vip key6 = {.protocol = 6, .family = AF_INET6};
	struct iptnl_info value4 = {.family = AF_INET};
	struct iptnl_info value6 = {.family = AF_INET6};
	const char *file = "./test_xdp.o";
	struct bpf_object *obj;
	char buf[128];
	struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
	__u32 duration, retval, size;
	int err, prog_fd, map_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip2tnl");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key4, &value4, 0);
	bpf_map_update_elem(map_fd, &key6, &value6, 0);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);

	CHECK(err || retval != XDP_TX || size != 74 ||
	      iph->protocol != IPPROTO_IPIP, "ipv4",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || retval != XDP_TX || size != 114 ||
	      iph6->nexthdr != IPPROTO_IPV6, "ipv6",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
out:
	bpf_object__close(obj);
}

static void test_xdp_adjust_tail(void)
{
	const char *file = "./test_adjust_tail.o";
	struct bpf_object *obj;
	char buf[128];
	__u32 duration, retval, size;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);

	CHECK(err || retval != XDP_DROP,
	      "ipv4", "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || retval != XDP_TX || size != 54,
	      "ipv6", "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
	bpf_object__close(obj);
}

#define MAGIC_VAL 0x1234
#define NUM_ITER 100000
#define VIP_NUM 5

static void test_l4lb(const char *file)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	__u32 *magic = (__u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || retval != 7 /* TC_ACT_REDIRECT */ || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || retval != 7 /* TC_ACT_REDIRECT */ || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}

static void test_l4lb_all(void)
{
	const char *file1 = "./test_l4lb.o";
	const char *file2 = "./test_l4lb_noinline.o";

	test_l4lb(file1);
	test_l4lb(file2);
}

static void test_xdp_noinline(void)
{
	const char *file = "./test_xdp_noinline.o";
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	__u32 *magic = (__u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || retval != 1 /* XDP_TX */ || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || retval != 1 /* XDP_TX */ || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}

static void test_tcp_estats(void)
{
	const char *file = "./test_tcp_estats.o";
	int err, prog_fd;
	struct bpf_object *obj;
	__u32 duration = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	CHECK(err, "", "err %d errno %d\n", err, errno);
	if (err)
		return;

	bpf_object__close(obj);
}

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

static bool is_jit_enabled(void)
{
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	bool enabled = false;
	int sysctl_fd;

	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	return enabled;
}

static void test_bpf_obj_id(void)
{
	const __u64 array_magic_value = 0xfaceb00c;
	const __u32 array_key = 0;
	const int nr_iters = 2;
	const char *file = "./test_obj_id.o";
	const char *expected_prog_name = "test_obj_id";
	const char *expected_map_name = "test_map_id";
	const __u64 nsec_per_sec = 1000000000;

	struct bpf_object *objs[nr_iters];
	int prog_fds[nr_iters], map_fds[nr_iters];
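	/* +1 to test for the info_len returned by kernel */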
	struct bpf_prog_info prog_infos[nr_iters + 1];
	struct bpf_map_info map_infos[nr_iters + 1];
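	/* Each prog only uses one map. +1 to test nr_map_ids
	 * returned by kernel.
	 */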
	__u32 map_ids[nr_iters + 1];
	char jited_insns[128], xlated_insns[128], zeros[128];
	__u32 i, next_id, info_len, nr_id_found, duration = 0;
	struct timespec real_time_ts, boot_time_ts;
	int err = 0;
	__u64 array_value;
	uid_t my_uid = getuid();
	time_t now, load_time;

	err = bpf_prog_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);

	err = bpf_map_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);

	for (i = 0; i < nr_iters; i++)
		objs[i] = NULL;
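
	/* Check bpf_obj_get_info_by_fd() */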
	bzero(zeros, sizeof(zeros));
	for (i = 0; i < nr_iters; i++) {
		now = time(NULL);
		err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
				    &objs[i], &prog_fds[i]);
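		/* test_obj_id.o is a dumb prog. It should never fail
		 * to load.
		 */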
		if (err)
			error_cnt++;
		assert(!err);
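
		/* Insert a magic value to the map */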
		map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
		assert(map_fds[i] >= 0);
		err = bpf_map_update_elem(map_fds[i], &array_key,
					  &array_magic_value, 0);
		assert(!err);
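
		/* Check getting map info */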
		info_len = sizeof(struct bpf_map_info) * 2;
		bzero(&map_infos[i], info_len);
		err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
					     &info_len);
		if (CHECK(err ||
			  map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
			  map_infos[i].key_size != sizeof(__u32) ||
			  map_infos[i].value_size != sizeof(__u64) ||
			  map_infos[i].max_entries != 1 ||
			  map_infos[i].map_flags != 0 ||
			  info_len != sizeof(struct bpf_map_info) ||
			  strcmp((char *)map_infos[i].name, expected_map_name),
			  "get-map-info(fd)",
			  "err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
			  err, errno,
			  map_infos[i].type, BPF_MAP_TYPE_ARRAY,
			  info_len, sizeof(struct bpf_map_info),
			  map_infos[i].key_size,
			  map_infos[i].value_size,
			  map_infos[i].max_entries,
			  map_infos[i].map_flags,
			  map_infos[i].name, expected_map_name))
			goto done;
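
		/* Check getting prog info */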
		info_len = sizeof(struct bpf_prog_info) * 2;
		bzero(&prog_infos[i], info_len);
		bzero(jited_insns, sizeof(jited_insns));
		bzero(xlated_insns, sizeof(xlated_insns));
		prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
		prog_infos[i].jited_prog_len = sizeof(jited_insns);
		prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
		prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
		prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
		prog_infos[i].nr_map_ids = 2;
		err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
		assert(!err);
		err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
		assert(!err);
		err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
					     &info_len);
		load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
			+ (prog_infos[i].load_time / nsec_per_sec);
		if (CHECK(err ||
			  prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
			  info_len != sizeof(struct bpf_prog_info) ||
			  (jit_enabled && !prog_infos[i].jited_prog_len) ||
			  (jit_enabled &&
			   !memcmp(jited_insns, zeros, sizeof(zeros))) ||
			  !prog_infos[i].xlated_prog_len ||
			  !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
			  load_time < now - 60 || load_time > now + 60 ||
			  prog_infos[i].created_by_uid != my_uid ||
			  prog_infos[i].nr_map_ids != 1 ||
			  *(int *)(long)prog_infos[i].map_ids != map_infos[i].id ||
			  strcmp((char *)prog_infos[i].name, expected_prog_name),
			  "get-prog-info(fd)",
			  "err %d errno %d i %d type %d(%d) info_len %u(%zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
			  err, errno, i,
			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
			  info_len, sizeof(struct bpf_prog_info),
			  jit_enabled,
			  prog_infos[i].jited_prog_len,
			  prog_infos[i].xlated_prog_len,
			  !!memcmp(jited_insns, zeros, sizeof(zeros)),
			  !!memcmp(xlated_insns, zeros, sizeof(zeros)),
			  load_time, now,
			  prog_infos[i].created_by_uid, my_uid,
			  prog_infos[i].nr_map_ids, 1,
			  *(int *)(long)prog_infos[i].map_ids, map_infos[i].id,
			  prog_infos[i].name, expected_prog_name))
			goto done;
	}
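
	/* Check bpf_prog_get_next_id() */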
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_prog_get_next_id(next_id, &next_id)) {
		struct bpf_prog_info prog_info = {};
		__u32 saved_map_id;
		int prog_fd;

		info_len = sizeof(prog_info);

		prog_fd = bpf_prog_get_fd_by_id(next_id);
		if (prog_fd < 0 && errno == ENOENT)
			/* The bpf_prog is in the dead row */
			continue;
		if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
			  "prog_fd %d next_id %d errno %d\n",
			  prog_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (prog_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;
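
		/* Negative test:
		 * prog_info.nr_map_ids = 1
		 * prog_info.map_ids = NULL
		 * expect EFAULT.
		 */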
		prog_info.nr_map_ids = 1;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		if (CHECK(!err || errno != EFAULT,
			  "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
			  err, errno, EFAULT))
			break;
		bzero(&prog_info, sizeof(prog_info));
		info_len = sizeof(prog_info);

		saved_map_id = *(int *)((long)prog_infos[i].map_ids);
		prog_info.map_ids = prog_infos[i].map_ids;
		prog_info.nr_map_ids = 2;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		prog_infos[i].jited_prog_insns = 0;
		prog_infos[i].xlated_prog_insns = 0;
		CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
		      memcmp(&prog_info, &prog_infos[i], info_len) ||
		      *(int *)(long)prog_info.map_ids != saved_map_id,
		      "get-prog-info(next_id->fd)",
		      "err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n",
		      err, errno, info_len, sizeof(struct bpf_prog_info),
		      memcmp(&prog_info, &prog_infos[i], info_len),
		      *(int *)(long)prog_info.map_ids, saved_map_id);
		close(prog_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total prog id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);
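
	/* Check bpf_map_get_next_id() */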
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_map_get_next_id(next_id, &next_id)) {
		struct bpf_map_info map_info = {};
		int map_fd;

		info_len = sizeof(map_info);

		map_fd = bpf_map_get_fd_by_id(next_id);
		if (map_fd < 0 && errno == ENOENT)
			/* The bpf_map is in the dead row */
			continue;
		if (CHECK(map_fd < 0, "get-map-fd(next_id)",
			  "map_fd %d next_id %u errno %d\n",
			  map_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (map_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
		assert(!err);

		err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
		CHECK(err || info_len != sizeof(struct bpf_map_info) ||
		      memcmp(&map_info, &map_infos[i], info_len) ||
		      array_value != array_magic_value,
		      "check get-map-info(next_id->fd)",
		      "err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n",
		      err, errno, info_len, sizeof(struct bpf_map_info),
		      memcmp(&map_info, &map_infos[i], info_len),
		      array_value, array_magic_value);

		close(map_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total map id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

done:
	for (i = 0; i < nr_iters; i++)
		bpf_object__close(objs[i]);
}

static void test_pkt_md_access(void)
{
	const char *file = "./test_pkt_md_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	bpf_object__close(obj);
}

static void test_obj_name(void)
{
	struct {
		const char *name;
		int success;
		int expected_errno;
	} tests[] = {
		{ "", 1, 0 },
		{ "_123456789ABCDE", 1, 0 },
		{ "_123456789ABCDEF", 0, EINVAL },
		{ "_123456789ABCD\n", 0, EINVAL },
	};
	struct bpf_insn prog[] = {
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	__u32 duration = 0;
	int i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		size_t name_len = strlen(tests[i].name) + 1;
		union bpf_attr attr;
		size_t ncopy;
		int fd;
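
		/* test different attr.prog_name during BPF_PROG_LOAD */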
		ncopy = name_len < sizeof(attr.prog_name) ?
			name_len : sizeof(attr.prog_name);
		bzero(&attr, sizeof(attr));
		attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
		attr.insn_cnt = 2;
		attr.insns = ptr_to_u64(prog);
		attr.license = ptr_to_u64("");
		memcpy(attr.prog_name, tests[i].name, ncopy);

		fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-prog-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);
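
		/* test different attr.map_name during BPF_MAP_CREATE */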
		ncopy = name_len < sizeof(attr.map_name) ?
			name_len : sizeof(attr.map_name);
		bzero(&attr, sizeof(attr));
		attr.map_type = BPF_MAP_TYPE_ARRAY;
		attr.key_size = 4;
		attr.value_size = 4;
		attr.max_entries = 1;
		attr.map_flags = 0;
		memcpy(attr.map_name, tests[i].name, ncopy);
		fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-map-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);
	}
}

static void test_tp_attach_query(void)
{
	const int num_progs = 3;
	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
	const char *file = "./test_tracepoint.o";
	struct perf_event_query_bpf *query;
	struct perf_event_attr attr = {};
	struct bpf_object *obj[num_progs];
	struct bpf_prog_info prog_info;
	char buf[256];

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		return;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		return;

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
	for (i = 0; i < num_progs; i++) {
		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
				    &prog_fd[i]);
		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
			goto cleanup1;

		bzero(&prog_info, sizeof(prog_info));
		prog_info.jited_prog_len = 0;
		prog_info.xlated_prog_len = 0;
		prog_info.nr_map_ids = 0;
		info_len = sizeof(prog_info);
		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
			  err, errno))
			goto cleanup1;
		saved_prog_ids[i] = prog_info.id;

		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
				    0 /* cpu 0 */, -1 /* group id */,
				    0 /* flags */);
		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
			  pmu_fd[i], errno))
			goto cleanup2;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 0) {
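			/* check NULL prog array query */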
			query->ids_len = num_progs;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 0,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 1) {
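			/* try to get # of programs only */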
			query->ids_len = 0;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
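
			/* try a few negative tests */
			/* invalid query pointer */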
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
				    (struct perf_event_query_bpf *)0x1);
			if (CHECK(!err || errno != EFAULT,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d\n", err, errno))
				goto cleanup3;
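
			/* not enough space in query->ids: expect ENOSPC */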
			query->ids_len = 1;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		query->ids_len = num_progs;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != (i + 1),
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))
			goto cleanup3;
		for (j = 0; j < i + 1; j++)
			if (CHECK(saved_prog_ids[j] != query->ids[j],
				  "perf_event_ioc_query_bpf",
				  "#%d saved_prog_id %x query prog_id %x\n",
				  j, saved_prog_ids[j], query->ids[j]))
				goto cleanup3;
	}

	i = num_progs - 1;
	for (; i >= 0; i--) {
 cleanup3:
		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
 cleanup2:
		close(pmu_fd[i]);
 cleanup1:
		bpf_object__close(obj[i]);
	}
	free(query);
}

static int compare_map_keys(int map1_fd, int map2_fd)
{
	__u32 key, next_key;
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];
	int err;

	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	if (err)
		return err;
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	if (err)
		return err;

	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
		if (err)
			return err;

		key = next_key;
	}
	if (errno != ENOENT)
		return -1;

	return 0;
}

static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
	__u32 key, next_key, *cur_key_p, *next_key_p;
	char *val_buf1, *val_buf2;
	int i, err = 0;

	val_buf1 = malloc(stack_trace_len);
	val_buf2 = malloc(stack_trace_len);
	if (!val_buf1 || !val_buf2) {
		err = -1;
		goto out;
	}
	cur_key_p = NULL;
	next_key_p = &key;
	while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
		err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
		if (err)
			goto out;
		err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
		if (err)
			goto out;
		for (i = 0; i < stack_trace_len; i++) {
			if (val_buf1[i] != val_buf2[i]) {
				err = -1;
				goto out;
			}
		}
		key = *next_key_p;
		cur_key_p = &key;
		next_key_p = &next_key;
	}
	if (errno != ENOENT)
		err = -1;

out:
	free(val_buf1);
	free(val_buf2);
	return err;
}

static void test_stacktrace_map(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_map.o";
	int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
	struct perf_event_attr attr = {};
	__u32 key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		return;
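
	/* Get the ID for the sched/sched_switch tracepoint */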
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (bytes <= 0 || bytes >= sizeof(buf))
		goto close_prog;
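
	/* Open the perf event and attach the bpf program */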
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (err)
		goto disable_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (err)
		goto disable_pmu;
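
	/* find map fds */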
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)
		goto disable_pmu;

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (stack_amap_fd < 0)
		goto disable_pmu;
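
	/* give some time for the bpf program to run */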
	sleep(1);
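
	/* disable stack trace collection */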
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);
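
	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */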
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
	if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	goto disable_pmu_noerr;
disable_pmu:
	error_cnt++;
disable_pmu_noerr:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
	close(pmu_fd);
close_prog:
	bpf_object__close(obj);
}

static void test_stacktrace_map_raw_tp(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_map.o";
	int efd, err, prog_fd;
	__u32 key, val, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;
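
	/* find map fds */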
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)
		goto close_prog;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)
		goto close_prog;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)
		goto close_prog;
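
	/* give some time for the bpf program to run */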
	sleep(1);
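
	/* disable stack trace collection */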
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);
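
	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */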
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto close_prog;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto close_prog;

	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}

static int extract_build_id(char *build_id, size_t size)
{
	FILE *fp;
	char *line = NULL;
	size_t len = 0;

	fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
	if (fp == NULL)
		return -1;

	if (getline(&line, &len, fp) == -1)
		goto err;
	pclose(fp);

	if (len > size - 1)
		len = size - 1;
	memcpy(build_id, line, len);
	build_id[len] = '\0';
	free(line);
	return 0;
err:
	pclose(fp);
	free(line);
	return -1;
}

static void test_stacktrace_build_id(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_build_id.o";
	int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
	struct perf_event_attr attr = {};
	__u32 key, previous_key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];
	int i, j;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;
	int retry = 1;

retry:
	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		goto out;
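
	/* Get the ID for the random/urandom_read tracepoint */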
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/random/urandom_read/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		goto close_prog;
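
	/* Open the perf event and attach the bpf program */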
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
		  err, errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;
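
	/* find map fds */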
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
	       == 0);
	assert(system("./urandom_read") == 0);
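
	/* disable stack trace collection */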
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);
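
	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */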
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = extract_build_id(buf, 256);
	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))
		goto disable_pmu;

	do {
		char build_id[64];

		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
			goto disable_pmu;
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
			}
		previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
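
	/* The build-id lookup in the stack map can be racy and occasionally
	 * report BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
	 * try it one more time.
	 */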
	if (build_id_matches < 1 && retry--) {
		ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
		close(pmu_fd);
		bpf_object__close(obj);
		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
		       __func__);
		goto retry;
	}

	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))
		goto disable_pmu;

	stack_trace_len = PERF_MAX_STACK_DEPTH
		* sizeof(struct bpf_stack_build_id);
	err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
	CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
	      "err %d errno %d\n", err, errno);

disable_pmu:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

close_pmu:
	close(pmu_fd);

close_prog:
	bpf_object__close(obj);

out:
	return;
}

static void test_stacktrace_build_id_nmi(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
	const char *file = "./test_stacktrace_build_id.o";
	int err, pmu_fd, prog_fd;
	struct perf_event_attr attr = {
		.sample_freq = 5000,
		.freq = 1,
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	__u32 key, previous_key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];
	int i, j;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;
	int retry = 1;

retry:
	err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		return;

	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open",
		  "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
		  err, errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;
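
	/* find map fds */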
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
	if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
	       == 0);
	assert(system("taskset 0x1 ./urandom_read 100000") == 0);
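
	/* disable stack trace collection */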
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);
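
	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */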
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = extract_build_id(buf, 256);
	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))
		goto disable_pmu;

	do {
		char build_id[64];

		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
			goto disable_pmu;
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
			}
		previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
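
	/* The build-id lookup in the stack map can be racy and occasionally
	 * report BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
	 * try it one more time.
	 */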
	if (build_id_matches < 1 && retry--) {
		ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
		close(pmu_fd);
		bpf_object__close(obj);
		printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
		       __func__);
		goto retry;
	}

	if (CHECK(build_id_matches < 1, "build id match",
		  "Didn't find expected build ID from the map\n"))
		goto disable_pmu;
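
	/* compare_stack_ips() is intentionally skipped here: in NMI context
	 * only one ips-to-build_id translation is supported per cpu at a
	 * time, so stack_amap falls back to BPF_STACK_BUILD_ID_IP entries
	 * and would not match stackmap.
	 */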

disable_pmu:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

close_pmu:
	close(pmu_fd);

close_prog:
	bpf_object__close(obj);
}

#define MAX_CNT_RAWTP	10ull
#define MAX_STACK_RAWTP	100
struct get_stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

static int get_stack_print_output(void *data, int size)
{
	bool good_kern_stack = false, good_user_stack = false;
	const char *nonjit_func = "___bpf_prog_run";
	struct get_stack_trace_t *e = data;
	int i, num_stack;
	static __u64 cnt;
	struct ksym *ks;

	cnt++;

	if (size < sizeof(struct get_stack_trace_t)) {
		__u64 *raw_data = data;
		bool found = false;

		num_stack = size / sizeof(__u64);
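		/* If jit is enabled, we do not have a good way to
		 * verify the sanity of the kernel stack. So we
		 * just assume it is good if the stack is not empty.
		 */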
		if (jit_enabled) {
			found = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(raw_data[i]);
				if (strcmp(ks->name, nonjit_func) == 0) {
					found = true;
					break;
				}
			}
		}
		if (found) {
			good_kern_stack = true;
			good_user_stack = true;
		}
	} else {
		num_stack = e->kern_stack_size / sizeof(__u64);
		if (jit_enabled) {
			good_kern_stack = num_stack > 0;
		} else {
			for (i = 0; i < num_stack; i++) {
				ks = ksym_search(e->kern_stack[i]);
				if (strcmp(ks->name, nonjit_func) == 0) {
					good_kern_stack = true;
					break;
				}
			}
		}
		if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
			good_user_stack = true;
	}
	if (!good_kern_stack || !good_user_stack)
		return LIBBPF_PERF_EVENT_ERROR;

	if (cnt == MAX_CNT_RAWTP)
		return LIBBPF_PERF_EVENT_DONE;

	return LIBBPF_PERF_EVENT_CONT;
}

static void test_get_stack_raw_tp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
	struct perf_event_attr attr = {};
	struct timespec tv = {0, 10};
	__u32 key = 0, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
	if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
		  perfmap_fd, errno))
		goto close_prog;

	err = load_kallsyms();
	if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
		goto close_prog;

	attr.sample_type = PERF_SAMPLE_RAW;
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_BPF_OUTPUT;
	pmu_fd = syscall(__NR_perf_event_open, &attr, getpid() /* pid */,
			 -1 /* cpu */, -1 /* group_fd */, 0);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
		  errno))
		goto close_prog;

	err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
	if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
		  errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
		  err, errno))
		goto close_prog;

	err = perf_event_mmap(pmu_fd);
	if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
		goto close_prog;
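
	/* trigger some syscall action */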
	for (i = 0; i < MAX_CNT_RAWTP; i++)
		nanosleep(&tv, NULL);

	err = perf_event_poller(pmu_fd, get_stack_print_output);
	if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
		goto close_prog;

	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}

static void test_task_fd_query_rawtp(void)
{
	const char *file = "./test_get_stack_rawtp.o";
	__u64 probe_offset, probe_addr;
	__u32 len, prog_id, fd_type;
	struct bpf_object *obj;
	int efd, err, prog_fd;
	__u32 duration = 0;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;
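
	/* query (getpid(), efd) */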
	len = sizeof(buf);
	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
		  errno))
		goto close_prog;

	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      strcmp(buf, "sys_enter") == 0;
	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
		  fd_type, buf))
		goto close_prog;
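
	/* test zero len */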
	len = 0;
	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
		  err, errno))
		goto close_prog;
	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter");
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
		goto close_prog;
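
	/* test empty buffer */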
	len = sizeof(buf);
	err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
		  err, errno))
		goto close_prog;
	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter");
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
		goto close_prog;
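
	/* test smaller buffer */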
	len = 3;
	err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
		  "err %d errno %d\n", err, errno))
		goto close_prog;
	err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
	      len == strlen("sys_enter") &&
	      strcmp(buf, "sy") == 0;
	if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
		goto close_prog;

	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}

static void test_task_fd_query_tp_core(const char *probe_name,
				       const char *tp_name)
{
	const char *file = "./test_tracepoint.o";
	int err, bytes, efd, prog_fd, pmu_fd;
	struct perf_event_attr attr = {};
	__u64 probe_offset, probe_addr;
	__u32 len, prog_id, fd_type;
	struct bpf_object *obj;
	__u32 duration = 0;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
		goto close_prog;

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
		  "bytes %d errno %d\n", bytes, errno))
		goto close_prog;

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;
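
	/* query (getpid(), pmu_fd) */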
	len = sizeof(buf);
	err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
				&fd_type, &probe_offset, &probe_addr);
	if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
	if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
		  fd_type, buf))
		goto close_pmu;

	close(pmu_fd);
	goto close_prog_noerr;

close_pmu:
	close(pmu_fd);
close_prog:
	error_cnt++;
close_prog_noerr:
	bpf_object__close(obj);
}

static void test_task_fd_query_tp(void)
{
	test_task_fd_query_tp_core("sched/sched_switch",
				   "sched_switch");
	test_task_fd_query_tp_core("syscalls/sys_enter_read",
				   "sys_enter_read");
}

static void test_reference_tracking(void)
{
	const char *file = "./test_sk_lookup_kern.o";
	struct bpf_object *obj;
	struct bpf_program *prog;
	__u32 duration = 0;
	int err = 0;

	obj = bpf_object__open(file);
	if (IS_ERR(obj)) {
		error_cnt++;
		return;
	}

	bpf_object__for_each_program(prog, obj) {
		const char *title;
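
		/* Ignore .text sections */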
		title = bpf_program__title(prog, false);
		if (strstr(title, ".text") != NULL)
			continue;

		bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS);
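
		/* Expect verifier failure if test name has 'fail' */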
		if (strstr(title, "fail") != NULL) {
			libbpf_set_print(NULL, NULL, NULL);
			err = !bpf_program__load(prog, "GPL", 0);
			libbpf_set_print(printf, printf, NULL);
		} else {
			err = bpf_program__load(prog, "GPL", 0);
		}
		CHECK(err, title, "\n");
	}
	bpf_object__close(obj);
}

enum {
	QUEUE,
	STACK,
};

static void test_queue_stack_map(int type)
{
	const int MAP_SIZE = 32;
	__u32 vals[MAP_SIZE], duration, retval, size, val;
	int i, err, prog_fd, map_in_fd, map_out_fd;
	char file[32], buf[128];
	struct bpf_object *obj;
	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
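
	/* Fill test values to be used */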
	for (i = 0; i < MAP_SIZE; i++)
		vals[i] = rand();

	if (type == QUEUE)
		strncpy(file, "./test_queue_map.o", sizeof(file));
	else if (type == STACK)
		strncpy(file, "./test_stack_map.o", sizeof(file));
	else
		return;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_in_fd = bpf_find_map(__func__, obj, "map_in");
	if (map_in_fd < 0)
		goto out;

	map_out_fd = bpf_find_map(__func__, obj, "map_out");
	if (map_out_fd < 0)
		goto out;
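
	/* Push MAP_SIZE elements to the input map */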
	for (i = 0; i < MAP_SIZE; i++) {
		err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0);
		if (err) {
			error_cnt++;
			goto out;
		}
	}
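
	/* On each run the eBPF program pops one element from map_in into
	 * iph.daddr and pushes iph.saddr (set to vals[i] * 5 below) into
	 * map_out.
	 */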
	for (i = 0; i < MAP_SIZE; i++) {
		if (type == QUEUE) {
			val = vals[i];
			pkt_v4.iph.saddr = vals[i] * 5;
		} else if (type == STACK) {
			val = vals[MAP_SIZE - 1 - i];
			pkt_v4.iph.saddr = vals[MAP_SIZE - 1 - i] * 5;
		}

		err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
					buf, &size, &retval, &duration);
		if (err || retval || size != sizeof(pkt_v4) ||
		    iph->daddr != val)
			break;
	}

	CHECK(err || retval || size != sizeof(pkt_v4) || iph->daddr != val,
	      "bpf_map_pop_elem",
	      "err %d errno %d retval %d size %d iph->daddr %u\n",
	      err, errno, retval, size, iph->daddr);
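
	/* Queue/stack is empty, program should return TC_ACT_SHOT */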
	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || retval != 2 /* TC_ACT_SHOT */ || size != sizeof(pkt_v4),
	      "check-queue-stack-map-empty",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
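
	/* Check that the program pushed elements correctly */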
	for (i = 0; i < MAP_SIZE; i++) {
		err = bpf_map_lookup_and_delete_elem(map_out_fd, NULL, &val);
		if (err || val != vals[i] * 5)
			break;
	}

	CHECK(i != MAP_SIZE && (err || val != vals[i] * 5),
	      "bpf_map_push_elem", "err %d value %u\n", err, val);

out:
	pkt_v4.iph.saddr = 0;
	bpf_object__close(obj);
}

int main(void)
{
	srand(time(NULL));

	jit_enabled = is_jit_enabled();

	test_pkt_access();
	test_prog_run_xattr();
	test_xdp();
	test_xdp_adjust_tail();
	test_l4lb_all();
	test_xdp_noinline();
	test_tcp_estats();
	test_bpf_obj_id();
	test_pkt_md_access();
	test_obj_name();
	test_tp_attach_query();
	test_stacktrace_map();
	test_stacktrace_build_id();
	test_stacktrace_build_id_nmi();
	test_stacktrace_map_raw_tp();
	test_get_stack_raw_tp();
	test_task_fd_query_rawtp();
	test_task_fd_query_tp();
	test_reference_tracking();
	test_queue_stack_map(QUEUE);
	test_queue_stack_map(STACK);

	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}