linux/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_task.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_task_vma.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_unix.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"

static int duration;

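/* bpf_iter_test_kern3 is expected to fail open_and_load(); the CHECK only
 * fires (and cleans up) if loading unexpectedly succeeds.
 */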
static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (CHECK(skel, "bpf_iter_test_kern3__open_and_load",
		  "skeleton open_and_load unexpectedly succeeded\n")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}

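/* Attach @prog as an iterator, create an iterator fd from the resulting
 * link, and read it until EOF. The output is discarded; only attach,
 * create and read() errors are reported.
 */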
static void do_dummy_read(struct bpf_program *prog)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* don't check the contents, but ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}

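/* Read from @fd until EOF or until @size bytes have been stored in @buf.
 * Returns the number of bytes read, or a negative value on read() error.
 */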
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int bufleft = size;
	int len;

	do {
		len = read(fd, buf, bufleft);
		if (len > 0) {
			buf += len;
			bufleft -= len;
		}
	} while (len > 0);

	return len < 0 ? len : size - bufleft;
}

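/* The following subtests just open_and_load the corresponding skeleton and
 * do a dummy read over its iterator program.
 */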
static void test_ipv6_route(void)
{
	struct bpf_iter_ipv6_route *skel;

	skel = bpf_iter_ipv6_route__open_and_load();
	if (CHECK(!skel, "bpf_iter_ipv6_route__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_ipv6_route);

	bpf_iter_ipv6_route__destroy(skel);
}

static void test_netlink(void)
{
	struct bpf_iter_netlink *skel;

	skel = bpf_iter_netlink__open_and_load();
	if (CHECK(!skel, "bpf_iter_netlink__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_netlink);

	bpf_iter_netlink__destroy(skel);
}

static void test_bpf_map(void)
{
	struct bpf_iter_bpf_map *skel;

	skel = bpf_iter_bpf_map__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_map__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_bpf_map);

	bpf_iter_bpf_map__destroy(skel);
}

static void test_task(void)
{
	struct bpf_iter_task *skel;

	skel = bpf_iter_task__open_and_load();
	if (CHECK(!skel, "bpf_iter_task__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_task);

	bpf_iter_task__destroy(skel);
}

static void test_task_stack(void)
{
	struct bpf_iter_task_stack *skel;

	skel = bpf_iter_task_stack__open_and_load();
	if (CHECK(!skel, "bpf_iter_task_stack__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_task_stack);
	do_dummy_read(skel->progs.get_task_user_stacks);

	bpf_iter_task_stack__destroy(skel);
}

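/* do_nothing() is the body of the extra pthread spawned by test_task_file(),
 * so the task/file iterator runs against a multi-threaded process;
 * skel->bss->count, maintained by the BPF program, is expected to stay zero.
 */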
static void *do_nothing(void *arg)
{
	pthread_exit(arg);
}

static void test_task_file(void)
{
	struct bpf_iter_task_file *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (CHECK(!skel, "bpf_iter_task_file__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	skel->bss->tgid = getpid();

	if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
		  "pthread_create", "pthread_create failed\n"))
		goto done;

	do_dummy_read(skel->progs.dump_task_file);

	if (CHECK(pthread_join(thread_id, &ret) || ret != NULL,
		  "pthread_join", "pthread_join failed\n"))
		goto done;

	CHECK(skel->bss->count != 0, "check_count",
	      "invalid non pthread file visit count %d\n", skel->bss->count);

done:
	bpf_iter_task_file__destroy(skel);
}

#define TASKBUFSZ	32768

static char taskbuf[TASKBUFSZ];

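/* Dump task_structs via bpf_seq_printf_btf() into taskbuf and check that the
 * BTF-typed output contains "(struct task_struct)". Returns non-zero (and
 * marks the subtest skipped) when the BPF program reports that
 * __builtin_btf_type_id is unavailable.
 */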
static int do_btf_read(struct bpf_iter_task_btf *skel)
{
	struct bpf_program *prog = skel->progs.dump_task_struct;
	struct bpf_iter_task_btf__bss *bss = skel->bss;
	int iter_fd = -1, err;
	struct bpf_link *link;
	char *buf = taskbuf;
	int ret = 0;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return ret;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
	if (bss->skip) {
		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
		ret = 1;
		test__skip();
		goto free_link;
	}

	if (CHECK(err < 0, "read", "read failed: %s\n", strerror(errno)))
		goto free_link;

	CHECK(strstr(taskbuf, "(struct task_struct)") == NULL,
	      "check for btf representation of task_struct in iter data",
	      "struct task_struct not found\n");
free_link:
	if (iter_fd > 0)
		close(iter_fd);
	bpf_link__destroy(link);
	return ret;
}

static void test_task_btf(void)
{
	struct bpf_iter_task_btf__bss *bss;
	struct bpf_iter_task_btf *skel;
	int ret;

	skel = bpf_iter_task_btf__open_and_load();
	if (CHECK(!skel, "bpf_iter_task_btf__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	bss = skel->bss;

	ret = do_btf_read(skel);
	if (ret)
		goto cleanup;

	if (CHECK(bss->tasks == 0, "check if iterated over tasks",
		  "no task iteration, did BPF program run?\n"))
		goto cleanup;

	CHECK(bss->seq_err != 0, "check for unexpected err",
	      "bpf_seq_printf_btf returned %ld\n", bss->seq_err);

cleanup:
	bpf_iter_task_btf__destroy(skel);
}

static void test_tcp4(void)
{
	struct bpf_iter_tcp4 *skel;

	skel = bpf_iter_tcp4__open_and_load();
	if (CHECK(!skel, "bpf_iter_tcp4__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_tcp4);

	bpf_iter_tcp4__destroy(skel);
}

static void test_tcp6(void)
{
	struct bpf_iter_tcp6 *skel;

	skel = bpf_iter_tcp6__open_and_load();
	if (CHECK(!skel, "bpf_iter_tcp6__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_tcp6);

	bpf_iter_tcp6__destroy(skel);
}

static void test_udp4(void)
{
	struct bpf_iter_udp4 *skel;

	skel = bpf_iter_udp4__open_and_load();
	if (CHECK(!skel, "bpf_iter_udp4__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_udp4);

	bpf_iter_udp4__destroy(skel);
}

static void test_udp6(void)
{
	struct bpf_iter_udp6 *skel;

	skel = bpf_iter_udp6__open_and_load();
	if (CHECK(!skel, "bpf_iter_udp6__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	do_dummy_read(skel->progs.dump_udp6);

	bpf_iter_udp6__destroy(skel);
}

static void test_unix(void)
{
	struct bpf_iter_unix *skel;

	skel = bpf_iter_unix__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_unix);

	bpf_iter_unix__destroy(skel);
}

/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int err = -1, len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		if (CHECK(start >= 16, "read", "read len %d\n", len))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		return -1;

	err = strcmp(buf, expected);
	if (CHECK(err, "read", "incorrect read result: buf %s, expected %s\n",
		  buf, expected))
		return -1;

	return 0;
}

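/* Attach the test_kern1 task iterator anonymously and read it either
 * byte-by-byte or in 16-byte chunks; the program is expected to emit "abcd".
 */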
static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	struct bpf_link *link;
	int iter_fd, err;

	skel = bpf_iter_test_kern1__open_and_load();
	if (CHECK(!skel, "bpf_iter_test_kern1__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	err = bpf_iter_test_kern1__attach(skel);
	if (CHECK(err, "bpf_iter_test_kern1__attach",
		  "skeleton attach failed\n")) {
		goto out;
	}

	link = skel->links.dump_task;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}

static int do_read(const char *path, const char *expected)
{
	int err, iter_fd;

	iter_fd = open(path, O_RDONLY);
	if (CHECK(iter_fd < 0, "open", "open %s failed: %s\n",
		  path, strerror(errno)))
		return -1;

	err = do_read_with_fd(iter_fd, expected, false);
	close(iter_fd);
	return err;
}

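/* Pin the task iterator link in bpffs, read it through the pinned path,
 * then update the link to a second program and check that the output
 * changes from "abcd" to "ABCD".
 */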
static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (CHECK(!skel1, "bpf_iter_test_kern1__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (CHECK(err, "pin_iter", "pin_iter to %s failed: %d\n", path, err))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* The file-based iterator seems to work fine. Now do a program
	 * update on the underlying link and `cat` the iterator again;
	 * its content should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (CHECK(!skel2, "bpf_iter_test_kern2__open_and_load",
		  "skeleton open_and_load failed\n"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (CHECK(err, "update_prog", "update_prog failed\n"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}

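/* Exercise seq_file buffer handling: one map's output nearly fills the
 * 8-page buffer, while the second map triggers either an overflow restart,
 * an -E2BIG error, or an early stop when the BPF program returns 1.
 */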
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (CHECK(!skel, "bpf_iter_test_kern4__open",
		  "skeleton open failed\n"))
		return;

	/* Create two maps: the bpf program will only do bpf_seq_write
	 * for these two maps. The goal is that one map's output almost
	 * fills the seq_file buffer and the other then triggers an
	 * overflow and needs a restart.
	 */
	map1_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
	if (CHECK(map1_fd < 0, "bpf_create_map",
		  "map_creation failed: %s\n", strerror(errno)))
		goto out;
	map2_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, 4, 8, 1, 0);
	if (CHECK(map2_fd < 0, "bpf_create_map",
		  "map_creation failed: %s\n", strerror(errno)))
		goto free_map1;

	/* The bpf_seq_printf kernel buffer is 8 pages, so one map's
	 * bpf_seq_write will mostly fill it, and the other map will
	 * partially fill it, trigger an overflow, and need a
	 * bpf_seq_read restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
	skel->rodata->ret1 = ret1;

	if (CHECK(bpf_iter_test_kern4__load(skel),
		  "bpf_iter_test_kern4__load", "skeleton load failed\n"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_obj_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_obj_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (CHECK(err, "get_map_info", "get map info failed: %s\n",
		  strerror(errno)))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!buf)
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		CHECK(len != -1 || errno != E2BIG, "read",
		      "expected ret -1, errno E2BIG, but got ret %d, error %s\n",
		      len, strerror(errno));
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	} else {
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		} while (len > 0 || (len < 0 && errno == EAGAIN));

		if (CHECK(len < 0, "read", "read failed: %s\n",
			  strerror(errno)))
			goto free_buf;
	}

	if (CHECK(total_read_len != expected_read_len, "read",
		  "total len %u, expected len %u\n", total_read_len,
		  expected_read_len))
		goto free_buf;

	if (CHECK(skel->bss->map1_accessed != 1, "map1_accessed",
		  "expected 1 actual %d\n", skel->bss->map1_accessed))
		goto free_buf;

	if (CHECK(skel->bss->map2_accessed != 2, "map2_accessed",
		  "expected 2 actual %d\n", skel->bss->map2_accessed))
		goto free_buf;

	CHECK(skel->bss->map2_seqnum1 != skel->bss->map2_seqnum2,
	      "map2_seqnum", "two different seqnum %lld %lld\n",
	      skel->bss->map2_seqnum1, skel->bss->map2_seqnum2);

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}

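/* Populate hashmap1, attach the hash map iterator to it, and compare the
 * key/value sums computed by the BPF program with the expected totals.
 * Attaching to hashmap2 or hashmap3 must fail.
 */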
static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (CHECK(!skel, "bpf_iter_bpf_hash_map__open",
		  "skeleton open failed\n"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (CHECK(err, "bpf_iter_bpf_hash_map__load",
		  "skeleton load failed\n"))
		goto out;

	/* iterator with hashmap2 and hashmap3 should fail */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_key_c += key.c;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->key_sum_a != expected_key_a,
		  "key_sum_a", "got %u expected %u\n",
		  skel->bss->key_sum_a, expected_key_a))
		goto close_iter;
	if (CHECK(skel->bss->key_sum_b != expected_key_b,
		  "key_sum_b", "got %u expected %u\n",
		  skel->bss->key_sum_b, expected_key_b))
		goto close_iter;
	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %llu expected %llu\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}

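/* Same as the hash map test, but each key stores one 8-byte slot per
 * possible CPU; the BPF program sums the values across all CPUs.
 */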
static void test_bpf_percpu_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_hash_map *skel;
	int err, i, j, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u32 expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];
	void *val;

	val = malloc(8 * bpf_num_possible_cpus());
	if (CHECK(!val, "malloc", "malloc failed\n"))
		return;

	skel = bpf_iter_bpf_percpu_hash_map__open();
	if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open",
		  "skeleton open failed\n"))
		goto free_val;

	skel->rodata->num_cpus = bpf_num_possible_cpus();

	err = bpf_iter_bpf_percpu_hash_map__load(skel);
	if (CHECK(err, "bpf_iter_bpf_percpu_hash_map__load",
		  "skeleton load failed\n"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_key_c += key.c;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->key_sum_a != expected_key_a,
		  "key_sum_a", "got %u expected %u\n",
		  skel->bss->key_sum_a, expected_key_a))
		goto close_iter;
	if (CHECK(skel->bss->key_sum_b != expected_key_b,
		  "key_sum_b", "got %u expected %u\n",
		  skel->bss->key_sum_b, expected_key_b))
		goto close_iter;
	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %u expected %u\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_hash_map__destroy(skel);
free_val:
	free(val);
}

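/* Populate arraymap1, iterate it, and verify the first key/value pair
 * emitted via bpf_seq_write() as well as the key/value sums computed by
 * the BPF program; afterwards every element is expected to equal its index.
 */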
static void test_bpf_array_map(void)
{
	__u64 val, expected_val = 0, res_first_val, first_val = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	__u32 expected_key = 0, res_first_key;
	struct bpf_iter_bpf_array_map *skel;
	union bpf_iter_link_info linfo;
	int err, i, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64] = {};
	int len, start;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_array_map__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		val = i + 4;
		expected_key += i;
		expected_val += val;

		if (i == 0)
			first_val = val;

		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	start = 0;
	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
		start += len;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	res_first_key = *(__u32 *)buf;
	res_first_val = *(__u64 *)(buf + sizeof(__u32));
	if (CHECK(res_first_key != 0 || res_first_val != first_val,
		  "bpf_seq_write",
		  "seq_write failure: first key %u vs expected 0, "
		  "first value %llu vs expected %llu\n",
		  res_first_key, res_first_val, first_val))
		goto close_iter;

	if (CHECK(skel->bss->key_sum != expected_key,
		  "key_sum", "got %u expected %u\n",
		  skel->bss->key_sum, expected_key))
		goto close_iter;
	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %llu expected %llu\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		err = bpf_map_lookup_elem(map_fd, &i, &val);
		if (CHECK(err, "map_lookup", "map_lookup failed\n"))
			goto close_iter;
		if (CHECK(i != val, "invalid_val",
			  "got value %llu expected %u\n", val, i))
			goto close_iter;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_array_map__destroy(skel);
}

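/* Per-CPU variant of the array map test: each element holds one 8-byte
 * slot per possible CPU, and the key/value sums computed by the BPF
 * program are verified.
 */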
static void test_bpf_percpu_array_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_array_map *skel;
	__u32 expected_key = 0, expected_val = 0;
	union bpf_iter_link_info linfo;
	int err, i, j, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64];
	void *val;
	int len;

	val = malloc(8 * bpf_num_possible_cpus());
	if (CHECK(!val, "malloc", "malloc failed\n"))
		return;

	skel = bpf_iter_bpf_percpu_array_map__open();
	if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open",
		  "skeleton open failed\n"))
		goto free_val;

	skel->rodata->num_cpus = bpf_num_possible_cpus();

	err = bpf_iter_bpf_percpu_array_map__load(skel);
	if (CHECK(err, "bpf_iter_bpf_percpu_array_map__load",
		  "skeleton load failed\n"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		expected_key += i;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->key_sum != expected_key,
		  "key_sum", "got %u expected %u\n",
		  skel->bss->key_sum, expected_key))
		goto close_iter;
	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %u expected %u\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_array_map__destroy(skel);
free_val:
	free(val);
}

/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
		goto out;
	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (CHECK(err, "map_update", "map_update failed\n"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(!err || errno != ENOENT, "bpf_map_lookup_elem",
		  "map value wasn't deleted (err=%d, errno=%d)\n", err, errno))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

/* This creates a socket and its local storage. It then runs a task_iter BPF
 * program that replaces the existing socket local storage with the tgid of
 * the only task owning a file descriptor to this socket, i.e. this process,
 * prog_tests. It then runs a tcp socket iterator that negates the value in
 * the existing socket local storage, and the test verifies that the
 * resulting value is -pid.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (CHECK(sock_fd < 0, "socket", "errno: %d\n", errno))
		goto out;

	err = listen(sock_fd, 1);
	if (CHECK(err != 0, "listen", "errno: %d\n", errno))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (CHECK(err, "bpf_map_update_elem", "map_update_failed\n"))
		goto close_socket;

	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (CHECK(err || val != getpid(), "bpf_map_lookup_elem",
	    "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	    getpid(), val, err))
		goto close_socket;

	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	CHECK(err || val != -getpid(), "bpf_map_lookup_elem",
	      "map value wasn't set correctly (expected %d, got %d, err=%d)\n",
	      -getpid(), val, err);

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

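/* Create a few IPv6 sockets with socket-local storage, iterate the
 * sk_storage map, and check the socket count and value sum reported by
 * the BPF program.
 */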
static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (CHECK(!skel, "bpf_iter_bpf_sk_storage_map__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (CHECK(sock_fd[i] < 0, "socket", "errno: %d\n", errno))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (CHECK(err, "map_update", "map_update failed\n"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
		goto close_iter;

	/* test results */
	if (CHECK(skel->bss->ipv6_sk_count != num_sockets,
		  "ipv6_sk_count", "got %u expected %u\n",
		  skel->bss->ipv6_sk_count, num_sockets))
		goto close_iter;

	if (CHECK(skel->bss->val_sum != expected_val,
		  "val_sum", "got %u expected %u\n",
		  skel->bss->val_sum, expected_val))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

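/* Attaching bpf_iter_test_kern5's hash map iterator must fail; the subtest
 * covers an out-of-bound access on a read-only buffer.
 */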
static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (CHECK(!skel, "bpf_iter_test_kern5__open_and_load",
		  "skeleton open_and_load failed\n"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}

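/* bpf_iter_test_kern6 (buf-neg-offset) is expected to fail open_and_load();
 * only an unexpected success is reported and cleaned up.
 */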
static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel;

	skel = bpf_iter_test_kern6__open_and_load();
	if (CHECK(skel, "bpf_iter_test_kern6__open_and_load",
		  "skeleton open_and_load unexpected success\n"))
		bpf_iter_test_kern6__destroy(skel);
}

#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];

/* remove spaces and tabs from str, and only keep the first line */
static void str_strip_first_line(char *str)
{
	char *dst = str, *src = str;

	do {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);

	} while (*src != '\0' && *src != '\n');

	*dst = '\0';
}

#define min(a, b) ((a) < (b) ? (a) : (b))

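/* Read the task_vma iterator output for this process and compare its first
 * line (after stripping whitespace) with the first line of /proc/<pid>/maps.
 */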
static void test_task_vma(void)
{
	int err, iter_fd = -1, proc_maps_fd = -1;
	struct bpf_iter_task_vma *skel;
	int len, read_size = 4;
	char maps_path[64];

	skel = bpf_iter_task_vma__open();
	if (CHECK(!skel, "bpf_iter_task_vma__open", "skeleton open failed\n"))
		return;

	skel->bss->pid = getpid();

	err = bpf_iter_task_vma__load(skel);
	if (CHECK(err, "bpf_iter_task_vma__load", "skeleton load failed\n"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, NULL);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
	if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
		goto out;

	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
	 * to trigger seq_file corner cases. The expected output is much
	 * longer than 1kB, so the while loop will terminate.
	 */
	len = 0;
	while (len < CMP_BUFFER_SIZE) {
		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
					  min(read_size, CMP_BUFFER_SIZE - len));
		if (CHECK(err < 0, "read_iter_fd", "read_iter_fd failed\n"))
			goto out;
		len += err;
	}

	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
	proc_maps_fd = open(maps_path, O_RDONLY);
	if (CHECK(proc_maps_fd < 0, "open_proc_maps", "open_proc_maps failed\n"))
		goto out;
	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
	if (CHECK(err < 0, "read_prog_maps_fd", "read_prog_maps_fd failed\n"))
		goto out;

	/* strip and compare the first line of the two files */
	str_strip_first_line(task_vma_output);
	str_strip_first_line(proc_maps_output);

	CHECK(strcmp(task_vma_output, proc_maps_output), "compare_output",
	      "found mismatch\n");
out:
	close(proc_maps_fd);
	close(iter_fd);
	bpf_iter_task_vma__destroy(skel);
}

void test_bpf_iter(void)
{
	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task"))
		test_task();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_vma"))
		test_task_vma();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("unix"))
		test_unix();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("bpf_sk_storage_delete"))
		test_bpf_sk_storage_delete();
	if (test__start_subtest("bpf_sk_storage_get"))
		test_bpf_sk_storage_get();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
}