linux/tools/testing/selftests/bpf/test_progs.c
/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include <time.h>

#include <linux/types.h>
typedef __u16 __sum16;
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/unistd.h>

#include <sys/ioctl.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <fcntl.h>

#include <linux/bpf.h>
#include <linux/err.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "test_iptunnel_common.h"
#include "bpf_util.h"
#include "bpf_endian.h"
#include "bpf_rlimit.h"
#include "trace_helpers.h"

static int error_cnt, pass_cnt;
static bool jit_enabled;

#define MAGIC_BYTES 123

/* ipv4 test vector */
static struct {
        struct ethhdr eth;
        struct iphdr iph;
        struct tcphdr tcp;
} __packed pkt_v4 = {
        .eth.h_proto = bpf_htons(ETH_P_IP),
        .iph.ihl = 5,
        .iph.protocol = 6,
        .iph.tot_len = bpf_htons(MAGIC_BYTES),
        .tcp.urg_ptr = 123,
};

/* ipv6 test vector */
static struct {
        struct ethhdr eth;
        struct ipv6hdr iph;
        struct tcphdr tcp;
} __packed pkt_v6 = {
        .eth.h_proto = bpf_htons(ETH_P_IPV6),
        .iph.nexthdr = 6,
        .iph.payload_len = bpf_htons(MAGIC_BYTES),
        .tcp.urg_ptr = 123,
};

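/* CHECK() is the pass/fail primitive used throughout this file: a nonzero
 * @condition counts as a failure, bumps error_cnt and prints the FAIL line;
 * otherwise pass_cnt is bumped. Note that it expects a __u32 "duration"
 * variable in scope at every call site (bpf_prog_test_run() fills it in),
 * and it evaluates to the failure status so callers can branch on it:
 *
 *	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
 *		goto cleanup;
 */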
#define CHECK(condition, tag, format...) ({                             \
        int __ret = !!(condition);                                      \
        if (__ret) {                                                    \
                error_cnt++;                                            \
                printf("%s:FAIL:%s ", __func__, tag);                   \
                printf(format);                                         \
        } else {                                                        \
                pass_cnt++;                                             \
                printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
        }                                                               \
        __ret;                                                          \
})

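/* Look up a map by name in an already-loaded object and return its fd,
 * or -1 (after recording a failure) if the object has no such map.
 */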
static int bpf_find_map(const char *test, struct bpf_object *obj,
                        const char *name)
{
        struct bpf_map *map;

        map = bpf_object__find_map_by_name(obj, name);
        if (!map) {
                printf("%s:FAIL:map '%s' not found\n", test, name);
                error_cnt++;
                return -1;
        }
        return bpf_map__fd(map);
}

static void test_pkt_access(void)
{
        const char *file = "./test_pkt_access.o";
        struct bpf_object *obj;
        __u32 duration, retval;
        int err, prog_fd;

        err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
        if (err) {
                error_cnt++;
                return;
        }

        err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
                                NULL, NULL, &retval, &duration);
        CHECK(err || errno || retval, "ipv4",
              "err %d errno %d retval %d duration %d\n",
              err, errno, retval, duration);

        err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
                                NULL, NULL, &retval, &duration);
        CHECK(err || errno || retval, "ipv6",
              "err %d errno %d retval %d duration %d\n",
              err, errno, retval, duration);
        bpf_object__close(obj);
}

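/* test_xdp.o implements an IP-in-IP encapsulating tunnel, so the returned
 * frame should have grown by one outer header: the 54-byte ipv4 test vector
 * (14 eth + 20 ip + 20 tcp) gains a 20-byte outer iphdr (74 total), and the
 * 74-byte ipv6 vector gains a 40-byte outer ipv6hdr (114 total). That is
 * what the size checks below encode.
 */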
static void test_xdp(void)
{
        struct vip key4 = {.protocol = 6, .family = AF_INET};
        struct vip key6 = {.protocol = 6, .family = AF_INET6};
        struct iptnl_info value4 = {.family = AF_INET};
        struct iptnl_info value6 = {.family = AF_INET6};
        const char *file = "./test_xdp.o";
        struct bpf_object *obj;
        char buf[128];
        struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
        struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
        __u32 duration, retval, size;
        int err, prog_fd, map_fd;

        err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
        if (err) {
                error_cnt++;
                return;
        }

        map_fd = bpf_find_map(__func__, obj, "vip2tnl");
        if (map_fd < 0)
                goto out;
        bpf_map_update_elem(map_fd, &key4, &value4, 0);
        bpf_map_update_elem(map_fd, &key6, &value6, 0);

        err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
                                buf, &size, &retval, &duration);

        CHECK(err || errno || retval != XDP_TX || size != 74 ||
              iph->protocol != IPPROTO_IPIP, "ipv4",
              "err %d errno %d retval %d size %d\n",
              err, errno, retval, size);

        err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
                                buf, &size, &retval, &duration);
        CHECK(err || errno || retval != XDP_TX || size != 114 ||
              iph6->nexthdr != IPPROTO_IPV6, "ipv6",
              "err %d errno %d retval %d size %d\n",
              err, errno, retval, size);
out:
        bpf_object__close(obj);
}

static void test_xdp_adjust_tail(void)
{
        const char *file = "./test_adjust_tail.o";
        struct bpf_object *obj;
        char buf[128];
        __u32 duration, retval, size;
        int err, prog_fd;

        err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
        if (err) {
                error_cnt++;
                return;
        }

        err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
                                buf, &size, &retval, &duration);

        CHECK(err || errno || retval != XDP_DROP,
              "ipv4", "err %d errno %d retval %d size %d\n",
              err, errno, retval, size);

        err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
                                buf, &size, &retval, &duration);
        CHECK(err || errno || retval != XDP_TX || size != 54,
              "ipv6", "err %d errno %d retval %d size %d\n",
              err, errno, retval, size);
        bpf_object__close(obj);
}

#define MAGIC_VAL 0x1234
#define NUM_ITER 100000
#define VIP_NUM 5

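/* Load an L4 load-balancer program, seed its vip_map/ch_rings/reals maps
 * with a single backend, run both test vectors NUM_ITER times each, and
 * verify the redirect verdict, the rewritten packet, and the per-cpu
 * byte/packet counters accumulated in the "stats" map.
 */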
static void test_l4lb(const char *file)
{
        unsigned int nr_cpus = bpf_num_possible_cpus();
        struct vip key = {.protocol = 6};
        struct vip_meta {
                __u32 flags;
                __u32 vip_num;
        } value = {.vip_num = VIP_NUM};
        __u32 stats_key = VIP_NUM;
        struct vip_stats {
                __u64 bytes;
                __u64 pkts;
        } stats[nr_cpus];
        struct real_definition {
                union {
                        __be32 dst;
                        __be32 dstv6[4];
                };
                __u8 flags;
        } real_def = {.dst = MAGIC_VAL};
        __u32 ch_key = 11, real_num = 3;
        __u32 duration, retval, size;
        int err, i, prog_fd, map_fd;
        __u64 bytes = 0, pkts = 0;
        struct bpf_object *obj;
        char buf[128];
        u32 *magic = (u32 *)buf;

        err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
        if (err) {
                error_cnt++;
                return;
        }

        map_fd = bpf_find_map(__func__, obj, "vip_map");
        if (map_fd < 0)
                goto out;
        bpf_map_update_elem(map_fd, &key, &value, 0);

        map_fd = bpf_find_map(__func__, obj, "ch_rings");
        if (map_fd < 0)
                goto out;
        bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

        map_fd = bpf_find_map(__func__, obj, "reals");
        if (map_fd < 0)
                goto out;
        bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

        err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
                                buf, &size, &retval, &duration);
        CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
              *magic != MAGIC_VAL, "ipv4",
              "err %d errno %d retval %d size %d magic %x\n",
              err, errno, retval, size, *magic);

        err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
                                buf, &size, &retval, &duration);
        CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
              *magic != MAGIC_VAL, "ipv6",
              "err %d errno %d retval %d size %d magic %x\n",
              err, errno, retval, size, *magic);

        map_fd = bpf_find_map(__func__, obj, "stats");
        if (map_fd < 0)
                goto out;
        bpf_map_lookup_elem(map_fd, &stats_key, stats);
        for (i = 0; i < nr_cpus; i++) {
                bytes += stats[i].bytes;
                pkts += stats[i].pkts;
        }
        if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
                error_cnt++;
                printf("test_l4lb:FAIL:stats %llu %llu\n", bytes, pkts);
        }
out:
        bpf_object__close(obj);
}

static void test_l4lb_all(void)
{
        const char *file1 = "./test_l4lb.o";
        const char *file2 = "./test_l4lb_noinline.o";

        test_l4lb(file1);
        test_l4lb(file2);
}

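/* Same l4lb scenario and map setup as test_l4lb(), but the program is built
 * without function inlining and loaded as an XDP program, so the expected
 * return value differs from the TC_ACT_REDIRECT verdict checked above.
 */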
static void test_xdp_noinline(void)
{
        const char *file = "./test_xdp_noinline.o";
        unsigned int nr_cpus = bpf_num_possible_cpus();
        struct vip key = {.protocol = 6};
        struct vip_meta {
                __u32 flags;
                __u32 vip_num;
        } value = {.vip_num = VIP_NUM};
        __u32 stats_key = VIP_NUM;
        struct vip_stats {
                __u64 bytes;
                __u64 pkts;
        } stats[nr_cpus];
        struct real_definition {
                union {
                        __be32 dst;
                        __be32 dstv6[4];
                };
                __u8 flags;
        } real_def = {.dst = MAGIC_VAL};
        __u32 ch_key = 11, real_num = 3;
        __u32 duration, retval, size;
        int err, i, prog_fd, map_fd;
        __u64 bytes = 0, pkts = 0;
        struct bpf_object *obj;
        char buf[128];
        u32 *magic = (u32 *)buf;

        err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
        if (err) {
                error_cnt++;
                return;
        }

        map_fd = bpf_find_map(__func__, obj, "vip_map");
        if (map_fd < 0)
                goto out;
        bpf_map_update_elem(map_fd, &key, &value, 0);

        map_fd = bpf_find_map(__func__, obj, "ch_rings");
        if (map_fd < 0)
                goto out;
        bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

        map_fd = bpf_find_map(__func__, obj, "reals");
        if (map_fd < 0)
                goto out;
        bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

        err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
                                buf, &size, &retval, &duration);
        CHECK(err || errno || retval != 1 || size != 54 ||
              *magic != MAGIC_VAL, "ipv4",
              "err %d errno %d retval %d size %d magic %x\n",
              err, errno, retval, size, *magic);

        err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
                                buf, &size, &retval, &duration);
        CHECK(err || errno || retval != 1 || size != 74 ||
              *magic != MAGIC_VAL, "ipv6",
              "err %d errno %d retval %d size %d magic %x\n",
              err, errno, retval, size, *magic);

        map_fd = bpf_find_map(__func__, obj, "stats");
        if (map_fd < 0)
                goto out;
        bpf_map_lookup_elem(map_fd, &stats_key, stats);
        for (i = 0; i < nr_cpus; i++) {
                bytes += stats[i].bytes;
                pkts += stats[i].pkts;
        }
        if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
                error_cnt++;
                printf("test_xdp_noinline:FAIL:stats %llu %llu\n", bytes, pkts);
        }
out:
        bpf_object__close(obj);
}

static void test_tcp_estats(void)
{
        const char *file = "./test_tcp_estats.o";
        int err, prog_fd;
        struct bpf_object *obj;
        __u32 duration = 0;

        err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
        if (CHECK(err, "", "err %d errno %d\n", err, errno))
                return;

        bpf_object__close(obj);
}

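/* bpf_attr carries pointers as __u64. Casting through unsigned long first
 * keeps the conversion width-correct on 32-bit builds and avoids compiler
 * warnings about casting a pointer to an integer of different size.
 */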
static inline __u64 ptr_to_u64(const void *ptr)
{
        return (__u64) (unsigned long) ptr;
}

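/* Read /proc/sys/net/core/bpf_jit_enable and treat any first byte other
 * than '0' as "JIT on"; if the sysctl cannot be opened, assume it is off.
 */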
static bool is_jit_enabled(void)
{
        const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
        bool enabled = false;
        int sysctl_fd;

        sysctl_fd = open(jit_sysctl, O_RDONLY);
        if (sysctl_fd != -1) {
                char tmpc;

                if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
                        enabled = (tmpc != '0');
                close(sysctl_fd);
        }

        return enabled;
}

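/* Exercise the object-identity APIs: bpf_prog_get_fd_by_id(),
 * bpf_map_get_fd_by_id(), bpf_obj_get_info_by_fd() and the
 * bpf_{prog,map}_get_next_id() iterators, including the oversized-info_len
 * and bad-map_ids-pointer corner cases.
 */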
static void test_bpf_obj_id(void)
{
        const __u64 array_magic_value = 0xfaceb00c;
        const __u32 array_key = 0;
        const int nr_iters = 2;
        const char *file = "./test_obj_id.o";
        const char *expected_prog_name = "test_obj_id";
        const char *expected_map_name = "test_map_id";
        const __u64 nsec_per_sec = 1000000000;

        struct bpf_object *objs[nr_iters];
        int prog_fds[nr_iters], map_fds[nr_iters];
        /* +1 to test for the info_len returned by kernel */
        struct bpf_prog_info prog_infos[nr_iters + 1];
        struct bpf_map_info map_infos[nr_iters + 1];
        /* Each prog only uses one map. +1 to test nr_map_ids
         * returned by kernel.
         */
        __u32 map_ids[nr_iters + 1];
        char jited_insns[128], xlated_insns[128], zeros[128];
        __u32 i, next_id, info_len, nr_id_found, duration = 0;
        struct timespec real_time_ts, boot_time_ts;
        int err = 0;
        __u64 array_value;
        uid_t my_uid = getuid();
        time_t now, load_time;

        err = bpf_prog_get_fd_by_id(0);
        CHECK(err >= 0 || errno != ENOENT,
              "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);

        err = bpf_map_get_fd_by_id(0);
        CHECK(err >= 0 || errno != ENOENT,
              "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);

        for (i = 0; i < nr_iters; i++)
                objs[i] = NULL;

        /* Check bpf_obj_get_info_by_fd() */
        bzero(zeros, sizeof(zeros));
        for (i = 0; i < nr_iters; i++) {
                now = time(NULL);
                err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
                                    &objs[i], &prog_fds[i]);
                /* test_obj_id.o is a dumb prog. It should never fail
                 * to load.
                 */
                if (err)
                        error_cnt++;
                assert(!err);

                /* Insert a magic value to the map */
                map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
                assert(map_fds[i] >= 0);
                err = bpf_map_update_elem(map_fds[i], &array_key,
                                          &array_magic_value, 0);
                assert(!err);

                /* Check getting map info */
                info_len = sizeof(struct bpf_map_info) * 2;
                bzero(&map_infos[i], info_len);
                err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
                                             &info_len);
                if (CHECK(err ||
                          map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
                          map_infos[i].key_size != sizeof(__u32) ||
                          map_infos[i].value_size != sizeof(__u64) ||
                          map_infos[i].max_entries != 1 ||
                          map_infos[i].map_flags != 0 ||
                          info_len != sizeof(struct bpf_map_info) ||
                          strcmp((char *)map_infos[i].name, expected_map_name),
                          "get-map-info(fd)",
                          "err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
                          err, errno,
                          map_infos[i].type, BPF_MAP_TYPE_ARRAY,
                          info_len, sizeof(struct bpf_map_info),
                          map_infos[i].key_size,
                          map_infos[i].value_size,
                          map_infos[i].max_entries,
                          map_infos[i].map_flags,
                          map_infos[i].name, expected_map_name))
                        goto done;

                /* Check getting prog info */
                info_len = sizeof(struct bpf_prog_info) * 2;
                bzero(&prog_infos[i], info_len);
                bzero(jited_insns, sizeof(jited_insns));
                bzero(xlated_insns, sizeof(xlated_insns));
                prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
                prog_infos[i].jited_prog_len = sizeof(jited_insns);
                prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
                prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
                prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
                prog_infos[i].nr_map_ids = 2;
                err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
                assert(!err);
                err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
                assert(!err);
                err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
                                             &info_len);
                load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
                        + (prog_infos[i].load_time / nsec_per_sec);
                if (CHECK(err ||
                          prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
                          info_len != sizeof(struct bpf_prog_info) ||
                          (jit_enabled && !prog_infos[i].jited_prog_len) ||
                          (jit_enabled &&
                           !memcmp(jited_insns, zeros, sizeof(zeros))) ||
                          !prog_infos[i].xlated_prog_len ||
                          !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
                          load_time < now - 60 || load_time > now + 60 ||
                          prog_infos[i].created_by_uid != my_uid ||
                          prog_infos[i].nr_map_ids != 1 ||
                          *(int *)prog_infos[i].map_ids != map_infos[i].id ||
                          strcmp((char *)prog_infos[i].name, expected_prog_name),
                          "get-prog-info(fd)",
                          "err %d errno %d i %d type %d(%d) info_len %u(%zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
                          err, errno, i,
                          prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
                          info_len, sizeof(struct bpf_prog_info),
                          jit_enabled,
                          prog_infos[i].jited_prog_len,
                          prog_infos[i].xlated_prog_len,
                          !!memcmp(jited_insns, zeros, sizeof(zeros)),
                          !!memcmp(xlated_insns, zeros, sizeof(zeros)),
                          load_time, now,
                          prog_infos[i].created_by_uid, my_uid,
                          prog_infos[i].nr_map_ids, 1,
                          *(int *)prog_infos[i].map_ids, map_infos[i].id,
                          prog_infos[i].name, expected_prog_name))
                        goto done;
        }

        /* Check bpf_prog_get_next_id() */
        nr_id_found = 0;
        next_id = 0;
        while (!bpf_prog_get_next_id(next_id, &next_id)) {
                struct bpf_prog_info prog_info = {};
                __u32 saved_map_id;
                int prog_fd;

                info_len = sizeof(prog_info);

                prog_fd = bpf_prog_get_fd_by_id(next_id);
                if (prog_fd < 0 && errno == ENOENT)
                        /* The bpf_prog is in the dead row */
                        continue;
                if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
                          "prog_fd %d next_id %d errno %d\n",
                          prog_fd, next_id, errno))
                        break;

                for (i = 0; i < nr_iters; i++)
                        if (prog_infos[i].id == next_id)
                                break;

                if (i == nr_iters)
                        continue;

                nr_id_found++;

                /* Negative test:
                 * prog_info.nr_map_ids = 1
                 * prog_info.map_ids = NULL
                 */
                prog_info.nr_map_ids = 1;
                err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
                if (CHECK(!err || errno != EFAULT,
                          "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
                          err, errno, EFAULT))
                        break;
                bzero(&prog_info, sizeof(prog_info));
                info_len = sizeof(prog_info);

                saved_map_id = *(int *)(prog_infos[i].map_ids);
                prog_info.map_ids = prog_infos[i].map_ids;
                prog_info.nr_map_ids = 2;
                err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
                prog_infos[i].jited_prog_insns = 0;
                prog_infos[i].xlated_prog_insns = 0;
                CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
                      memcmp(&prog_info, &prog_infos[i], info_len) ||
                      *(int *)prog_info.map_ids != saved_map_id,
                      "get-prog-info(next_id->fd)",
                      "err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n",
                      err, errno, info_len, sizeof(struct bpf_prog_info),
                      memcmp(&prog_info, &prog_infos[i], info_len),
                      *(int *)prog_info.map_ids, saved_map_id);
                close(prog_fd);
        }
        CHECK(nr_id_found != nr_iters,
              "check total prog id found by get_next_id",
              "nr_id_found %u(%u)\n",
              nr_id_found, nr_iters);

        /* Check bpf_map_get_next_id() */
        nr_id_found = 0;
        next_id = 0;
        while (!bpf_map_get_next_id(next_id, &next_id)) {
                struct bpf_map_info map_info = {};
                int map_fd;

                info_len = sizeof(map_info);

                map_fd = bpf_map_get_fd_by_id(next_id);
                if (map_fd < 0 && errno == ENOENT)
                        /* The bpf_map is in the dead row */
                        continue;
                if (CHECK(map_fd < 0, "get-map-fd(next_id)",
                          "map_fd %d next_id %u errno %d\n",
                          map_fd, next_id, errno))
                        break;

                for (i = 0; i < nr_iters; i++)
                        if (map_infos[i].id == next_id)
                                break;

                if (i == nr_iters)
                        continue;

                nr_id_found++;

                err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
                assert(!err);

                err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
                CHECK(err || info_len != sizeof(struct bpf_map_info) ||
                      memcmp(&map_info, &map_infos[i], info_len) ||
                      array_value != array_magic_value,
                      "check get-map-info(next_id->fd)",
                      "err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n",
                      err, errno, info_len, sizeof(struct bpf_map_info),
                      memcmp(&map_info, &map_infos[i], info_len),
                      array_value, array_magic_value);

                close(map_fd);
        }
        CHECK(nr_id_found != nr_iters,
              "check total map id found by get_next_id",
              "nr_id_found %u(%u)\n",
              nr_id_found, nr_iters);

done:
        for (i = 0; i < nr_iters; i++)
                bpf_object__close(objs[i]);
}

static void test_pkt_md_access(void)
{
        const char *file = "./test_pkt_md_access.o";
        struct bpf_object *obj;
        __u32 duration, retval;
        int err, prog_fd;

        err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
        if (err) {
                error_cnt++;
                return;
        }

        err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
                                NULL, NULL, &retval, &duration);
        CHECK(err || retval, "",
              "err %d errno %d retval %d duration %d\n",
              err, errno, retval, duration);

        bpf_object__close(obj);
}

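/* attr.prog_name and attr.map_name are BPF_OBJ_NAME_LEN (16) bytes, i.e. at
 * most 15 characters plus the terminating NUL, and only a restricted
 * character set is accepted. Hence the 15-char name below loads fine while
 * the 16-char and '\n'-containing variants must fail with EINVAL.
 */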
static void test_obj_name(void)
{
        struct {
                const char *name;
                int success;
                int expected_errno;
        } tests[] = {
                { "", 1, 0 },
                { "_123456789ABCDE", 1, 0 },
                { "_123456789ABCDEF", 0, EINVAL },
                { "_123456789ABCD\n", 0, EINVAL },
        };
        struct bpf_insn prog[] = {
                BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
                BPF_EXIT_INSN(),
        };
        __u32 duration = 0;
        int i;

        for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
                size_t name_len = strlen(tests[i].name) + 1;
                union bpf_attr attr;
                size_t ncopy;
                int fd;

                /* test different attr.prog_name during BPF_PROG_LOAD */
                ncopy = name_len < sizeof(attr.prog_name) ?
                        name_len : sizeof(attr.prog_name);
                bzero(&attr, sizeof(attr));
                attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
                attr.insn_cnt = 2;
                attr.insns = ptr_to_u64(prog);
                attr.license = ptr_to_u64("");
                memcpy(attr.prog_name, tests[i].name, ncopy);

                fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
                CHECK((tests[i].success && fd < 0) ||
                      (!tests[i].success && fd != -1) ||
                      (!tests[i].success && errno != tests[i].expected_errno),
                      "check-bpf-prog-name",
                      "fd %d(%d) errno %d(%d)\n",
                       fd, tests[i].success, errno, tests[i].expected_errno);

                if (fd != -1)
                        close(fd);

                /* test different attr.map_name during BPF_MAP_CREATE */
                ncopy = name_len < sizeof(attr.map_name) ?
                        name_len : sizeof(attr.map_name);
                bzero(&attr, sizeof(attr));
                attr.map_type = BPF_MAP_TYPE_ARRAY;
                attr.key_size = 4;
                attr.value_size = 4;
                attr.max_entries = 1;
                attr.map_flags = 0;
                memcpy(attr.map_name, tests[i].name, ncopy);
                fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
                CHECK((tests[i].success && fd < 0) ||
                      (!tests[i].success && fd != -1) ||
                      (!tests[i].success && errno != tests[i].expected_errno),
                      "check-bpf-map-name",
                      "fd %d(%d) errno %d(%d)\n",
                      fd, tests[i].success, errno, tests[i].expected_errno);

                if (fd != -1)
                        close(fd);
        }
}

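/* Attach up to num_progs tracepoint programs to sched:sched_switch one by
 * one and, after each attach, use PERF_EVENT_IOC_QUERY_BPF to verify that
 * the query ioctl reports exactly the programs attached so far (plus a few
 * EFAULT/ENOSPC negative cases). Note the unusual cleanup: the labels live
 * inside the countdown loop so partially-initialized iterations are torn
 * down from wherever the failure happened.
 */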
static void test_tp_attach_query(void)
{
        const int num_progs = 3;
        int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
        __u32 duration = 0, info_len, saved_prog_ids[num_progs];
        const char *file = "./test_tracepoint.o";
        struct perf_event_query_bpf *query;
        struct perf_event_attr attr = {};
        struct bpf_object *obj[num_progs];
        struct bpf_prog_info prog_info;
        char buf[256];

        snprintf(buf, sizeof(buf),
                 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
        efd = open(buf, O_RDONLY, 0);
        if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
                return;
        bytes = read(efd, buf, sizeof(buf));
        close(efd);
        if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
                  "read", "bytes %d errno %d\n", bytes, errno))
                return;

        attr.config = strtol(buf, NULL, 0);
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
        attr.sample_period = 1;
        attr.wakeup_events = 1;

        query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
        for (i = 0; i < num_progs; i++) {
                err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
                                    &prog_fd[i]);
                if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
                        goto cleanup1;

                bzero(&prog_info, sizeof(prog_info));
                prog_info.jited_prog_len = 0;
                prog_info.xlated_prog_len = 0;
                prog_info.nr_map_ids = 0;
                info_len = sizeof(prog_info);
                err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
                if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
                          err, errno))
                        goto cleanup1;
                saved_prog_ids[i] = prog_info.id;

                pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
                                    0 /* cpu 0 */, -1 /* group id */,
                                    0 /* flags */);
                if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
                          pmu_fd[i], errno))
                        goto cleanup2;
                err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
                if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
                          err, errno))
                        goto cleanup3;

                if (i == 0) {
                        /* check NULL prog array query */
                        query->ids_len = num_progs;
                        err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
                        if (CHECK(err || query->prog_cnt != 0,
                                  "perf_event_ioc_query_bpf",
                                  "err %d errno %d query->prog_cnt %u\n",
                                  err, errno, query->prog_cnt))
                                goto cleanup3;
                }

                err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
                if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
                          err, errno))
                        goto cleanup3;

                if (i == 1) {
                        /* try to get # of programs only */
                        query->ids_len = 0;
                        err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
                        if (CHECK(err || query->prog_cnt != 2,
                                  "perf_event_ioc_query_bpf",
                                  "err %d errno %d query->prog_cnt %u\n",
                                  err, errno, query->prog_cnt))
                                goto cleanup3;

                        /* try a few negative tests */
                        /* invalid query pointer */
                        err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
                                    (struct perf_event_query_bpf *)0x1);
                        if (CHECK(!err || errno != EFAULT,
                                  "perf_event_ioc_query_bpf",
                                  "err %d errno %d\n", err, errno))
                                goto cleanup3;

                        /* not enough space */
                        query->ids_len = 1;
                        err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
                        if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
                                  "perf_event_ioc_query_bpf",
                                  "err %d errno %d query->prog_cnt %u\n",
                                  err, errno, query->prog_cnt))
                                goto cleanup3;
                }

                query->ids_len = num_progs;
                err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
                if (CHECK(err || query->prog_cnt != (i + 1),
                          "perf_event_ioc_query_bpf",
                          "err %d errno %d query->prog_cnt %u\n",
                          err, errno, query->prog_cnt))
                        goto cleanup3;
                for (j = 0; j < i + 1; j++)
                        if (CHECK(saved_prog_ids[j] != query->ids[j],
                                  "perf_event_ioc_query_bpf",
                                  "#%d saved_prog_id %x query prog_id %x\n",
                                  j, saved_prog_ids[j], query->ids[j]))
                                goto cleanup3;
        }

        i = num_progs - 1;
        for (; i >= 0; i--) {
 cleanup3:
                ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
 cleanup2:
                close(pmu_fd[i]);
 cleanup1:
                bpf_object__close(obj[i]);
        }
        free(query);
}

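/* Return 0 iff map1 is non-empty and every key in map1 can be looked up in
 * map2 (values are read into a scratch buffer and otherwise ignored).
 */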
static int compare_map_keys(int map1_fd, int map2_fd)
{
        __u32 key, next_key;
        char val_buf[PERF_MAX_STACK_DEPTH *
                     sizeof(struct bpf_stack_build_id)];
        int err;

        err = bpf_map_get_next_key(map1_fd, NULL, &key);
        if (err)
                return err;
        err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
        if (err)
                return err;

        while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
                err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
                if (err)
                        return err;

                key = next_key;
        }
        if (errno != ENOENT)
                return -1;

        return 0;
}

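/* Walk every stack id in the stackmap and byte-compare its stored trace
 * against the entry under the same key in the corresponding array map;
 * returns 0 only if all traces match.
 */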
static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
        __u32 key, next_key, *cur_key_p, *next_key_p;
        char *val_buf1, *val_buf2;
        int i, err = 0;

        val_buf1 = malloc(stack_trace_len);
        val_buf2 = malloc(stack_trace_len);
        cur_key_p = NULL;
        next_key_p = &key;
        while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
                err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
                if (err)
                        goto out;
                err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
                if (err)
                        goto out;
                for (i = 0; i < stack_trace_len; i++) {
                        if (val_buf1[i] != val_buf2[i]) {
                                err = -1;
                                goto out;
                        }
                }
                key = *next_key_p;
                cur_key_p = &key;
                next_key_p = &next_key;
        }
        if (errno != ENOENT)
                err = -1;

out:
        free(val_buf1);
        free(val_buf2);
        return err;
}

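/* Attach a stack-collecting program to the sched:sched_switch tracepoint,
 * let it run for a second, then cross-check the stackid hash map, the
 * stackmap and the plain array map against each other.
 */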
static void test_stacktrace_map(void)
{
        int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
        const char *file = "./test_stacktrace_map.o";
        int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
        struct perf_event_attr attr = {};
        __u32 key, val, duration = 0;
        struct bpf_object *obj;
        char buf[256];

        err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
        if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
                return;

        /* Get the ID for the sched/sched_switch tracepoint */
        snprintf(buf, sizeof(buf),
                 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
        efd = open(buf, O_RDONLY, 0);
        if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
                goto close_prog;

        bytes = read(efd, buf, sizeof(buf));
        close(efd);
        if (bytes <= 0 || bytes >= sizeof(buf))
                goto close_prog;

        /* Open the perf event and attach the bpf program */
        attr.config = strtol(buf, NULL, 0);
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
        attr.sample_period = 1;
        attr.wakeup_events = 1;
        pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
                         0 /* cpu 0 */, -1 /* group id */,
                         0 /* flags */);
        if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
                  pmu_fd, errno))
                goto close_prog;

        err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
        if (err)
                goto disable_pmu;

        err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
        if (err)
                goto disable_pmu;

        /* find map fds */
        control_map_fd = bpf_find_map(__func__, obj, "control_map");
        if (control_map_fd < 0)
                goto disable_pmu;

        stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
        if (stackid_hmap_fd < 0)
                goto disable_pmu;

        stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
        if (stackmap_fd < 0)
                goto disable_pmu;

        stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
        if (stack_amap_fd < 0)
                goto disable_pmu;

        /* give some time for the bpf program to run */
        sleep(1);

        /* disable stack trace collection */
        key = 0;
        val = 1;
        bpf_map_update_elem(control_map_fd, &key, &val, 0);

        /* for every element in stackid_hmap, we can find a corresponding one
         * in stackmap, and vice versa.
         */
        err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
        if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
                  "err %d errno %d\n", err, errno))
                goto disable_pmu_noerr;

        err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
        if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
                  "err %d errno %d\n", err, errno))
                goto disable_pmu_noerr;

        stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
        err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
        if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
                  "err %d errno %d\n", err, errno))
                goto disable_pmu_noerr;

        goto disable_pmu_noerr;
disable_pmu:
        error_cnt++;
disable_pmu_noerr:
        ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
        close(pmu_fd);
close_prog:
        bpf_object__close(obj);
}

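/* Same stackmap consistency check as above, but attached through
 * bpf_raw_tracepoint_open("sched_switch") instead of a perf event.
 */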
static void test_stacktrace_map_raw_tp(void)
{
        int control_map_fd, stackid_hmap_fd, stackmap_fd;
        const char *file = "./test_stacktrace_map.o";
        int efd, err, prog_fd;
        __u32 key, val, duration = 0;
        struct bpf_object *obj;

        err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
        if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
                return;

        efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
        if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
                goto close_prog;

        /* find map fds */
        control_map_fd = bpf_find_map(__func__, obj, "control_map");
        if (control_map_fd < 0)
                goto close_prog;

        stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
        if (stackid_hmap_fd < 0)
                goto close_prog;

        stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
        if (stackmap_fd < 0)
                goto close_prog;

        /* give some time for the bpf program to run */
        sleep(1);

        /* disable stack trace collection */
        key = 0;
        val = 1;
        bpf_map_update_elem(control_map_fd, &key, &val, 0);

        /* for every element in stackid_hmap, we can find a corresponding one
         * in stackmap, and vice versa.
         */
        err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
        if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
                  "err %d errno %d\n", err, errno))
                goto close_prog;

        err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
        if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
                  "err %d errno %d\n", err, errno))
                goto close_prog;

        goto close_prog_noerr;
close_prog:
        error_cnt++;
close_prog_noerr:
        bpf_object__close(obj);
}

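/* Scrape the build id of ./urandom_read out of "readelf -n" output so it
 * can be matched against the build ids reported by the stackmap. The popen
 * stream must be closed with pclose(), and the getline() buffer freed.
 */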
static int extract_build_id(char *build_id, size_t size)
{
        FILE *fp;
        char *line = NULL;
        size_t len = 0;

        fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
        if (fp == NULL)
                return -1;

        if (getline(&line, &len, fp) == -1)
                goto err;
        pclose(fp);

        if (len > size - 1)
                len = size - 1;
        memcpy(build_id, line, len);
        build_id[len] = '\0';
        free(line);
        return 0;
err:
        pclose(fp);
        free(line);
        return -1;
}

static void test_stacktrace_build_id(void)
{
        int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
        const char *file = "./test_stacktrace_build_id.o";
        int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
        struct perf_event_attr attr = {};
        __u32 key, previous_key, val, duration = 0;
        struct bpf_object *obj;
        char buf[256];
        int i, j;
        struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
        int build_id_matches = 0;

        err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
        if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
                goto out;

        /* Get the ID for the random/urandom_read tracepoint */
        snprintf(buf, sizeof(buf),
                 "/sys/kernel/debug/tracing/events/random/urandom_read/id");
        efd = open(buf, O_RDONLY, 0);
        if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
                goto close_prog;

        bytes = read(efd, buf, sizeof(buf));
        close(efd);
        if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
                  "read", "bytes %d errno %d\n", bytes, errno))
                goto close_prog;

        /* Open the perf event and attach the bpf program */
        attr.config = strtol(buf, NULL, 0);
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
        attr.sample_period = 1;
        attr.wakeup_events = 1;
        pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
                         0 /* cpu 0 */, -1 /* group id */,
                         0 /* flags */);
        if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
                  pmu_fd, errno))
                goto close_prog;

        err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
        if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
                  err, errno))
                goto close_pmu;

        err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
        if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
                  err, errno))
                goto disable_pmu;

        /* find map fds */
        control_map_fd = bpf_find_map(__func__, obj, "control_map");
        if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
                  "err %d errno %d\n", err, errno))
                goto disable_pmu;

        stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
        if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
                  "err %d errno %d\n", err, errno))
                goto disable_pmu;

        stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
        if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
                  err, errno))
                goto disable_pmu;

        stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
        if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
                  "err %d errno %d\n", err, errno))
                goto disable_pmu;

        assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
               == 0);
        assert(system("./urandom_read") == 0);
        /* disable stack trace collection */
        key = 0;
        val = 1;
        bpf_map_update_elem(control_map_fd, &key, &val, 0);

        /* for every element in stackid_hmap, we can find a corresponding one
         * in stackmap, and vice versa.
         */
        err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
        if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
                  "err %d errno %d\n", err, errno))
                goto disable_pmu;

        err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
        if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
                  "err %d errno %d\n", err, errno))
                goto disable_pmu;

        err = extract_build_id(buf, 256);

        if (CHECK(err, "get build_id with readelf",
                  "err %d errno %d\n", err, errno))
                goto disable_pmu;

        err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
        if (CHECK(err, "get_next_key from stackmap",
                  "err %d, errno %d\n", err, errno))
                goto disable_pmu;

        do {
                char build_id[64];

                err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
                if (CHECK(err, "lookup_elem from stackmap",
                          "err %d, errno %d\n", err, errno))
                        goto disable_pmu;
                for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
                        if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
                            id_offs[i].offset != 0) {
                                for (j = 0; j < 20; ++j)
                                        sprintf(build_id + 2 * j, "%02x",
                                                id_offs[i].build_id[j] & 0xff);
                                if (strstr(buf, build_id) != NULL)
                                        build_id_matches = 1;
                        }
                previous_key = key;
        } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

        if (CHECK(build_id_matches < 1, "build id match",
                  "Didn't find expected build ID from the map\n"))
                goto disable_pmu;

        stack_trace_len = PERF_MAX_STACK_DEPTH
                * sizeof(struct bpf_stack_build_id);
        err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
        CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
              "err %d errno %d\n", err, errno);

disable_pmu:
        ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

close_pmu:
        close(pmu_fd);

close_prog:
        bpf_object__close(obj);

out:
        return;
}

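/* Same build-id check as above, but driven by a PERF_COUNT_HW_CPU_CYCLES
 * sampling event, so the stack/build-id collection runs in NMI context.
 */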
1275static void test_stacktrace_build_id_nmi(void)
1276{
1277        int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
1278        const char *file = "./test_stacktrace_build_id.o";
1279        int err, pmu_fd, prog_fd;
1280        struct perf_event_attr attr = {
1281                .sample_freq = 5000,
1282                .freq = 1,
1283                .type = PERF_TYPE_HARDWARE,
1284                .config = PERF_COUNT_HW_CPU_CYCLES,
1285        };
1286        __u32 key, previous_key, val, duration = 0;
1287        struct bpf_object *obj;
1288        char buf[256];
1289        int i, j;
1290        struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
1291        int build_id_matches = 0;
1292
1293        err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
1294        if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
1295                return;
1296
1297        pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
1298                         0 /* cpu 0 */, -1 /* group id */,
1299                         0 /* flags */);
1300        if (CHECK(pmu_fd < 0, "perf_event_open",
1301                  "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
1302                  pmu_fd, errno))
1303                goto close_prog;
1304
1305        err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
1306        if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
1307                  err, errno))
1308                goto close_pmu;
1309
1310        err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
1311        if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
1312                  err, errno))
1313                goto disable_pmu;
1314
1315        /* find map fds */
1316        control_map_fd = bpf_find_map(__func__, obj, "control_map");
1317        if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
1318                  "err %d errno %d\n", err, errno))
1319                goto disable_pmu;
1320
1321        stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
1322        if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
1323                  "err %d errno %d\n", err, errno))
1324                goto disable_pmu;
1325
1326        stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
1327        if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
1328                  err, errno))
1329                goto disable_pmu;
1330
1331        stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
1332        if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
1333                  "err %d errno %d\n", err, errno))
1334                goto disable_pmu;
1335
1336        assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
1337               == 0);
1338        assert(system("taskset 0x1 ./urandom_read 100000") == 0);
1339        /* disable stack trace collection */
1340        key = 0;
1341        val = 1;
1342        bpf_map_update_elem(control_map_fd, &key, &val, 0);
1343
1344        /* for every element in stackid_hmap, we can find a corresponding one
1345         * in stackmap, and vise versa.
1346         */
1347        err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
1348        if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
1349                  "err %d errno %d\n", err, errno))
1350                goto disable_pmu;
1351
1352        err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
1353        if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
1354                  "err %d errno %d\n", err, errno))
1355                goto disable_pmu;
1356
        err = extract_build_id(buf, sizeof(buf));
        if (CHECK(err, "get build_id with readelf",
                  "err %d errno %d\n", err, errno))
                goto disable_pmu;

        err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
        if (CHECK(err, "get_next_key from stackmap",
                  "err %d, errno %d\n", err, errno))
                goto disable_pmu;

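        /* Walk every stackmap entry, hex-encode each valid 20-byte
         * build id, and look for it in the readelf output collected
         * above for the test binary.
         */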
        do {
                char build_id[64];

                err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
                if (CHECK(err, "lookup_elem from stackmap",
                          "err %d, errno %d\n", err, errno))
                        goto disable_pmu;
                for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
                        if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
                            id_offs[i].offset != 0) {
                                for (j = 0; j < 20; ++j)
                                        sprintf(build_id + 2 * j, "%02x",
                                                id_offs[i].build_id[j] & 0xff);
                                if (strstr(buf, build_id) != NULL)
                                        build_id_matches = 1;
                        }
                previous_key = key;
        } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

        if (CHECK(build_id_matches < 1, "build id match",
                  "Didn't find expected build ID from the map\n"))
                goto disable_pmu;

        /*
         * We intentionally skip compare_stack_ips(): only one in_nmi()
         * ips-to-build_id translation is supported per cpu at any time,
         * so stack_amap here will always fall back to
         * BPF_STACK_BUILD_ID_IP.
         */

disable_pmu:
        ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE, 0);

close_pmu:
        close(pmu_fd);

close_prog:
        bpf_object__close(obj);
}

#define MAX_CNT_RAWTP   10ull
#define MAX_STACK_RAWTP 100
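/* Layout must mirror the record that test_get_stack_rawtp.o writes
 * into the perf event buffer.
 */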
struct get_stack_trace_t {
        int pid;
        int kern_stack_size;
        int user_stack_size;
        int user_stack_buildid_size;
        __u64 kern_stack[MAX_STACK_RAWTP];
        __u64 user_stack[MAX_STACK_RAWTP];
        struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

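/* Callback fed one raw sample at a time by perf_event_poller().
 * Returns LIBBPF_PERF_EVENT_CONT to keep polling, _DONE once
 * MAX_CNT_RAWTP samples have been seen, or _ERROR if a stack fails
 * validation.
 */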
static int get_stack_print_output(void *data, int size)
{
        bool good_kern_stack = false, good_user_stack = false;
        const char *nonjit_func = "___bpf_prog_run";
        struct get_stack_trace_t *e = data;
        int i, num_stack;
        static __u64 cnt;
        struct ksym *ks;

        cnt++;

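        /* A sample smaller than the full record is treated as a bare
         * array of kernel addresses.
         */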
        if (size < sizeof(struct get_stack_trace_t)) {
                __u64 *raw_data = data;
                bool found = false;

                num_stack = size / sizeof(__u64);
                /* If jit is enabled, we do not have a good way to
                 * verify the sanity of the kernel stack. So we
                 * just assume it is good if the stack is not empty.
                 * This could be improved in the future.
                 */
                if (jit_enabled) {
                        found = num_stack > 0;
                } else {
                        for (i = 0; i < num_stack; i++) {
                                ks = ksym_search(raw_data[i]);
                                if (strcmp(ks->name, nonjit_func) == 0) {
                                        found = true;
                                        break;
                                }
                        }
                }
                if (found) {
                        good_kern_stack = true;
                        good_user_stack = true;
                }
        } else {
                num_stack = e->kern_stack_size / sizeof(__u64);
                if (jit_enabled) {
                        good_kern_stack = num_stack > 0;
                } else {
                        for (i = 0; i < num_stack; i++) {
                                ks = ksym_search(e->kern_stack[i]);
                                if (strcmp(ks->name, nonjit_func) == 0) {
                                        good_kern_stack = true;
                                        break;
                                }
                        }
                }
                if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
                        good_user_stack = true;
        }
        if (!good_kern_stack || !good_user_stack)
                return LIBBPF_PERF_EVENT_ERROR;

        if (cnt == MAX_CNT_RAWTP)
                return LIBBPF_PERF_EVENT_DONE;

        return LIBBPF_PERF_EVENT_CONT;
}

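/* Attach test_get_stack_rawtp.o to the sys_enter raw tracepoint,
 * trigger some syscalls, and validate the kernel and user stacks it
 * reports through the perf event buffer.
 */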
static void test_get_stack_raw_tp(void)
{
        const char *file = "./test_get_stack_rawtp.o";
        int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
        struct perf_event_attr attr = {};
        struct timespec tv = {0, 10};
        __u32 key = 0, duration = 0;
        struct bpf_object *obj;

        err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
        if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
                return;

        efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
        if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
                goto close_prog;

        perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
        if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
                  perfmap_fd, errno))
                goto close_prog;

        err = load_kallsyms();
        if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
                goto close_prog;

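        /* Open a PERF_COUNT_SW_BPF_OUTPUT event and install its fd in
         * perfmap so the raw tracepoint program can stream samples to
         * user space.
         */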
        attr.sample_type = PERF_SAMPLE_RAW;
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_BPF_OUTPUT;
        pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
                         -1/*group_fd*/, 0);
        if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
                  errno))
                goto close_prog;

        err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
        if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
                  errno))
                goto close_prog;

        err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
        if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
                  err, errno))
                goto close_prog;

        err = perf_event_mmap(pmu_fd);
        if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
                goto close_prog;

        /* trigger some syscall action */
        for (i = 0; i < MAX_CNT_RAWTP; i++)
                nanosleep(&tv, NULL);

        err = perf_event_poller(pmu_fd, get_stack_print_output);
        if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
                goto close_prog;

        goto close_prog_noerr;
close_prog:
        error_cnt++;
close_prog_noerr:
        bpf_object__close(obj);
}

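/* bpf_task_fd_query() reports which BPF program a (pid, fd) pair is
 * attached to, plus the attach point name; here fd refers to a raw
 * tracepoint, so the expected name is "sys_enter".
 */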
static void test_task_fd_query_rawtp(void)
{
        const char *file = "./test_get_stack_rawtp.o";
        __u64 probe_offset, probe_addr;
        __u32 len, prog_id, fd_type;
        struct bpf_object *obj;
        int efd, err, prog_fd;
        __u32 duration = 0;
        char buf[256];

        err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
        if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
                return;

        efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
        if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
                goto close_prog;

        /* query (getpid(), efd) */
        len = sizeof(buf);
        err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
                                &fd_type, &probe_offset, &probe_addr);
        if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
                  errno))
                goto close_prog;

        err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
              strcmp(buf, "sys_enter") == 0;
        if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
                  fd_type, buf))
                goto close_prog;

        /* test zero len */
        len = 0;
        err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
                                &fd_type, &probe_offset, &probe_addr);
        if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
                  err, errno))
                goto close_prog;
        err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
              len == strlen("sys_enter");
        if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
                goto close_prog;

        /* test empty buffer */
        len = sizeof(buf);
        err = bpf_task_fd_query(getpid(), efd, 0, NULL, &len, &prog_id,
                                &fd_type, &probe_offset, &probe_addr);
        if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
                  err, errno))
                goto close_prog;
        err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
              len == strlen("sys_enter");
        if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
                goto close_prog;

        /* test smaller buffer */
        len = 3;
        err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
                                &fd_type, &probe_offset, &probe_addr);
        if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
                  "err %d errno %d\n", err, errno))
                goto close_prog;
        err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
              len == strlen("sys_enter") &&
              strcmp(buf, "sy") == 0;
        if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
                goto close_prog;

        goto close_prog_noerr;
close_prog:
        error_cnt++;
close_prog_noerr:
        bpf_object__close(obj);
}

static void test_task_fd_query_tp_core(const char *probe_name,
                                       const char *tp_name)
{
        const char *file = "./test_tracepoint.o";
        int err, bytes, efd, prog_fd, pmu_fd;
        struct perf_event_attr attr = {};
        __u64 probe_offset, probe_addr;
        __u32 len, prog_id, fd_type;
        /* NULL-init so the close_prog path below is safe when
         * bpf_prog_load() fails before obj is set
         */
        struct bpf_object *obj = NULL;
        __u32 duration = 0;
        char buf[256];

        err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
        if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
                goto close_prog;

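        /* read the tracepoint's id from debugfs; it becomes attr.config
         * for perf_event_open() below
         */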
        snprintf(buf, sizeof(buf),
                 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
        efd = open(buf, O_RDONLY, 0);
        if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
                goto close_prog;
        bytes = read(efd, buf, sizeof(buf));
        close(efd);
        if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
                  "bytes %d errno %d\n", bytes, errno))
                goto close_prog;

        attr.config = strtol(buf, NULL, 0);
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.sample_type = PERF_SAMPLE_RAW;
        attr.sample_period = 1;
        attr.wakeup_events = 1;
        pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
                         0 /* cpu 0 */, -1 /* group id */,
                         0 /* flags */);
        if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
                  pmu_fd, errno))
                goto close_pmu;

        err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
        if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
                  errno))
                goto close_pmu;

        err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
        if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
                  errno))
                goto close_pmu;

        /* query (getpid(), pmu_fd) */
        len = sizeof(buf);
        err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
                                &fd_type, &probe_offset, &probe_addr);
        if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
                  errno))
                goto close_pmu;

        err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
        if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
                  fd_type, buf))
                goto close_pmu;

        close(pmu_fd);
        goto close_prog_noerr;

close_pmu:
        close(pmu_fd);
close_prog:
        error_cnt++;
close_prog_noerr:
        bpf_object__close(obj);
}

static void test_task_fd_query_tp(void)
{
        test_task_fd_query_tp_core("sched/sched_switch",
                                   "sched_switch");
        test_task_fd_query_tp_core("syscalls/sys_enter_read",
                                   "sys_enter_read");
}

int main(void)
{
        jit_enabled = is_jit_enabled();

        test_pkt_access();
        test_xdp();
        test_xdp_adjust_tail();
        test_l4lb_all();
        test_xdp_noinline();
        test_tcp_estats();
        test_bpf_obj_id();
        test_pkt_md_access();
        test_obj_name();
        test_tp_attach_query();
        test_stacktrace_map();
        test_stacktrace_build_id();
        test_stacktrace_build_id_nmi();
        test_stacktrace_map_raw_tp();
        test_get_stack_raw_tp();
        test_task_fd_query_rawtp();
        test_task_fd_query_tp();

        printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
        return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}