linux/tools/perf/builtin-test.c
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "builtin.h"

#include "util/cache.h"
#include "util/debug.h"
#include "util/debugfs.h"
#include "util/evlist.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/symbol.h"
#include "util/thread_map.h"
#include "../../include/linux/hw_breakpoint.h"

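/*
 * Symbol filter used when loading the vmlinux DSO below: it just marks
 * each symbol as visited, via the per-symbol private area set up with
 * symbol_conf.priv_size in cmd_test(), and accepts the symbol.
 */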
static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
{
	bool *visited = symbol__priv(sym);
	*visited = true;
	return 0;
}

static int test__vmlinux_matches_kallsyms(void)
{
	int err = -1;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map;
	struct machine kallsyms, vmlinux;
	enum map_type type = MAP__FUNCTION;
	long page_size = sysconf(_SC_PAGE_SIZE);
	struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold the kernel and the modules obtained
	 * from both the vmlinux + .ko files and from /proc/kallsyms, split by
	 * modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps ");
		return -1;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
		pr_debug("machine__load_kallsyms ");
		goto out;
	}


	/*
	 * Step 4:
	 *
	 * kallsyms will be internally sorted by name, on demand, so that we
	 * can find the reference relocation symbol, i.e. the symbol we will
	 * use to see if the running kernel was relocated, by checking if it
	 * has the same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms, type);

	sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
	if (sym == NULL) {
		pr_debug("map__find_symbol_by_name ");
		goto out;
	}

	ref_reloc_sym.addr = sym->start;

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_debug("machine__create_kernel_maps ");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux, type);
	map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that, look for the ref reloc symbol: if we find it we'll
	 * have its ref_reloc_symbol.unrelocated_addr, and then
	 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
	 * to fixup the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux, type,
				       vmlinux_matches_kallsyms_filter) <= 0) {
		pr_debug("machine__load_vmlinux_path ");
		goto out;
	}

	err = 0;
	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all
	 * of them in the kallsyms dso. For the ones that are in both, check
	 * their names and end addresses too.
	 */
	for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
		struct symbol *pair, *first_pair;
		bool backwards = true;

		sym = rb_entry(nd, struct symbol, rb_node);

		if (sym->start == sym->end)
			continue;

		first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
		pair = first_pair;

		if (pair && pair->start == sym->start) {
next_pair:
			if (strcmp(sym->name, pair->name) == 0) {
				/*
				 * kallsyms doesn't have the symbol end, so we
				 * set it using the next symbol start - 1. In
				 * some cases we get this up to a page wrong:
				 * trace_kmalloc, when I was developing this
				 * code, was one such example, 2106 bytes off
				 * the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = sym->end - pair->end;
				if (llabs(skew) < page_size)
					continue;

				pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
					 sym->start, sym->name, sym->end, pair->end);
			} else {
				struct rb_node *nnd;
detour:
				nnd = backwards ? rb_prev(&pair->rb_node) :
						  rb_next(&pair->rb_node);
				if (nnd) {
					struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

					if (next->start == sym->start) {
						pair = next;
						goto next_pair;
					}
				}

				if (backwards) {
					backwards = false;
					pair = first_pair;
					goto detour;
				}

				pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
					 sym->start, sym->name, pair->name);
			}
		} else
			pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

		err = -1;
	}

	if (!verbose)
		goto out;

	pr_info("Maps only in vmlinux:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]",
		 * while the kernel will have the path for the vmlinux file
		 * being used, so use the short name, less descriptive but the
		 * same ("[kernel]") in both cases.
		 */
		pair = map_groups__find_by_name(&kallsyms.kmaps, type,
						(pos->dso->kernel ?
							pos->dso->short_name :
							pos->dso->name));
		if (pair)
			pair->priv = 1;
		else
			map__fprintf(pos, stderr);
	}

	pr_info("Maps in vmlinux with a different name in kallsyms:\n");

	for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

		pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == pos->start) {
			pair->priv = 1;
			pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				pos->start, pos->end, pos->pgoff, pos->dso->name);
			if (pos->pgoff != pair->pgoff || pos->end != pair->end)
				pr_info(":\n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
		}
	}

	pr_info("Maps only in kallsyms:\n");

	for (nd = rb_first(&kallsyms.kmaps.maps[type]);
	     nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (!pos->priv)
			map__fprintf(pos, stderr);
	}
out:
	return err;
}

#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>

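/*
 * Resolve a tracepoint name to its numeric id by reading the "id" file
 * the kernel exposes for it via debugfs, i.e.:
 *
 *	<tracing_events_path>/syscalls/<evname>/id
 *
 * normally under /sys/kernel/debug/tracing/events/. Returns the id, or
 * -1 if the file couldn't be read, e.g. when debugfs isn't mounted.
 */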
static int trace_event__id(const char *evname)
{
	char *filename;
	int err = -1, fd;

	if (asprintf(&filename,
		     "%s/syscalls/%s/id",
		     tracing_events_path, evname) < 0)
		return -1;

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		ssize_t n = read(fd, id, sizeof(id) - 1);

		if (n > 0) {
			id[n] = '\0'; /* make sure atoi() sees a terminated string */
			err = atoi(id);
		}
		close(fd);
	}

	free(filename);
	return err;
}

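/*
 * Open a counter for the syscalls:sys_enter_open tracepoint on the
 * current thread, do a known number of open(2) calls and then check,
 * by reading the counter back, that all of them were intercepted.
 */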
static int test__open_syscall_event(void)
{
	int err = -1, fd;
	struct thread_map *threads;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (i = 0; i < nr_open_calls; ++i) {
		fd = open("/etc/passwd", O_RDONLY);
		close(fd);
	}

	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
		pr_debug("perf_evsel__read_on_cpu\n");
		goto out_close_fd;
	}

	if (evsel->counts->cpu[0].val != nr_open_calls) {
		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
			 nr_open_calls, evsel->counts->cpu[0].val);
		goto out_close_fd;
	}

	err = 0;
out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}

#include <sched.h>

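/*
 * Same idea as test__open_syscall_event(), but with one counter per CPU:
 * migrate to each CPU in turn with sched_setaffinity(), do a distinct
 * number of open(2) calls there, then check each per-cpu count.
 */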
static int test__open_syscall_event_on_all_cpus(void)
{
	int err = -1, fd, cpu;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	unsigned int nr_open_calls = 111, i;
	cpu_set_t cpu_set;
	int id = trace_event__id("sys_enter_open");

	if (id < 0) {
		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
		return -1;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.config = id;
	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_thread_map_delete;
	}

	if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 strerror(errno));
		goto out_evsel_delete;
	}

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_open_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s ",
				 cpus->map[cpu],
				 strerror(errno));
			goto out_close_fd;
		}
		for (i = 0; i < ncalls; ++i) {
			fd = open("/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, since if we
	 * used the auto allocation it would allocate just for 1 cpu, as
	 * we start with cpu 0.
	 */
	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("perf_evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_open_calls + cpu;
		if (evsel->counts->cpu[cpu].val != expected) {
			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
			err = -1;
		}
	}

out_close_fd:
	perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
	perf_evsel__delete(evsel);
out_thread_map_delete:
	thread_map__delete(threads);
	return err;
}

/*
 * This test will generate random numbers of calls to some getpid-style
 * syscalls, then establish an mmap for a group of events that are created
 * to monitor these syscalls.
 *
 * It will receive the events, using mmap, and use the PERF_SAMPLE_ID
 * generated sample.id field to map each event back to its respective
 * perf_evsel instance.
 *
 * Then it checks if the number of syscalls reported as perf events by
 * the kernel corresponds to the number of syscalls made.
 */
static int test__basic_mmap(void)
{
	int err = -1;
	union perf_event *event;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_TRACEPOINT,
		.read_format	= PERF_FORMAT_ID,
		.sample_type	= PERF_SAMPLE_ID,
		.watermark	= 0,
	};
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
					"getpgid", };
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
				      (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	int ids[nsyscalls];
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct perf_evsel *evsels[nsyscalls], *evsel;
	int sample_size = __perf_evsel__sample_size(attr.sample_type);

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		ids[i] = trace_event__id(name);
		if (ids[i] < 0) {
			pr_debug("Is debugfs mounted on /sys/kernel/debug?\n");
			return -1;
		}
		nr_events[i] = 0;
		expected_nr_events[i] = random() % 257;
	}

	threads = thread_map__new(-1, getpid());
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("cpu_map__new\n");
		goto out_free_threads;
	}

	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s ",
			 cpus->map[0], strerror(errno));
		goto out_free_cpus;
	}

	evlist = perf_evlist__new(cpus, threads);
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		goto out_free_cpus;
	}

	/* anonymous union fields, can't be initialized above */
	attr.wakeup_events = 1;
	attr.sample_period = 1;

	for (i = 0; i < nsyscalls; ++i) {
		attr.config = ids[i];
		evsels[i] = perf_evsel__new(&attr, i);
		if (evsels[i] == NULL) {
			pr_debug("perf_evsel__new\n");
			goto out_free_evlist;
		}

		perf_evlist__add(evlist, evsels[i]);

		if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 strerror(errno));
			goto out_close_fd;
		}
	}

	if (perf_evlist__mmap(evlist, 128, true) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 strerror(errno));
		goto out_close_fd;
	}

	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
			int foo = syscalls[i]();
			++foo; /* just to quiet "set but not used" warnings */
		}

	while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_munmap;
		}

		err = perf_event__parse_sample(event, attr.sample_type, sample_size,
					       false, &sample, false);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_munmap;
		}

		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_munmap;
		}
		nr_events[evsel->idx]++;
	}

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 event_name(evsel), nr_events[evsel->idx]);
			goto out_munmap;
		}
	}

	err = 0;
out_munmap:
	perf_evlist__munmap(evlist);
out_close_fd:
	for (i = 0; i < nsyscalls; ++i)
		perf_evsel__close_fd(evsels[i], 1, threads->nr);
out_free_evlist:
	perf_evlist__delete(evlist);
out_free_cpus:
	cpu_map__delete(cpus);
out_free_threads:
	thread_map__delete(threads);
	return err;
#undef nsyscalls
}

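/*
 * Helper for the parse-events checkers below: on a failed check, print
 * where it failed and make the enclosing checker return -1.
 */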
#define TEST_ASSERT_VAL(text, cond) \
do { \
	if (!(cond)) { \
		pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
		return -1; \
	} \
} while (0)

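/*
 * Each test__checkevent_*() function below receives an evlist that was
 * filled by parse_events() for one entry in test__events[] and verifies
 * that the resulting perf_event_attr fields match what that event
 * string should produce.
 */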
static int test__checkevent_tracepoint(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
	TEST_ASSERT_VAL("wrong sample_type",
		(PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) ==
		evsel->attr.sample_type);
	TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period);
	return 0;
}

static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);

	list_for_each_entry(evsel, &evlist->entries, node) {
		TEST_ASSERT_VAL("wrong type",
			PERF_TYPE_TRACEPOINT == evsel->attr.type);
		TEST_ASSERT_VAL("wrong sample_type",
			(PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU)
			== evsel->attr.sample_type);
		TEST_ASSERT_VAL("wrong sample_period",
			1 == evsel->attr.sample_period);
	}
	return 0;
}

static int test__checkevent_raw(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
	return 0;
}

static int test__checkevent_numeric(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
	return 0;
}

static int test__checkevent_symbolic_name(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config",
			PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config);
	return 0;
}

static int test__checkevent_symbolic_alias(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config",
			PERF_COUNT_SW_PAGE_FAULTS == evsel->attr.config);
	return 0;
}

static int test__checkevent_genhw(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", PERF_TYPE_HW_CACHE == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config", (1 << 16) == evsel->attr.config);
	return 0;
}

static int test__checkevent_breakpoint(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
	TEST_ASSERT_VAL("wrong bp_type", (HW_BREAKPOINT_R | HW_BREAKPOINT_W) ==
					 evsel->attr.bp_type);
	TEST_ASSERT_VAL("wrong bp_len", HW_BREAKPOINT_LEN_4 ==
					evsel->attr.bp_len);
	return 0;
}

static int test__checkevent_breakpoint_x(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
	TEST_ASSERT_VAL("wrong bp_type",
			HW_BREAKPOINT_X == evsel->attr.bp_type);
	TEST_ASSERT_VAL("wrong bp_len", sizeof(long) == evsel->attr.bp_len);
	return 0;
}

static int test__checkevent_breakpoint_r(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type",
			PERF_TYPE_BREAKPOINT == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
	TEST_ASSERT_VAL("wrong bp_type",
			HW_BREAKPOINT_R == evsel->attr.bp_type);
	TEST_ASSERT_VAL("wrong bp_len",
			HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
	return 0;
}

static int test__checkevent_breakpoint_w(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
	TEST_ASSERT_VAL("wrong type",
			PERF_TYPE_BREAKPOINT == evsel->attr.type);
	TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
	TEST_ASSERT_VAL("wrong bp_type",
			HW_BREAKPOINT_W == evsel->attr.bp_type);
	TEST_ASSERT_VAL("wrong bp_len",
			HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
	return 0;
}

static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
	TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);

	return test__checkevent_tracepoint(evlist);
}

static int
test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);

	list_for_each_entry(evsel, &evlist->entries, node) {
		TEST_ASSERT_VAL("wrong exclude_user",
				!evsel->attr.exclude_user);
		TEST_ASSERT_VAL("wrong exclude_kernel",
				evsel->attr.exclude_kernel);
		TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
		TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
	}

	return test__checkevent_tracepoint_multi(evlist);
}

static int test__checkevent_raw_modifier(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
	TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);

	return test__checkevent_raw(evlist);
}

static int test__checkevent_numeric_modifier(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
	TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);

	return test__checkevent_numeric(evlist);
}

static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
	TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
	TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);

	return test__checkevent_symbolic_name(evlist);
}

static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
	TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
	TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);

	return test__checkevent_symbolic_alias(evlist);
}

static int test__checkevent_genhw_modifier(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = list_entry(evlist->entries.next,
					      struct perf_evsel, node);

	TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
	TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
	TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
	TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);

	return test__checkevent_genhw(evlist);
}

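/*
 * Table of event strings and their checkers, covering tracepoints,
 * raw and plain numeric events, symbolic hw/sw names, generic hw cache
 * events, breakpoints and the u/k/h/p modifiers.
 */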
static struct test__event_st {
	const char *name;
	__u32 type;
	int (*check)(struct perf_evlist *evlist);
} test__events[] = {
	{
		.name  = "syscalls:sys_enter_open",
		.check = test__checkevent_tracepoint,
	},
	{
		.name  = "syscalls:*",
		.check = test__checkevent_tracepoint_multi,
	},
	{
		.name  = "r1",
		.check = test__checkevent_raw,
	},
	{
		.name  = "1:1",
		.check = test__checkevent_numeric,
	},
	{
		.name  = "instructions",
		.check = test__checkevent_symbolic_name,
	},
	{
		.name  = "faults",
		.check = test__checkevent_symbolic_alias,
	},
	{
		.name  = "L1-dcache-load-miss",
		.check = test__checkevent_genhw,
	},
	{
		.name  = "mem:0",
		.check = test__checkevent_breakpoint,
	},
	{
		.name  = "mem:0:x",
		.check = test__checkevent_breakpoint_x,
	},
	{
		.name  = "mem:0:r",
		.check = test__checkevent_breakpoint_r,
	},
	{
		.name  = "mem:0:w",
		.check = test__checkevent_breakpoint_w,
	},
	{
		.name  = "syscalls:sys_enter_open:k",
		.check = test__checkevent_tracepoint_modifier,
	},
	{
		.name  = "syscalls:*:u",
		.check = test__checkevent_tracepoint_multi_modifier,
	},
	{
		.name  = "r1:kp",
		.check = test__checkevent_raw_modifier,
	},
	{
		.name  = "1:1:hp",
		.check = test__checkevent_numeric_modifier,
	},
	{
		.name  = "instructions:h",
		.check = test__checkevent_symbolic_name_modifier,
	},
	{
		.name  = "faults:u",
		.check = test__checkevent_symbolic_alias_modifier,
	},
	{
		.name  = "L1-dcache-load-miss:kp",
		.check = test__checkevent_genhw_modifier,
	},
};

#define TEST__EVENTS_CNT ARRAY_SIZE(test__events)

static int test__parse_events(void)
{
	struct perf_evlist *evlist;
	u_int i;
	int ret = 0;

	for (i = 0; i < TEST__EVENTS_CNT; i++) {
		struct test__event_st *e = &test__events[i];

		evlist = perf_evlist__new(NULL, NULL);
		if (evlist == NULL) {
			ret = -1; /* don't report success when out of memory */
			break;
		}

		ret = parse_events(evlist, e->name, 0);
		if (ret)
			pr_debug("failed to parse event '%s', err %d\n",
				 e->name, ret);
		else
			ret = e->check(evlist);

		perf_evlist__delete(evlist); /* free on the error paths too */
		if (ret)
			break;
	}

	return ret;
}

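/*
 * Find the first CPU in pid's affinity mask and return it, narrowing
 * *maskp down to just that CPU so the mask can be handed straight back
 * to sched_setaffinity(). The mask is grown with CPU_ALLOC() and the
 * call retried while the kernel reports (EINVAL) that it is too small.
 */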
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t **maskp,
					 size_t *sizep)
{
	cpu_set_t *mask;
	size_t size;
	int i, cpu = -1, nrcpus = 1024;
realloc:
	mask = CPU_ALLOC(nrcpus);
	size = CPU_ALLOC_SIZE(nrcpus);
	CPU_ZERO_S(size, mask);

	if (sched_getaffinity(pid, size, mask) == -1) {
		CPU_FREE(mask);
		if (errno == EINVAL && nrcpus < (1024 << 8)) {
			nrcpus = nrcpus << 2;
			goto realloc;
		}
		perror("sched_getaffinity");
		return -1;
	}

	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET_S(i, size, mask)) {
			if (cpu == -1) {
				cpu = i;
				*maskp = mask;
				*sizep = size;
			} else
				CPU_CLR_S(i, size, mask);
		}
	}

	if (cpu == -1)
		CPU_FREE(mask);

	return cpu;
}

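/*
 * Fork a "sleep 1" workload pinned to one CPU, record it with the
 * default "cycles" event plus CPU/TID/TIME in each sample, and then
 * check the PERF_RECORD_{COMM,MMAP,FORK,EXIT,SAMPLE} stream for sane
 * timestamps, cpu/pid/tid values and the expected COMM/MMAP events.
 */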
static int test__PERF_RECORD(void)
{
	struct perf_record_opts opts = {
		.target_pid = -1,
		.target_tid = -1,
		.no_delay   = true,
		.freq       = 10,
		.mmap_pages = 256,
		.sample_id_all_avail = true,
	};
	cpu_set_t *cpu_mask = NULL;
	size_t cpu_mask_size = 0;
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);
	struct perf_evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname;
	u64 sample_type, prev_time = 0;
	bool found_cmd_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0, sample_size;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };

	if (evlist == NULL) { /* argv is a local array, it can't be NULL */
		pr_debug("Not enough memory to create evlist\n");
		goto out;
	}

	/*
	 * We need at least one evsel in the evlist, use the default
	 * one: "cycles".
	 */
	err = perf_evlist__add_default(evlist);
	if (err < 0) {
		pr_debug("Not enough memory to create evsel\n");
		goto out_delete_evlist;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = perf_evlist__create_maps(evlist, opts.target_pid,
				       opts.target_tid, opts.cpu_list);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run: it'll be forked, and will
	 * then wait for perf_evlist__start_workload() to exec it. This is
	 * done this way so that we have time to open the evlist (calling
	 * sys_perf_event_open on all the fds) and then mmap them.
	 */
	err = perf_evlist__prepare_workload(evlist, &opts, argv);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Config the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
	evsel->attr.sample_type |= PERF_SAMPLE_CPU;
	evsel->attr.sample_type |= PERF_SAMPLE_TID;
	evsel->attr.sample_type |= PERF_SAMPLE_TIME;
	perf_evlist__config_attrs(evlist, &opts);

	err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask,
					    &cpu_mask_size);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * So that we can check perf_sample.cpu on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n", strerror(errno));
		goto out_free_cpu_mask;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = perf_evlist__open(evlist, opts.group);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
	if (err < 0) {
		pr_debug("perf_evlist__mmap: %s\n", strerror(errno));
		goto out_delete_evlist;
	}

	/*
	 * We'll need these two to parse the PERF_SAMPLE_* fields in each
	 * event.
	 */
	sample_type = perf_evlist__sample_type(evlist);
	sample_size = __perf_evsel__sample_size(sample_type);

	/*
	 * Now that all is properly set up, enable the events, they will
	 * count just on workload.pid, which will start...
	 */
	perf_evlist__enable(evlist);

	/*
	 * Now!
	 */
	perf_evlist__start_workload(evlist);

	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->nr_mmaps; i++) {
			union perf_event *event;

			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = perf_event__parse_sample(event, sample_type,
							       sample_size, true,
							       &sample, false);
				if (err < 0) {
					if (verbose)
						perf_event__fprintf(event, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_err;
				}

				if (verbose) {
					pr_info("%" PRIu64 " %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				     (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					bname = strrchr(event->mmap.filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap)
						found_vdso_mmap = !strcmp(event->mmap.filename, "[vdso]");
					break;

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}
			}
		}

		/*
		 * We don't use poll here because, at least as of 3.1, the
		 * PERF_RECORD_{!SAMPLE} events don't honour
		 * perf_event_attr.wakeup_events, just PERF_RECORD_SAMPLE does.
		 */
		if (total_events == before && false)
			poll(evlist->pollfd, evlist->nr_fds, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
	if (nr_events[PERF_RECORD_COMM] > 1) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_err:
	perf_evlist__munmap(evlist);
out_free_cpu_mask:
	CPU_FREE(cpu_mask);
out_delete_evlist:
	perf_evlist__delete(evlist);
out:
	return (err < 0 || errs > 0) ? -1 : 0;
}

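/*
 * The main test table: entries run in order and the list is terminated
 * by a NULL ->func. The 1-based position here is the number used to
 * select tests on the command line.
 */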
static struct test {
	const char *desc;
	int (*func)(void);
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
	{
		.desc = "parse events tests",
		.func = test__parse_events,
	},
	{
		.desc = "Validate PERF_RECORD_* events & perf_sample fields",
		.func = test__PERF_RECORD,
	},
	{
		.func = NULL,
	},
};

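/*
 * With no arguments every test matches. Otherwise each argument is
 * tried both as a 1-based test number and as a substring of a test
 * description, so that (to give a hypothetical invocation):
 *
 *	perf test 1 mmap
 *
 * would run test 1 plus any test whose description contains "mmap".
 */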
static bool perf_test__matches(int curr, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		unsigned long nr = strtoul(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == (unsigned long)curr + 1)
				return true;
			continue;
		}

		if (strstr(tests[curr].desc, argv[i]))
			return true;
	}

	return false;
}

static int __cmd_test(int argc, const char *argv[])
{
	int i = 0;

	while (tests[i].func) {
		int curr = i++, err;

		if (!perf_test__matches(curr, argc, argv))
			continue;

		pr_info("%2d: %s:", i, tests[curr].desc);
		pr_debug("\n--- start ---\n");
		err = tests[curr].func();
		pr_debug("---- end ----\n%s:", tests[curr].desc);
		pr_info(" %s\n", err ? "FAILED!" : "Ok");
	}

	return 0;
}

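/*
 * "perf test list [<fragment>]": print the available tests, optionally
 * only those whose description contains the given fragment.
 */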
static int perf_test__list(int argc, const char **argv)
{
	int i = 0;

	while (tests[i].func) {
		int curr = i++;

		if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
			continue;

		pr_info("%2d: %s\n", i, tests[curr].desc);
	}

	return 0;
}

int cmd_test(int argc, const char **argv, const char *prefix __used)
{
	const char * const test_usage[] = {
	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
	NULL,
	};
	const struct option test_options[] = {
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_END()
	};

	argc = parse_options(argc, argv, test_options, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list"))
		return perf_test__list(argc, argv);

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.sort_by_name = true;
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init() < 0)
		return -1;

	setup_pager();

	return __cmd_test(argc, argv);
}