linux/tools/perf/builtin-test.c
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "builtin.h"

#include "util/cache.h"
#include "util/debug.h"
#include "util/parse-options.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"

static long page_size;

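/*
 * Filter passed to machine__load_vmlinux_path(): mark each vmlinux symbol as
 * visited.  The flag lives in the per-symbol private area reserved via
 * symbol_conf.priv_size in cmd_test() and is reached through symbol__priv().
 */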
static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym)
{
        bool *visited = symbol__priv(sym);
        *visited = true;
        return 0;
}

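/*
 * Load the kernel symbol table twice, once from a vmlinux image and once from
 * /proc/kallsyms, then check that both views agree on symbol names and
 * addresses.
 */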
static int test__vmlinux_matches_kallsyms(void)
{
        int err = -1;
        struct rb_node *nd;
        struct symbol *sym;
        struct map *kallsyms_map, *vmlinux_map;
        struct machine kallsyms, vmlinux;
        enum map_type type = MAP__FUNCTION;
        struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };

        /*
         * Step 1:
         *
         * Init the machines that will hold kernel, modules obtained from
         * both vmlinux + .ko files and from /proc/kallsyms split by modules.
         */
        machine__init(&kallsyms, "", HOST_KERNEL_ID);
        machine__init(&vmlinux, "", HOST_KERNEL_ID);

        /*
         * Step 2:
         *
         * Create the kernel maps for kallsyms and the DSO where we will then
         * load /proc/kallsyms. Also create the modules maps from /proc/modules
         * and find the .ko files that match them in /lib/modules/`uname -r`/.
         */
        if (machine__create_kernel_maps(&kallsyms) < 0) {
                pr_debug("machine__create_kernel_maps ");
                return -1;
        }

        /*
         * Step 3:
         *
         * Load and split /proc/kallsyms into multiple maps, one per module.
         */
        if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms", type, NULL) <= 0) {
                pr_debug("dso__load_kallsyms ");
                goto out;
        }

        /*
         * Step 4:
         *
         * kallsyms will be internally on demand sorted by name so that we can
         * find the reference relocation symbol, i.e. the symbol we will use
         * to see if the running kernel was relocated by checking if it has the
         * same value in the vmlinux file we load.
         */
        kallsyms_map = machine__kernel_map(&kallsyms, type);

        sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
        if (sym == NULL) {
                pr_debug("dso__find_symbol_by_name ");
                goto out;
        }

        ref_reloc_sym.addr = sym->start;

        /*
         * Step 5:
         *
         * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
         */
        if (machine__create_kernel_maps(&vmlinux) < 0) {
                pr_debug("machine__create_kernel_maps ");
                goto out;
        }

        vmlinux_map = machine__kernel_map(&vmlinux, type);
        map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;

        /*
         * Step 6:
         *
         * Locate a vmlinux file in the vmlinux path that has a buildid that
         * matches the one of the running kernel.
         *
         * While doing that, look for the ref reloc symbol; if we find it
         * we'll have its ref_reloc_sym.unrelocated_addr, and then
         * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
         * to fix up the symbols.
         */
        if (machine__load_vmlinux_path(&vmlinux, type,
                                       vmlinux_matches_kallsyms_filter) <= 0) {
                pr_debug("machine__load_vmlinux_path ");
                goto out;
        }

        err = 0;
        /*
         * Step 7:
         *
         * Now look at the symbols in the vmlinux DSO and check if we find all
         * of them in the kallsyms dso. For the ones that are in both, check
         * their names and end addresses too.
         */
        for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
                struct symbol *pair, *first_pair;
                bool backwards = true;

                sym = rb_entry(nd, struct symbol, rb_node);

                if (sym->start == sym->end)
                        continue;

                first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
                pair = first_pair;

                if (pair && pair->start == sym->start) {
next_pair:
                        if (strcmp(sym->name, pair->name) == 0) {
                                /*
                                 * kallsyms doesn't have the symbol end, so we
                                 * set it to the start of the next symbol - 1.
                                 * In some cases this is up to a page off:
                                 * trace_kmalloc, when this code was being
                                 * developed, was one such example, 2106 bytes
                                 * off the real size. More than a page and we
                                 * _really_ have a problem.
                                 */
                                s64 skew = sym->end - pair->end;
                                if (llabs(skew) < page_size)
                                        continue;

                                pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
                                         sym->start, sym->name, sym->end, pair->end);
                        } else {
                                struct rb_node *nnd;
detour:
                                nnd = backwards ? rb_prev(&pair->rb_node) :
                                                  rb_next(&pair->rb_node);
                                if (nnd) {
                                        struct symbol *next = rb_entry(nnd, struct symbol, rb_node);

                                        if (next->start == sym->start) {
                                                pair = next;
                                                goto next_pair;
                                        }
                                }

                                if (backwards) {
                                        backwards = false;
                                        pair = first_pair;
                                        goto detour;
                                }

                                pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
                                         sym->start, sym->name, pair->name);
                        }
                } else
                        pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);

                err = -1;
        }

        if (!verbose)
                goto out;

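        /*
         * With -v, also cross-check the kernel/module maps: those only in
         * vmlinux, those in both but under a different name, and those only
         * in kallsyms.  pair->priv marks kallsyms maps that found a match.
         */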
        pr_info("Maps only in vmlinux:\n");

        for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
                /*
                 * If it is the kernel, kallsyms is always "[kernel.kallsyms]",
                 * while the vmlinux map will have the path of the vmlinux file
                 * being used, so use the short name, less descriptive but the
                 * same ("[kernel]" in both cases).
                 */
                pair = map_groups__find_by_name(&kallsyms.kmaps, type,
                                                (pos->dso->kernel ?
                                                        pos->dso->short_name :
                                                        pos->dso->name));
                if (pair)
                        pair->priv = 1;
                else
                        map__fprintf(pos, stderr);
        }

        pr_info("Maps in vmlinux with a different name in kallsyms:\n");

        for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node), *pair;

                pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
                if (pair == NULL || pair->priv)
                        continue;

                if (pair->start == pos->start) {
                        pair->priv = 1;
                        pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
                                pos->start, pos->end, pos->pgoff, pos->dso->name);
                        if (pos->pgoff != pair->pgoff || pos->end != pair->end)
                                pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
                                        pair->start, pair->end, pair->pgoff);
                        pr_info(" %s\n", pair->dso->name);
                }
        }

        pr_info("Maps only in kallsyms:\n");

        for (nd = rb_first(&kallsyms.kmaps.maps[type]); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);

                if (!pos->priv)
                        map__fprintf(pos, stderr);
        }
out:
        return err;
}

#include "util/cpumap.h"
#include "util/evsel.h"
#include <sys/types.h>

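/*
 * Read the numeric id of a syscalls: tracepoint from debugfs, e.g.
 * .../tracing/events/syscalls/sys_enter_open/id, suitable for use as the
 * perf_event_attr.config of a PERF_TYPE_TRACEPOINT event.
 */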
static int trace_event__id(const char *event_name)
{
        char *filename;
        int err = -1, fd;

        if (asprintf(&filename,
                     "/sys/kernel/debug/tracing/events/syscalls/%s/id",
                     event_name) < 0)
                return -1;

        fd = open(filename, O_RDONLY);
        if (fd >= 0) {
                char id[16];
                ssize_t n = read(fd, id, sizeof(id) - 1);

                if (n > 0) {
                        id[n] = '\0'; /* read() doesn't NUL-terminate */
                        err = atoi(id);
                }
                close(fd);
        }

        free(filename);
        return err;
}

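/*
 * Open a sys_enter_open tracepoint counter for the current thread only, do a
 * known number of open() calls and check that the counter saw exactly that
 * many.
 */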
static int test__open_syscall_event(void)
{
        int err = -1, fd;
        struct thread_map *threads;
        struct perf_evsel *evsel;
        struct perf_event_attr attr;
        unsigned int nr_open_calls = 111, i;
        int id = trace_event__id("sys_enter_open");

        if (id < 0) {
                pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
                return -1;
        }

        threads = thread_map__new(-1, getpid());
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
        }

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.config = id;
        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL) {
                pr_debug("perf_evsel__new\n");
                goto out_thread_map_delete;
        }

        if (perf_evsel__open_per_thread(evsel, threads) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
                goto out_evsel_delete;
        }

        for (i = 0; i < nr_open_calls; ++i) {
                fd = open("/etc/passwd", O_RDONLY);
                close(fd);
        }

        if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
                pr_debug("perf_evsel__read_on_cpu\n");
                goto out_close_fd;
        }

        if (evsel->counts->cpu[0].val != nr_open_calls) {
                pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
                         nr_open_calls, evsel->counts->cpu[0].val);
                goto out_close_fd;
        }

        err = 0;
out_close_fd:
        perf_evsel__close_fd(evsel, 1, threads->nr);
out_evsel_delete:
        perf_evsel__delete(evsel);
out_thread_map_delete:
        thread_map__delete(threads);
        return err;
}

#include <sched.h>

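/*
 * Same check, but with the counter open on every CPU in the system: pin the
 * thread to each CPU in turn, do a per-cpu distinct number of open() calls,
 * then verify each per-cpu count.
 */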
static int test__open_syscall_event_on_all_cpus(void)
{
        int err = -1, fd, cpu;
        struct thread_map *threads;
        struct cpu_map *cpus;
        struct perf_evsel *evsel;
        struct perf_event_attr attr;
        unsigned int nr_open_calls = 111, i;
        cpu_set_t cpu_set;
        int id = trace_event__id("sys_enter_open");

        if (id < 0) {
                pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
                return -1;
        }

        threads = thread_map__new(-1, getpid());
        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
        }

        cpus = cpu_map__new(NULL);
        if (cpus == NULL) {
                pr_debug("cpu_map__new\n");
                goto out_thread_map_delete;
        }

        CPU_ZERO(&cpu_set);

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_TRACEPOINT;
        attr.config = id;
        evsel = perf_evsel__new(&attr, 0);
        if (evsel == NULL) {
                pr_debug("perf_evsel__new\n");
                goto out_thread_map_delete;
        }

        if (perf_evsel__open(evsel, cpus, threads) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         strerror(errno));
                goto out_evsel_delete;
        }

        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int ncalls = nr_open_calls + cpu;
                /*
                 * XXX eventually lift this restriction in a way that
                 * keeps perf building on older glibc installations
                 * without CPU_ALLOC. 1024 cpus in 2010 still seems
                 * a reasonable upper limit tho :-)
                 */
                if (cpus->map[cpu] >= CPU_SETSIZE) {
                        pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
                        continue;
                }

                CPU_SET(cpus->map[cpu], &cpu_set);
                if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
                        pr_debug("sched_setaffinity() failed on CPU %d: %s ",
                                 cpus->map[cpu],
                                 strerror(errno));
                        goto out_close_fd;
                }
                for (i = 0; i < ncalls; ++i) {
                        fd = open("/etc/passwd", O_RDONLY);
                        close(fd);
                }
                CPU_CLR(cpus->map[cpu], &cpu_set);
        }

        /*
         * Here we need to explicitly preallocate the counts, as if we use
         * the auto allocation it will allocate just for 1 cpu, as we start
         * with cpu 0.
         */
        if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
                pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
                goto out_close_fd;
        }

        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int expected;

                if (cpus->map[cpu] >= CPU_SETSIZE)
                        continue;

                if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
                        pr_debug("perf_evsel__read_on_cpu\n");
                        goto out_close_fd;
                }

                expected = nr_open_calls + cpu;
                if (evsel->counts->cpu[cpu].val != expected) {
                        pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
                                 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
                        goto out_close_fd;
                }
        }

        err = 0;
out_close_fd:
        /* the counter was opened on all cpus, so close all of its fds */
        perf_evsel__close_fd(evsel, cpus->nr, threads->nr);
out_evsel_delete:
        perf_evsel__delete(evsel);
out_thread_map_delete:
        thread_map__delete(threads);
        return err;
}

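/*
 * The table of tests run by __cmd_test(): the list is walked in order and is
 * terminated by a NULL ->func entry.
 */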
static struct test {
        const char *desc;
        int (*func)(void);
} tests[] = {
        {
                .desc = "vmlinux symtab matches kallsyms",
                .func = test__vmlinux_matches_kallsyms,
        },
        {
                .desc = "detect open syscall event",
                .func = test__open_syscall_event,
        },
        {
                .desc = "detect open syscall event on all cpus",
                .func = test__open_syscall_event_on_all_cpus,
        },
        {
                .func = NULL,
        },
};

static int __cmd_test(void)
{
        int i = 0;

        page_size = sysconf(_SC_PAGE_SIZE);

        while (tests[i].func) {
                int err;
                pr_info("%2d: %s:", i + 1, tests[i].desc);
                pr_debug("\n--- start ---\n");
                err = tests[i].func();
                pr_debug("---- end ----\n%s:", tests[i].desc);
                pr_info(" %s\n", err ? "FAILED!" : "Ok");
                ++i;
        }

        return 0;
}

static const char * const test_usage[] = {
        "perf test [<options>]",
        NULL,
};

static const struct option test_options[] = {
        OPT_INTEGER('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
        OPT_END()
};

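/*
 * Entry point for 'perf test': reserve per-symbol private space for the
 * visited flag used by vmlinux_matches_kallsyms_filter(), enable sorting
 * symbols by name (needed by map__find_symbol_by_name()) and the vmlinux
 * path search, then run each test in the table.
 */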
int cmd_test(int argc, const char **argv, const char *prefix __used)
{
        argc = parse_options(argc, argv, test_options, test_usage, 0);
        if (argc)
                usage_with_options(test_usage, test_options);

        symbol_conf.priv_size = sizeof(int);
        symbol_conf.sort_by_name = true;
        symbol_conf.try_vmlinux_path = true;

        if (symbol__init() < 0)
                return -1;

        setup_pager();

        return __cmd_test();
}