linux/tools/perf/tests/code-reading.c
#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <inttypes.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <sys/param.h>

#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "cpumap.h"
#include "machine.h"
#include "event.h"
#include "thread.h"

#include "tests.h"

#include "sane_ctype.h"

#define BUFSZ   1024
#define READLEN 128

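/* Tracks the start addresses of kcore maps that have already been tested */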
struct state {
        u64 done[1024];
        size_t done_cnt;
};

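/* Convert a single hex digit character to its numeric value */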
static unsigned int hex(char c)
{
        if (c >= '0' && c <= '9')
                return c - '0';
        if (c >= 'a' && c <= 'f')
                return c - 'a' + 10;
        return c - 'A' + 10;
}

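/*
 * Decode one whitespace-delimited chunk of hex byte pairs from an objdump
 * line, storing the bytes into *buf and advancing *line and *buf.  Returns
 * the number of bytes decoded.
 */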
static size_t read_objdump_chunk(const char **line, unsigned char **buf,
                                 size_t *buf_len)
{
        size_t bytes_read = 0;
        unsigned char *chunk_start = *buf;

        /* Read bytes */
        while (*buf_len > 0) {
                char c1, c2;

                /* Get 2 hex digits */
                c1 = *(*line)++;
                if (!isxdigit(c1))
                        break;
                c2 = *(*line)++;
                if (!isxdigit(c2))
                        break;

                /* Store byte and advance buf */
                **buf = (hex(c1) << 4) | hex(c2);
                (*buf)++;
                (*buf_len)--;
                bytes_read++;

                /* End of chunk? */
                if (isspace(**line))
                        break;
        }

        /*
         * objdump displays the raw insn as LE if the code endianness
         * is LE and bytes_per_chunk > 1. In that case, reverse the
         * chunk we just read.
         *
         * See disassemble_bytes() in binutils/objdump.c for details on
         * how objdump chooses the display endianness.
         */
        if (bytes_read > 1 && !bigendian()) {
                unsigned char *chunk_end = chunk_start + bytes_read - 1;
                unsigned char tmp;

                while (chunk_start < chunk_end) {
                        tmp = *chunk_start;
                        *chunk_start = *chunk_end;
                        *chunk_end = tmp;
                        chunk_start++;
                        chunk_end--;
                }
        }

        return bytes_read;
}

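/*
 * Decode all raw instruction bytes on a single objdump output line into
 * 'buf'.  Returns the number of bytes successfully read.
 */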
static size_t read_objdump_line(const char *line, unsigned char *buf,
                                size_t buf_len)
{
        const char *p;
        size_t ret, bytes_read = 0;

        /* Skip to a colon */
        p = strchr(line, ':');
        if (!p)
                return 0;
        p++;

        /* Skip initial spaces */
        while (*p) {
                if (!isspace(*p))
                        break;
                p++;
        }

        do {
                ret = read_objdump_chunk(&p, &buf, &buf_len);
                bytes_read += ret;
                p++;
        } while (ret > 0);

        /* return number of successfully read bytes */
        return bytes_read;
}

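/*
 * Read objdump disassembly output line by line and reassemble the raw
 * instruction bytes into 'buf', placing them according to the address at
 * the start of each line.  On return, *len holds the number of bytes that
 * could not be read.
 */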
static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr)
{
        char *line = NULL;
        size_t line_len, off_last = 0;
        ssize_t ret;
        int err = 0;
        u64 addr, last_addr = start_addr;

        while (off_last < *len) {
                size_t off, read_bytes, written_bytes;
                unsigned char tmp[BUFSZ];

                ret = getline(&line, &line_len, f);
                if (feof(f))
                        break;
                if (ret < 0) {
                        pr_debug("getline failed\n");
                        err = -1;
                        break;
                }

                /* read objdump data into temporary buffer */
                read_bytes = read_objdump_line(line, tmp, sizeof(tmp));
                if (!read_bytes)
                        continue;

                if (sscanf(line, "%"PRIx64, &addr) != 1)
                        continue;
                if (addr < last_addr) {
                        pr_debug("addr going backwards, read beyond section?\n");
                        break;
                }
                last_addr = addr;
                /*
                 * Copy it from the temporary buffer to 'buf' according
                 * to the address on the current objdump line.
                 */
                off = addr - start_addr;
                if (off >= *len)
                        break;
                written_bytes = MIN(read_bytes, *len - off);
                memcpy(buf + off, tmp, written_bytes);
                off_last = off + written_bytes;
        }

        /* *len now holds the number of bytes that could not be read */
        *len -= off_last;

        free(line);

        return err;
}

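/*
 * Run objdump on 'filename' for the given address range and capture the
 * raw instruction bytes it prints into 'buf'.
 */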
static int read_via_objdump(const char *filename, u64 addr, void *buf,
                            size_t len)
{
        char cmd[PATH_MAX * 2];
        const char *fmt;
        FILE *f;
        int ret;

        fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
        ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
                       filename);
        if (ret <= 0 || (size_t)ret >= sizeof(cmd))
                return -1;

        pr_debug("Objdump command is: %s\n", cmd);

        /* Ignore objdump errors */
        strcat(cmd, " 2>/dev/null");

        f = popen(cmd, "r");
        if (!f) {
                pr_debug("popen failed\n");
                return -1;
        }

        ret = read_objdump_output(f, buf, &len, addr);
        if (len) {
                pr_debug("objdump read too few bytes: %zd\n", len);
                if (!ret)
                        ret = len;
        }

        pclose(f);

        return ret;
}

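/* Hex-dump a buffer, 16 bytes per line, to the debug log */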
static void dump_buf(unsigned char *buf, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++) {
                pr_debug("0x%02x ", buf[i]);
                if (i % 16 == 15)
                        pr_debug("\n");
        }
        pr_debug("\n");
}

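/*
 * Read the object code at 'addr' twice - once through perf's
 * dso__data_read_offset() and once by running objdump on the mapped file -
 * and check that the two reads match.
 */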
static int read_object_code(u64 addr, size_t len, u8 cpumode,
                            struct thread *thread, struct state *state)
{
        struct addr_location al;
        unsigned char buf1[BUFSZ];
        unsigned char buf2[BUFSZ];
        size_t ret_len;
        u64 objdump_addr;
        const char *objdump_name;
        char decomp_name[KMOD_DECOMP_LEN];
        int ret;

        pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);

        thread__find_addr_map(thread, cpumode, MAP__FUNCTION, addr, &al);
        if (!al.map || !al.map->dso) {
                pr_debug("thread__find_addr_map failed\n");
                return -1;
        }

        pr_debug("File is: %s\n", al.map->dso->long_name);

        if (al.map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
            !dso__is_kcore(al.map->dso)) {
                pr_debug("Unexpected kernel address - skipping\n");
                return 0;
        }

        pr_debug("On file address is: %#"PRIx64"\n", al.addr);

        if (len > BUFSZ)
                len = BUFSZ;

        /* Do not go off the map */
        if (addr + len > al.map->end)
                len = al.map->end - addr;

        /* Read the object code using perf */
        ret_len = dso__data_read_offset(al.map->dso, thread->mg->machine,
                                        al.addr, buf1, len);
        if (ret_len != len) {
                pr_debug("dso__data_read_offset failed\n");
                return -1;
        }

        /*
         * Converting addresses for use by objdump requires more information.
         * map__load() does that.  See map__rip_2objdump() for details.
         */
        if (map__load(al.map))
                return -1;

        /* objdump struggles with kcore - try each map only once */
        if (dso__is_kcore(al.map->dso)) {
                size_t d;

                for (d = 0; d < state->done_cnt; d++) {
                        if (state->done[d] == al.map->start) {
                                pr_debug("kcore map tested already");
                                pr_debug(" - skipping\n");
                                return 0;
                        }
                }
                if (state->done_cnt >= ARRAY_SIZE(state->done)) {
                        pr_debug("Too many kcore maps - skipping\n");
                        return 0;
                }
                state->done[state->done_cnt++] = al.map->start;
        }

        objdump_name = al.map->dso->long_name;
        if (dso__needs_decompress(al.map->dso)) {
                if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
                                                 decomp_name,
                                                 sizeof(decomp_name)) < 0) {
                        pr_debug("decompression failed\n");
                        return -1;
                }

                objdump_name = decomp_name;
        }

        /* Read the object code using objdump */
        objdump_addr = map__rip_2objdump(al.map, al.addr);
        ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);

        if (dso__needs_decompress(al.map->dso))
                unlink(objdump_name);

        if (ret > 0) {
                /*
                 * The kernel maps are inaccurate - assume objdump is right in
                 * that case.
                 */
                if (cpumode == PERF_RECORD_MISC_KERNEL ||
                    cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
                        len -= ret;
                        if (len) {
                                pr_debug("Reducing len to %zu\n", len);
                        } else if (dso__is_kcore(al.map->dso)) {
                                /*
                                 * objdump cannot handle very large segments
                                 * that may be found in kcore.
                                 */
                                pr_debug("objdump failed for kcore");
                                pr_debug(" - skipping\n");
                                return 0;
                        } else {
                                return -1;
                        }
                }
        }
        if (ret < 0) {
                pr_debug("read_via_objdump failed\n");
                return -1;
        }

        /* The results should be identical */
        if (memcmp(buf1, buf2, len)) {
                pr_debug("Bytes read differ from those read by objdump\n");
                pr_debug("buf1 (dso):\n");
                dump_buf(buf1, len);
                pr_debug("buf2 (objdump):\n");
                dump_buf(buf2, len);
                return -1;
        }
        pr_debug("Bytes read match those read by objdump\n");

        return 0;
}

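/* Parse a sample event, resolve its thread and verify the code at its IP */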
static int process_sample_event(struct machine *machine,
                                struct perf_evlist *evlist,
                                union perf_event *event, struct state *state)
{
        struct perf_sample sample;
        struct thread *thread;
        int ret;

        if (perf_evlist__parse_sample(evlist, event, &sample)) {
                pr_debug("perf_evlist__parse_sample failed\n");
                return -1;
        }

        thread = machine__findnew_thread(machine, sample.pid, sample.tid);
        if (!thread) {
                pr_debug("machine__findnew_thread failed\n");
                return -1;
        }

        ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread, state);
        thread__put(thread);
        return ret;
}

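/*
 * Dispatch an event: verify code for samples, ignore (un)throttle events,
 * and feed everything else to the machine state so the maps stay up to date.
 */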
static int process_event(struct machine *machine, struct perf_evlist *evlist,
                         union perf_event *event, struct state *state)
{
        if (event->header.type == PERF_RECORD_SAMPLE)
                return process_sample_event(machine, evlist, event, state);

        if (event->header.type == PERF_RECORD_THROTTLE ||
            event->header.type == PERF_RECORD_UNTHROTTLE)
                return 0;

        if (event->header.type < PERF_RECORD_MAX) {
                int ret;

                ret = machine__process_event(machine, event, NULL);
                if (ret < 0)
                        pr_debug("machine__process_event failed, event type %u\n",
                                 event->header.type);
                return ret;
        }

        return 0;
}

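/* Drain all mmapped ring buffers and process each recorded event */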
static int process_events(struct machine *machine, struct perf_evlist *evlist,
                          struct state *state)
{
        union perf_event *event;
        int i, ret;

        for (i = 0; i < evlist->nr_mmaps; i++) {
                while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
                        ret = process_event(machine, evlist, event, state);
                        perf_evlist__mmap_consume(evlist, i);
                        if (ret < 0)
                                return ret;
                }
        }
        return 0;
}

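/*
 * The functions below generate some user-space and kernel activity for the
 * test to sample.
 */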
static int comp(const void *a, const void *b)
{
        return *(int *)a - *(int *)b;
}

static void do_sort_something(void)
{
        int buf[40960], i;

        for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
                buf[i] = ARRAY_SIZE(buf) - i - 1;

        qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);

        for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
                if (buf[i] != i) {
                        pr_debug("qsort failed\n");
                        break;
                }
        }
}

static void sort_something(void)
{
        int i;

        for (i = 0; i < 10; i++)
                do_sort_something();
}

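/* Generate kernel activity via repeated pipe() system calls */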
static void syscall_something(void)
{
        int pipefd[2];
        int i;

        for (i = 0; i < 1000; i++) {
                if (pipe(pipefd) < 0) {
                        pr_debug("pipe failed\n");
                        break;
                }
                close(pipefd[1]);
                close(pipefd[0]);
        }
}

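/* Generate filesystem activity by repeatedly creating and removing a file */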
static void fs_something(void)
{
        const char *test_file_name = "temp-perf-code-reading-test-file--";
        FILE *f;
        int i;

        for (i = 0; i < 1000; i++) {
                f = fopen(test_file_name, "w+");
                if (f) {
                        fclose(f);
                        unlink(test_file_name);
                }
        }
}

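/* The workload that runs while the events are enabled */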
static void do_something(void)
{
        fs_something();

        sort_something();

        syscall_something();
}

enum {
        TEST_CODE_READING_OK,
        TEST_CODE_READING_NO_VMLINUX,
        TEST_CODE_READING_NO_KCORE,
        TEST_CODE_READING_NO_ACCESS,
        TEST_CODE_READING_NO_KERNEL_OBJ,
};

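/*
 * Record 'cycles' events on this process while running a small workload,
 * then compare the object code at each sampled IP against objdump's view of
 * the same bytes.  'try_kcore' forces kallsyms/kcore instead of vmlinux.
 */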
static int do_test_code_reading(bool try_kcore)
{
        struct machine *machine;
        struct thread *thread;
        struct record_opts opts = {
                .mmap_pages          = UINT_MAX,
                .user_freq           = UINT_MAX,
                .user_interval       = ULLONG_MAX,
                .freq                = 500,
                .target              = {
                        .uses_mmap   = true,
                },
        };
        struct state state = {
                .done_cnt = 0,
        };
        struct thread_map *threads = NULL;
        struct cpu_map *cpus = NULL;
        struct perf_evlist *evlist = NULL;
        struct perf_evsel *evsel = NULL;
        int err = -1, ret;
        pid_t pid;
        struct map *map;
        bool have_vmlinux, have_kcore, excl_kernel = false;

        pid = getpid();

        machine = machine__new_host();

        ret = machine__create_kernel_maps(machine);
        if (ret < 0) {
                pr_debug("machine__create_kernel_maps failed\n");
                goto out_err;
        }

        /* Force the use of kallsyms instead of vmlinux to try kcore */
        if (try_kcore)
                symbol_conf.kallsyms_name = "/proc/kallsyms";

        /* Load kernel map */
        map = machine__kernel_map(machine);
        ret = map__load(map);
        if (ret < 0) {
                pr_debug("map__load failed\n");
                goto out_err;
        }
        have_vmlinux = dso__is_vmlinux(map->dso);
        have_kcore = dso__is_kcore(map->dso);

        /* 2nd time through we just try kcore */
        if (try_kcore && !have_kcore)
                return TEST_CODE_READING_NO_KCORE;

        /* No point getting kernel events if there is no kernel object */
        if (!have_vmlinux && !have_kcore)
                excl_kernel = true;

        threads = thread_map__new_by_tid(pid);
        if (!threads) {
                pr_debug("thread_map__new_by_tid failed\n");
                goto out_err;
        }

        ret = perf_event__synthesize_thread_map(NULL, threads,
                                                perf_event__process, machine, false, 500);
        if (ret < 0) {
                pr_debug("perf_event__synthesize_thread_map failed\n");
                goto out_err;
        }

        thread = machine__findnew_thread(machine, pid, pid);
        if (!thread) {
                pr_debug("machine__findnew_thread failed\n");
                goto out_put;
        }

        cpus = cpu_map__new(NULL);
        if (!cpus) {
                pr_debug("cpu_map__new failed\n");
                goto out_put;
        }

        while (1) {
                const char *str;

                evlist = perf_evlist__new();
                if (!evlist) {
                        pr_debug("perf_evlist__new failed\n");
                        goto out_put;
                }

                perf_evlist__set_maps(evlist, cpus, threads);

                if (excl_kernel)
                        str = "cycles:u";
                else
                        str = "cycles";
                pr_debug("Parsing event '%s'\n", str);
                ret = parse_events(evlist, str, NULL);
                if (ret < 0) {
                        pr_debug("parse_events failed\n");
                        goto out_put;
                }

                perf_evlist__config(evlist, &opts, NULL);

                evsel = perf_evlist__first(evlist);

                evsel->attr.comm = 1;
                evsel->attr.disabled = 1;
                evsel->attr.enable_on_exec = 0;

                ret = perf_evlist__open(evlist);
                if (ret < 0) {
                        if (!excl_kernel) {
                                excl_kernel = true;
                                /*
                                 * Both cpus and threads are now owned by evlist
                                 * and will be freed by the following
                                 * perf_evlist__set_maps call. Get a reference
                                 * to keep them alive.
                                 */
                                cpu_map__get(cpus);
                                thread_map__get(threads);
                                perf_evlist__set_maps(evlist, NULL, NULL);
                                perf_evlist__delete(evlist);
                                evlist = NULL;
                                continue;
                        }

                        if (verbose > 0) {
                                char errbuf[512];
                                perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
                                pr_debug("perf_evlist__open() failed!\n%s\n", errbuf);
                        }

                        goto out_put;
                }
                break;
        }

        ret = perf_evlist__mmap(evlist, UINT_MAX, false);
        if (ret < 0) {
                pr_debug("perf_evlist__mmap failed\n");
                goto out_put;
        }

        perf_evlist__enable(evlist);

        do_something();

        perf_evlist__disable(evlist);

        ret = process_events(machine, evlist, &state);
        if (ret < 0)
                goto out_put;

        if (!have_vmlinux && !have_kcore && !try_kcore)
                err = TEST_CODE_READING_NO_KERNEL_OBJ;
        else if (!have_vmlinux && !try_kcore)
                err = TEST_CODE_READING_NO_VMLINUX;
        else if (excl_kernel)
                err = TEST_CODE_READING_NO_ACCESS;
        else
                err = TEST_CODE_READING_OK;
out_put:
        thread__put(thread);
out_err:

        if (evlist) {
                perf_evlist__delete(evlist);
        } else {
                cpu_map__put(cpus);
                thread_map__put(threads);
        }
        machine__delete_threads(machine);
        machine__delete(machine);

        return err;
}

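/* Test entry point: run once against vmlinux, then once again using kcore */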
int test__code_reading(int subtest __maybe_unused)
{
        int ret;

        ret = do_test_code_reading(false);
        if (!ret)
                ret = do_test_code_reading(true);

        switch (ret) {
        case TEST_CODE_READING_OK:
                return 0;
        case TEST_CODE_READING_NO_VMLINUX:
                pr_debug("no vmlinux\n");
                return 0;
        case TEST_CODE_READING_NO_KCORE:
                pr_debug("no kcore\n");
                return 0;
        case TEST_CODE_READING_NO_ACCESS:
                pr_debug("no access\n");
                return 0;
        case TEST_CODE_READING_NO_KERNEL_OBJ:
                pr_debug("no kernel obj\n");
                return 0;
        default:
                return -1;
        }
}