linux/tools/perf/util/symbol.c
   1#include <dirent.h>
   2#include <errno.h>
   3#include <stdlib.h>
   4#include <stdio.h>
   5#include <string.h>
   6#include <sys/types.h>
   7#include <sys/stat.h>
   8#include <sys/param.h>
   9#include <fcntl.h>
  10#include <unistd.h>
  11#include <inttypes.h>
  12#include "annotate.h"
  13#include "build-id.h"
  14#include "util.h"
  15#include "debug.h"
  16#include "machine.h"
  17#include "symbol.h"
  18#include "strlist.h"
  19#include "intlist.h"
  20#include "header.h"
  21
  22#include <elf.h>
  23#include <limits.h>
  24#include <symbol/kallsyms.h>
  25#include <sys/utsname.h>
  26
  27static int dso__load_kernel_sym(struct dso *dso, struct map *map);
  28static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
  29static bool symbol__is_idle(const char *name);
  30
  31int vmlinux_path__nr_entries;
  32char **vmlinux_path;
  33
  34struct symbol_conf symbol_conf = {
  35        .use_modules            = true,
  36        .try_vmlinux_path       = true,
  37        .annotate_src           = true,
  38        .demangle               = true,
  39        .demangle_kernel        = false,
  40        .cumulate_callchain     = true,
  41        .show_hist_headers      = true,
  42        .symfs                  = "",
  43        .event_group            = true,
  44};
  45
  46static enum dso_binary_type binary_type_symtab[] = {
  47        DSO_BINARY_TYPE__KALLSYMS,
  48        DSO_BINARY_TYPE__GUEST_KALLSYMS,
  49        DSO_BINARY_TYPE__JAVA_JIT,
  50        DSO_BINARY_TYPE__DEBUGLINK,
  51        DSO_BINARY_TYPE__BUILD_ID_CACHE,
  52        DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
  53        DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
  54        DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
  55        DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
  56        DSO_BINARY_TYPE__GUEST_KMODULE,
  57        DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
  58        DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
  59        DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
  60        DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
  61        DSO_BINARY_TYPE__NOT_FOUND,
  62};
  63
  64#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
  65
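/*
 * Map a kallsyms/nm symbol type character to a perf map type: 'T'/'W'
 * (text/weak) symbols belong in MAP__FUNCTION maps, 'D' (data) symbols in
 * MAP__VARIABLE maps. The character is upcased first, so e.g.
 * symbol_type__is_a('t', MAP__FUNCTION) is also true.
 */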
  66bool symbol_type__is_a(char symbol_type, enum map_type map_type)
  67{
  68        symbol_type = toupper(symbol_type);
  69
  70        switch (map_type) {
  71        case MAP__FUNCTION:
  72                return symbol_type == 'T' || symbol_type == 'W';
  73        case MAP__VARIABLE:
  74                return symbol_type == 'D';
  75        default:
  76                return false;
  77        }
  78}
  79
  80static int prefix_underscores_count(const char *str)
  81{
  82        const char *tail = str;
  83
  84        while (*tail == '_')
  85                tail++;
  86
  87        return tail - str;
  88}
  89
  90int __weak arch__choose_best_symbol(struct symbol *syma,
  91                                    struct symbol *symb __maybe_unused)
  92{
  93        /* Avoid "SyS" kernel syscall aliases */
  94        if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
  95                return SYMBOL_B;
  96        if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
  97                return SYMBOL_B;
  98
  99        return SYMBOL_A;
 100}
 101
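/*
 * Pick which of two symbols at the same address to keep: prefer non-zero
 * size, then non-weak binding, then global binding, then fewer leading
 * underscores, then the longer name; any remaining tie is settled by
 * arch__choose_best_symbol().
 */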
 102static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
 103{
 104        s64 a;
 105        s64 b;
 106        size_t na, nb;
 107
  108        /* Prefer a symbol with non-zero length */
 109        a = syma->end - syma->start;
 110        b = symb->end - symb->start;
 111        if ((b == 0) && (a > 0))
 112                return SYMBOL_A;
 113        else if ((a == 0) && (b > 0))
 114                return SYMBOL_B;
 115
  116        /* Prefer a non-weak symbol over a weak one */
 117        a = syma->binding == STB_WEAK;
 118        b = symb->binding == STB_WEAK;
 119        if (b && !a)
 120                return SYMBOL_A;
 121        if (a && !b)
 122                return SYMBOL_B;
 123
  124        /* Prefer a global symbol over a non-global one */
 125        a = syma->binding == STB_GLOBAL;
 126        b = symb->binding == STB_GLOBAL;
 127        if (a && !b)
 128                return SYMBOL_A;
 129        if (b && !a)
 130                return SYMBOL_B;
 131
  132        /* Prefer the symbol with fewer leading underscores */
 133        a = prefix_underscores_count(syma->name);
 134        b = prefix_underscores_count(symb->name);
 135        if (b > a)
 136                return SYMBOL_A;
 137        else if (a > b)
 138                return SYMBOL_B;
 139
 140        /* Choose the symbol with the longest name */
 141        na = strlen(syma->name);
 142        nb = strlen(symb->name);
 143        if (na > nb)
 144                return SYMBOL_A;
 145        else if (na < nb)
 146                return SYMBOL_B;
 147
 148        return arch__choose_best_symbol(syma, symb);
 149}
 150
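/*
 * Drop duplicate symbols that share a start address, keeping whichever one
 * choose_best_symbol() prefers. A no-op when symbol aliases are allowed
 * (symbol_conf.allow_aliases).
 */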
 151void symbols__fixup_duplicate(struct rb_root *symbols)
 152{
 153        struct rb_node *nd;
 154        struct symbol *curr, *next;
 155
 156        if (symbol_conf.allow_aliases)
 157                return;
 158
 159        nd = rb_first(symbols);
 160
 161        while (nd) {
 162                curr = rb_entry(nd, struct symbol, rb_node);
 163again:
 164                nd = rb_next(&curr->rb_node);
  165        if (!nd)
  166                break;
  167
  168        next = rb_entry(nd, struct symbol, rb_node);
 169
 170                if (curr->start != next->start)
 171                        continue;
 172
 173                if (choose_best_symbol(curr, next) == SYMBOL_A) {
 174                        rb_erase(&next->rb_node, symbols);
 175                        symbol__delete(next);
 176                        goto again;
 177                } else {
 178                        nd = rb_next(&curr->rb_node);
 179                        rb_erase(&curr->rb_node, symbols);
 180                        symbol__delete(curr);
 181                }
 182        }
 183}
 184
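/*
 * kallsyms only gives start addresses, so give each zero-sized symbol an
 * ->end equal to the start of the next symbol; the last symbol is rounded
 * up to a page boundary.
 */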
 185void symbols__fixup_end(struct rb_root *symbols)
 186{
 187        struct rb_node *nd, *prevnd = rb_first(symbols);
 188        struct symbol *curr, *prev;
 189
 190        if (prevnd == NULL)
 191                return;
 192
 193        curr = rb_entry(prevnd, struct symbol, rb_node);
 194
 195        for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
 196                prev = curr;
 197                curr = rb_entry(nd, struct symbol, rb_node);
 198
 199                if (prev->end == prev->start && prev->end != curr->start)
 200                        prev->end = curr->start;
 201        }
 202
 203        /* Last entry */
 204        if (curr->end == curr->start)
 205                curr->end = roundup(curr->start, 4096);
 206}
 207
 208void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
 209{
 210        struct maps *maps = &mg->maps[type];
 211        struct map *next, *curr;
 212
 213        pthread_rwlock_wrlock(&maps->lock);
 214
 215        curr = maps__first(maps);
 216        if (curr == NULL)
 217                goto out_unlock;
 218
 219        for (next = map__next(curr); next; next = map__next(curr)) {
 220                curr->end = next->start;
 221                curr = next;
 222        }
 223
 224        /*
  225         * We don't have the actual symbols yet, so guess the
  226         * last map's final address.
 227         */
 228        curr->end = ~0ULL;
 229
 230out_unlock:
 231        pthread_rwlock_unlock(&maps->lock);
 232}
 233
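/*
 * Symbols are allocated with symbol_conf.priv_size bytes of private data
 * (e.g. struct annotation) placed immediately before the struct symbol
 * itself, which is why symbol__delete() frees from the start of that
 * private area.
 */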
 234struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
 235{
 236        size_t namelen = strlen(name) + 1;
 237        struct symbol *sym = calloc(1, (symbol_conf.priv_size +
 238                                        sizeof(*sym) + namelen));
 239        if (sym == NULL)
 240                return NULL;
 241
 242        if (symbol_conf.priv_size) {
 243                if (symbol_conf.init_annotation) {
 244                        struct annotation *notes = (void *)sym;
 245                        pthread_mutex_init(&notes->lock, NULL);
 246                }
 247                sym = ((void *)sym) + symbol_conf.priv_size;
 248        }
 249
 250        sym->start   = start;
 251        sym->end     = len ? start + len : start;
 252        sym->binding = binding;
 253        sym->namelen = namelen - 1;
 254
 255        pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
 256                  __func__, name, start, sym->end);
 257        memcpy(sym->name, name, namelen);
 258
 259        return sym;
 260}
 261
 262void symbol__delete(struct symbol *sym)
 263{
 264        free(((void *)sym) - symbol_conf.priv_size);
 265}
 266
 267void symbols__delete(struct rb_root *symbols)
 268{
 269        struct symbol *pos;
 270        struct rb_node *next = rb_first(symbols);
 271
 272        while (next) {
 273                pos = rb_entry(next, struct symbol, rb_node);
 274                next = rb_next(&pos->rb_node);
 275                rb_erase(&pos->rb_node, symbols);
 276                symbol__delete(pos);
 277        }
 278}
 279
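/*
 * Insert a symbol into an rb-tree keyed by start address. For kernel
 * symbols, a leading '.' (ppc64 function descriptors) is skipped when
 * checking whether the symbol belongs to the idle loop.
 */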
 280void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
 281{
 282        struct rb_node **p = &symbols->rb_node;
 283        struct rb_node *parent = NULL;
 284        const u64 ip = sym->start;
 285        struct symbol *s;
 286
 287        if (kernel) {
 288                const char *name = sym->name;
 289                /*
 290                 * ppc64 uses function descriptors and appends a '.' to the
 291                 * start of every instruction address. Remove it.
 292                 */
 293                if (name[0] == '.')
 294                        name++;
 295                sym->idle = symbol__is_idle(name);
 296        }
 297
 298        while (*p != NULL) {
 299                parent = *p;
 300                s = rb_entry(parent, struct symbol, rb_node);
 301                if (ip < s->start)
 302                        p = &(*p)->rb_left;
 303                else
 304                        p = &(*p)->rb_right;
 305        }
 306        rb_link_node(&sym->rb_node, parent, p);
 307        rb_insert_color(&sym->rb_node, symbols);
 308}
 309
 310void symbols__insert(struct rb_root *symbols, struct symbol *sym)
 311{
 312        __symbols__insert(symbols, sym, false);
 313}
 314
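/*
 * Look up a symbol by address: a match requires start <= ip < end, except
 * for zero-sized symbols, which match only when ip == start == end.
 */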
 315static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
 316{
 317        struct rb_node *n;
 318
 319        if (symbols == NULL)
 320                return NULL;
 321
 322        n = symbols->rb_node;
 323
 324        while (n) {
 325                struct symbol *s = rb_entry(n, struct symbol, rb_node);
 326
 327                if (ip < s->start)
 328                        n = n->rb_left;
 329                else if (ip > s->end || (ip == s->end && ip != s->start))
 330                        n = n->rb_right;
 331                else
 332                        return s;
 333        }
 334
 335        return NULL;
 336}
 337
 338static struct symbol *symbols__first(struct rb_root *symbols)
 339{
 340        struct rb_node *n = rb_first(symbols);
 341
 342        if (n)
 343                return rb_entry(n, struct symbol, rb_node);
 344
 345        return NULL;
 346}
 347
 348static struct symbol *symbols__last(struct rb_root *symbols)
 349{
 350        struct rb_node *n = rb_last(symbols);
 351
 352        if (n)
 353                return rb_entry(n, struct symbol, rb_node);
 354
 355        return NULL;
 356}
 357
 358static struct symbol *symbols__next(struct symbol *sym)
 359{
 360        struct rb_node *n = rb_next(&sym->rb_node);
 361
 362        if (n)
 363                return rb_entry(n, struct symbol, rb_node);
 364
 365        return NULL;
 366}
 367
 368static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
 369{
 370        struct rb_node **p = &symbols->rb_node;
 371        struct rb_node *parent = NULL;
 372        struct symbol_name_rb_node *symn, *s;
 373
 374        symn = container_of(sym, struct symbol_name_rb_node, sym);
 375
 376        while (*p != NULL) {
 377                parent = *p;
 378                s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
 379                if (strcmp(sym->name, s->sym.name) < 0)
 380                        p = &(*p)->rb_left;
 381                else
 382                        p = &(*p)->rb_right;
 383        }
 384        rb_link_node(&symn->rb_node, parent, p);
 385        rb_insert_color(&symn->rb_node, symbols);
 386}
 387
 388static void symbols__sort_by_name(struct rb_root *symbols,
 389                                  struct rb_root *source)
 390{
 391        struct rb_node *nd;
 392
 393        for (nd = rb_first(source); nd; nd = rb_next(nd)) {
 394                struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
 395                symbols__insert_by_name(symbols, pos);
 396        }
 397}
 398
 399static struct symbol *symbols__find_by_name(struct rb_root *symbols,
 400                                            const char *name)
 401{
 402        struct rb_node *n;
 403        struct symbol_name_rb_node *s = NULL;
 404
 405        if (symbols == NULL)
 406                return NULL;
 407
 408        n = symbols->rb_node;
 409
 410        while (n) {
 411                int cmp;
 412
 413                s = rb_entry(n, struct symbol_name_rb_node, rb_node);
 414                cmp = arch__compare_symbol_names(name, s->sym.name);
 415
 416                if (cmp < 0)
 417                        n = n->rb_left;
 418                else if (cmp > 0)
 419                        n = n->rb_right;
 420                else
 421                        break;
 422        }
 423
 424        if (n == NULL)
 425                return NULL;
 426
  427        /* return the first symbol that has the same name (if any) */
 428        for (n = rb_prev(n); n; n = rb_prev(n)) {
 429                struct symbol_name_rb_node *tmp;
 430
 431                tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
 432                if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
 433                        break;
 434
 435                s = tmp;
 436        }
 437
 438        return &s->sym;
 439}
 440
 441void dso__reset_find_symbol_cache(struct dso *dso)
 442{
 443        enum map_type type;
 444
 445        for (type = MAP__FUNCTION; type <= MAP__VARIABLE; ++type) {
 446                dso->last_find_result[type].addr   = 0;
 447                dso->last_find_result[type].symbol = NULL;
 448        }
 449}
 450
 451void dso__insert_symbol(struct dso *dso, enum map_type type, struct symbol *sym)
 452{
 453        __symbols__insert(&dso->symbols[type], sym, dso->kernel);
 454
 455        /* update the symbol cache if necessary */
 456        if (dso->last_find_result[type].addr >= sym->start &&
 457            (dso->last_find_result[type].addr < sym->end ||
 458            sym->start == sym->end)) {
 459                dso->last_find_result[type].symbol = sym;
 460        }
 461}
 462
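/*
 * Address lookup with a one-entry cache: dso->last_find_result remembers
 * the last queried address, so repeated lookups of the same address skip
 * the rb-tree walk. Typical use (sketch):
 *
 *	sym = dso__find_symbol(dso, MAP__FUNCTION, addr);
 *	if (sym)
 *		pr_debug("resolved to %s\n", sym->name);
 */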
 463struct symbol *dso__find_symbol(struct dso *dso,
 464                                enum map_type type, u64 addr)
 465{
 466        if (dso->last_find_result[type].addr != addr || dso->last_find_result[type].symbol == NULL) {
 467                dso->last_find_result[type].addr   = addr;
 468                dso->last_find_result[type].symbol = symbols__find(&dso->symbols[type], addr);
 469        }
 470
 471        return dso->last_find_result[type].symbol;
 472}
 473
 474struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
 475{
 476        return symbols__first(&dso->symbols[type]);
 477}
 478
 479struct symbol *dso__last_symbol(struct dso *dso, enum map_type type)
 480{
 481        return symbols__last(&dso->symbols[type]);
 482}
 483
 484struct symbol *dso__next_symbol(struct symbol *sym)
 485{
 486        return symbols__next(sym);
 487}
 488
 489struct symbol *symbol__next_by_name(struct symbol *sym)
 490{
 491        struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
 492        struct rb_node *n = rb_next(&s->rb_node);
 493
 494        return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
 495}
 496
  497/*
  498 * Returns the first symbol that matches @name.
  499 */
 500struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
 501                                        const char *name)
 502{
 503        return symbols__find_by_name(&dso->symbol_names[type], name);
 504}
 505
 506void dso__sort_by_name(struct dso *dso, enum map_type type)
 507{
 508        dso__set_sorted_by_name(dso, type);
 509        return symbols__sort_by_name(&dso->symbol_names[type],
 510                                     &dso->symbols[type]);
 511}
 512
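/*
 * Parse a /proc/modules style file: for each line, e.g.
 *	"ext4 610304 2 - Live 0xffffffffc05b3000",
 * the load address is read from the hex field after the last 'x' and
 * process_module() is called with the module name wrapped in brackets
 * ("[ext4]"), matching how module DSOs are named elsewhere.
 */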
 513int modules__parse(const char *filename, void *arg,
 514                   int (*process_module)(void *arg, const char *name,
 515                                         u64 start))
 516{
 517        char *line = NULL;
 518        size_t n;
 519        FILE *file;
 520        int err = 0;
 521
 522        file = fopen(filename, "r");
 523        if (file == NULL)
 524                return -1;
 525
 526        while (1) {
 527                char name[PATH_MAX];
 528                u64 start;
 529                char *sep;
 530                ssize_t line_len;
 531
 532                line_len = getline(&line, &n, file);
 533                if (line_len < 0) {
 534                        if (feof(file))
 535                                break;
 536                        err = -1;
 537                        goto out;
 538                }
 539
 540                if (!line) {
 541                        err = -1;
 542                        goto out;
 543                }
 544
 545                line[--line_len] = '\0'; /* \n */
 546
 547                sep = strrchr(line, 'x');
 548                if (sep == NULL)
 549                        continue;
 550
 551                hex2u64(sep + 1, &start);
 552
 553                sep = strchr(line, ' ');
 554                if (sep == NULL)
 555                        continue;
 556
 557                *sep = '\0';
 558
 559                scnprintf(name, sizeof(name), "[%s]", line);
 560
 561                err = process_module(arg, name, start);
 562                if (err)
 563                        break;
 564        }
 565out:
 566        free(line);
 567        fclose(file);
 568        return err;
 569}
 570
 571struct process_kallsyms_args {
 572        struct map *map;
 573        struct dso *dso;
 574};
 575
  576/*
  577 * Symbols used by the kernel's idle loop. These are symbols in the kernel
  578 * image, so callers must make sure the symbol comes from a kernel DSO.
  579 */
 580static bool symbol__is_idle(const char *name)
 581{
 582        const char * const idle_symbols[] = {
 583                "cpu_idle",
 584                "cpu_startup_entry",
 585                "intel_idle",
 586                "default_idle",
 587                "native_safe_halt",
 588                "enter_idle",
 589                "exit_idle",
 590                "mwait_idle",
 591                "mwait_idle_with_hints",
 592                "poll_idle",
 593                "ppc64_runlatch_off",
 594                "pseries_dedicated_idle_sleep",
 595                NULL
 596        };
 597        int i;
 598
 599        for (i = 0; idle_symbols[i]; i++) {
 600                if (!strcmp(idle_symbols[i], name))
 601                        return true;
 602        }
 603
 604        return false;
 605}
 606
 607static int map__process_kallsym_symbol(void *arg, const char *name,
 608                                       char type, u64 start)
 609{
 610        struct symbol *sym;
 611        struct process_kallsyms_args *a = arg;
 612        struct rb_root *root = &a->dso->symbols[a->map->type];
 613
 614        if (!symbol_type__is_a(type, a->map->type))
 615                return 0;
 616
 617        /*
 618         * module symbols are not sorted so we add all
 619         * symbols, setting length to 0, and rely on
 620         * symbols__fixup_end() to fix it up.
 621         */
 622        sym = symbol__new(start, 0, kallsyms2elf_binding(type), name);
 623        if (sym == NULL)
 624                return -ENOMEM;
  625        /*
  626         * All symbols go into the kernel dso's tree for now; they are moved to
  627         * per-module maps later by dso__split_kallsyms{,_for_kcore}().
  628         */
 629        __symbols__insert(root, sym, !strchr(name, '['));
 630
 631        return 0;
 632}
 633
 634/*
  635 * Load the entries from a kallsyms file (e.g. /proc/kallsyms) into the
  636 * kernel map's dso, so that the next step can set each symbol's ->end
  637 * address and then call dso__split_kallsyms().
 638 */
 639static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
 640                                  struct map *map)
 641{
 642        struct process_kallsyms_args args = { .map = map, .dso = dso, };
 643        return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
 644}
 645
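/*
 * With kcore maps already in place, move every kallsyms symbol from the
 * kernel dso into the dso of the map that covers its address, rebasing it
 * by that map's start and pgoff; symbols not covered by any map are
 * dropped.
 */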
 646static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map)
 647{
 648        struct map_groups *kmaps = map__kmaps(map);
 649        struct map *curr_map;
 650        struct symbol *pos;
 651        int count = 0;
 652        struct rb_root old_root = dso->symbols[map->type];
 653        struct rb_root *root = &dso->symbols[map->type];
 654        struct rb_node *next = rb_first(root);
 655
 656        if (!kmaps)
 657                return -1;
 658
 659        *root = RB_ROOT;
 660
 661        while (next) {
 662                char *module;
 663
 664                pos = rb_entry(next, struct symbol, rb_node);
 665                next = rb_next(&pos->rb_node);
 666
 667                rb_erase_init(&pos->rb_node, &old_root);
 668
 669                module = strchr(pos->name, '\t');
 670                if (module)
 671                        *module = '\0';
 672
 673                curr_map = map_groups__find(kmaps, map->type, pos->start);
 674
 675                if (!curr_map) {
 676                        symbol__delete(pos);
 677                        continue;
 678                }
 679
 680                pos->start -= curr_map->start - curr_map->pgoff;
 681                if (pos->end)
 682                        pos->end -= curr_map->start - curr_map->pgoff;
 683                symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
 684                ++count;
 685        }
 686
 687        /* Symbols have been adjusted */
 688        dso->adjust_symbols = 1;
 689
 690        return count;
 691}
 692
 693/*
 694 * Split the symbols into maps, making sure there are no overlaps, i.e. the
  695 * kernel range is broken into several maps named [kernel].N, as we don't have
  696 * the original ELF section names that vmlinux has.
 697 */
 698static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta)
 699{
 700        struct map_groups *kmaps = map__kmaps(map);
 701        struct machine *machine;
 702        struct map *curr_map = map;
 703        struct symbol *pos;
 704        int count = 0, moved = 0;
 705        struct rb_root *root = &dso->symbols[map->type];
 706        struct rb_node *next = rb_first(root);
 707        int kernel_range = 0;
 708
 709        if (!kmaps)
 710                return -1;
 711
 712        machine = kmaps->machine;
 713
 714        while (next) {
 715                char *module;
 716
 717                pos = rb_entry(next, struct symbol, rb_node);
 718                next = rb_next(&pos->rb_node);
 719
 720                module = strchr(pos->name, '\t');
 721                if (module) {
 722                        if (!symbol_conf.use_modules)
 723                                goto discard_symbol;
 724
 725                        *module++ = '\0';
 726
 727                        if (strcmp(curr_map->dso->short_name, module)) {
 728                                if (curr_map != map &&
 729                                    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
 730                                    machine__is_default_guest(machine)) {
 731                                        /*
 732                                         * We assume all symbols of a module are
  733                                         * contiguous in kallsyms, so curr_map
 734                                         * points to a module and all its
 735                                         * symbols are in its kmap. Mark it as
 736                                         * loaded.
 737                                         */
 738                                        dso__set_loaded(curr_map->dso,
 739                                                        curr_map->type);
 740                                }
 741
 742                                curr_map = map_groups__find_by_name(kmaps,
 743                                                        map->type, module);
 744                                if (curr_map == NULL) {
 745                                        pr_debug("%s/proc/{kallsyms,modules} "
 746                                                 "inconsistency while looking "
 747                                                 "for \"%s\" module!\n",
 748                                                 machine->root_dir, module);
 749                                        curr_map = map;
 750                                        goto discard_symbol;
 751                                }
 752
 753                                if (curr_map->dso->loaded &&
 754                                    !machine__is_default_guest(machine))
 755                                        goto discard_symbol;
 756                        }
 757                        /*
  758                         * Adjust so symbols look just like those read from
  759                         * .ko files, i.e. not prelinked, relative to map->start.
 760                         */
 761                        pos->start = curr_map->map_ip(curr_map, pos->start);
 762                        pos->end   = curr_map->map_ip(curr_map, pos->end);
 763                } else if (curr_map != map) {
 764                        char dso_name[PATH_MAX];
 765                        struct dso *ndso;
 766
 767                        if (delta) {
 768                                /* Kernel was relocated at boot time */
 769                                pos->start -= delta;
 770                                pos->end -= delta;
 771                        }
 772
 773                        if (count == 0) {
 774                                curr_map = map;
 775                                goto add_symbol;
 776                        }
 777
 778                        if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
 779                                snprintf(dso_name, sizeof(dso_name),
 780                                        "[guest.kernel].%d",
 781                                        kernel_range++);
 782                        else
 783                                snprintf(dso_name, sizeof(dso_name),
 784                                        "[kernel].%d",
 785                                        kernel_range++);
 786
 787                        ndso = dso__new(dso_name);
 788                        if (ndso == NULL)
 789                                return -1;
 790
 791                        ndso->kernel = dso->kernel;
 792
 793                        curr_map = map__new2(pos->start, ndso, map->type);
 794                        if (curr_map == NULL) {
 795                                dso__put(ndso);
 796                                return -1;
 797                        }
 798
 799                        curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
 800                        map_groups__insert(kmaps, curr_map);
 801                        ++kernel_range;
 802                } else if (delta) {
 803                        /* Kernel was relocated at boot time */
 804                        pos->start -= delta;
 805                        pos->end -= delta;
 806                }
 807add_symbol:
 808                if (curr_map != map) {
 809                        rb_erase(&pos->rb_node, root);
 810                        symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
 811                        ++moved;
 812                } else
 813                        ++count;
 814
 815                continue;
 816discard_symbol:
 817                rb_erase(&pos->rb_node, root);
 818                symbol__delete(pos);
 819        }
 820
 821        if (curr_map != map &&
 822            dso->kernel == DSO_TYPE_GUEST_KERNEL &&
 823            machine__is_default_guest(kmaps->machine)) {
 824                dso__set_loaded(curr_map->dso, curr_map->type);
 825        }
 826
 827        return count + moved;
 828}
 829
 830bool symbol__restricted_filename(const char *filename,
 831                                 const char *restricted_filename)
 832{
 833        bool restricted = false;
 834
 835        if (symbol_conf.kptr_restrict) {
 836                char *r = realpath(filename, NULL);
 837
 838                if (r != NULL) {
 839                        restricted = strcmp(r, restricted_filename) == 0;
 840                        free(r);
 841                        return restricted;
 842                }
 843        }
 844
 845        return restricted;
 846}
 847
 848struct module_info {
 849        struct rb_node rb_node;
 850        char *name;
 851        u64 start;
 852};
 853
 854static void add_module(struct module_info *mi, struct rb_root *modules)
 855{
 856        struct rb_node **p = &modules->rb_node;
 857        struct rb_node *parent = NULL;
 858        struct module_info *m;
 859
 860        while (*p != NULL) {
 861                parent = *p;
 862                m = rb_entry(parent, struct module_info, rb_node);
 863                if (strcmp(mi->name, m->name) < 0)
 864                        p = &(*p)->rb_left;
 865                else
 866                        p = &(*p)->rb_right;
 867        }
 868        rb_link_node(&mi->rb_node, parent, p);
 869        rb_insert_color(&mi->rb_node, modules);
 870}
 871
 872static void delete_modules(struct rb_root *modules)
 873{
 874        struct module_info *mi;
 875        struct rb_node *next = rb_first(modules);
 876
 877        while (next) {
 878                mi = rb_entry(next, struct module_info, rb_node);
 879                next = rb_next(&mi->rb_node);
 880                rb_erase(&mi->rb_node, modules);
 881                zfree(&mi->name);
 882                free(mi);
 883        }
 884}
 885
 886static struct module_info *find_module(const char *name,
 887                                       struct rb_root *modules)
 888{
 889        struct rb_node *n = modules->rb_node;
 890
 891        while (n) {
 892                struct module_info *m;
 893                int cmp;
 894
 895                m = rb_entry(n, struct module_info, rb_node);
 896                cmp = strcmp(name, m->name);
 897                if (cmp < 0)
 898                        n = n->rb_left;
 899                else if (cmp > 0)
 900                        n = n->rb_right;
 901                else
 902                        return m;
 903        }
 904
 905        return NULL;
 906}
 907
 908static int __read_proc_modules(void *arg, const char *name, u64 start)
 909{
 910        struct rb_root *modules = arg;
 911        struct module_info *mi;
 912
 913        mi = zalloc(sizeof(struct module_info));
 914        if (!mi)
 915                return -ENOMEM;
 916
 917        mi->name = strdup(name);
 918        mi->start = start;
 919
 920        if (!mi->name) {
 921                free(mi);
 922                return -ENOMEM;
 923        }
 924
 925        add_module(mi, modules);
 926
 927        return 0;
 928}
 929
 930static int read_proc_modules(const char *filename, struct rb_root *modules)
 931{
 932        if (symbol__restricted_filename(filename, "/proc/modules"))
 933                return -1;
 934
 935        if (modules__parse(filename, modules, __read_proc_modules)) {
 936                delete_modules(modules);
 937                return -1;
 938        }
 939
 940        return 0;
 941}
 942
 943int compare_proc_modules(const char *from, const char *to)
 944{
 945        struct rb_root from_modules = RB_ROOT;
 946        struct rb_root to_modules = RB_ROOT;
 947        struct rb_node *from_node, *to_node;
 948        struct module_info *from_m, *to_m;
 949        int ret = -1;
 950
 951        if (read_proc_modules(from, &from_modules))
 952                return -1;
 953
 954        if (read_proc_modules(to, &to_modules))
 955                goto out_delete_from;
 956
 957        from_node = rb_first(&from_modules);
 958        to_node = rb_first(&to_modules);
 959        while (from_node) {
 960                if (!to_node)
 961                        break;
 962
 963                from_m = rb_entry(from_node, struct module_info, rb_node);
 964                to_m = rb_entry(to_node, struct module_info, rb_node);
 965
 966                if (from_m->start != to_m->start ||
 967                    strcmp(from_m->name, to_m->name))
 968                        break;
 969
 970                from_node = rb_next(from_node);
 971                to_node = rb_next(to_node);
 972        }
 973
 974        if (!from_node && !to_node)
 975                ret = 0;
 976
 977        delete_modules(&to_modules);
 978out_delete_from:
 979        delete_modules(&from_modules);
 980
 981        return ret;
 982}
 983
 984static int do_validate_kcore_modules(const char *filename, struct map *map,
 985                                  struct map_groups *kmaps)
 986{
 987        struct rb_root modules = RB_ROOT;
 988        struct map *old_map;
 989        int err;
 990
 991        err = read_proc_modules(filename, &modules);
 992        if (err)
 993                return err;
 994
 995        old_map = map_groups__first(kmaps, map->type);
 996        while (old_map) {
 997                struct map *next = map_groups__next(old_map);
 998                struct module_info *mi;
 999
1000                if (old_map == map || old_map->start == map->start) {
1001                        /* The kernel map */
1002                        old_map = next;
1003                        continue;
1004                }
1005
1006                /* Module must be in memory at the same address */
1007                mi = find_module(old_map->dso->short_name, &modules);
1008                if (!mi || mi->start != old_map->start) {
1009                        err = -EINVAL;
1010                        goto out;
1011                }
1012
1013                old_map = next;
1014        }
1015out:
1016        delete_modules(&modules);
1017        return err;
1018}
1019
1020/*
1021 * If kallsyms was given as a filename, look for @base_name (e.g. "kcore" or
1022 * "modules") in the same directory.
1023 */
1024static bool filename_from_kallsyms_filename(char *filename,
1025                                            const char *base_name,
1026                                            const char *kallsyms_filename)
1027{
1028        char *name;
1029
1030        strcpy(filename, kallsyms_filename);
1031        name = strrchr(filename, '/');
1032        if (!name)
1033                return false;
1034
1035        name += 1;
1036
1037        if (!strcmp(name, "kallsyms")) {
1038                strcpy(name, base_name);
1039                return true;
1040        }
1041
1042        return false;
1043}
1044
1045static int validate_kcore_modules(const char *kallsyms_filename,
1046                                  struct map *map)
1047{
1048        struct map_groups *kmaps = map__kmaps(map);
1049        char modules_filename[PATH_MAX];
1050
1051        if (!kmaps)
1052                return -EINVAL;
1053
1054        if (!filename_from_kallsyms_filename(modules_filename, "modules",
1055                                             kallsyms_filename))
1056                return -EINVAL;
1057
1058        if (do_validate_kcore_modules(modules_filename, map, kmaps))
1059                return -EINVAL;
1060
1061        return 0;
1062}
1063
1064static int validate_kcore_addresses(const char *kallsyms_filename,
1065                                    struct map *map)
1066{
1067        struct kmap *kmap = map__kmap(map);
1068
1069        if (!kmap)
1070                return -EINVAL;
1071
1072        if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
1073                u64 start;
1074
1075                if (kallsyms__get_function_start(kallsyms_filename,
1076                                                 kmap->ref_reloc_sym->name, &start))
1077                        return -ENOENT;
1078                if (start != kmap->ref_reloc_sym->addr)
1079                        return -EINVAL;
1080        }
1081
1082        return validate_kcore_modules(kallsyms_filename, map);
1083}
1084
1085struct kcore_mapfn_data {
1086        struct dso *dso;
1087        enum map_type type;
1088        struct list_head maps;
1089};
1090
1091static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1092{
1093        struct kcore_mapfn_data *md = data;
1094        struct map *map;
1095
1096        map = map__new2(start, md->dso, md->type);
1097        if (map == NULL)
1098                return -ENOMEM;
1099
1100        map->end = map->start + len;
1101        map->pgoff = pgoff;
1102
1103        list_add(&map->node, &md->maps);
1104
1105        return 0;
1106}
1107
1108static int dso__load_kcore(struct dso *dso, struct map *map,
1109                           const char *kallsyms_filename)
1110{
1111        struct map_groups *kmaps = map__kmaps(map);
1112        struct machine *machine;
1113        struct kcore_mapfn_data md;
1114        struct map *old_map, *new_map, *replacement_map = NULL;
1115        bool is_64_bit;
1116        int err, fd;
1117        char kcore_filename[PATH_MAX];
1118        struct symbol *sym;
1119
1120        if (!kmaps)
1121                return -EINVAL;
1122
1123        machine = kmaps->machine;
1124
1125        /* This function requires that the map is the kernel map */
1126        if (map != machine->vmlinux_maps[map->type])
1127                return -EINVAL;
1128
1129        if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
1130                                             kallsyms_filename))
1131                return -EINVAL;
1132
1133        /* Modules and kernel must be present at their original addresses */
1134        if (validate_kcore_addresses(kallsyms_filename, map))
1135                return -EINVAL;
1136
1137        md.dso = dso;
1138        md.type = map->type;
1139        INIT_LIST_HEAD(&md.maps);
1140
1141        fd = open(kcore_filename, O_RDONLY);
1142        if (fd < 0) {
1143                pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
1144                         kcore_filename);
1145                return -EINVAL;
1146        }
1147
1148        /* Read new maps into temporary lists */
1149        err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
1150                              &is_64_bit);
1151        if (err)
1152                goto out_err;
1153        dso->is_64_bit = is_64_bit;
1154
1155        if (list_empty(&md.maps)) {
1156                err = -EINVAL;
1157                goto out_err;
1158        }
1159
1160        /* Remove old maps */
1161        old_map = map_groups__first(kmaps, map->type);
1162        while (old_map) {
1163                struct map *next = map_groups__next(old_map);
1164
1165                if (old_map != map)
1166                        map_groups__remove(kmaps, old_map);
1167                old_map = next;
1168        }
1169
1170        /* Find the kernel map using the first symbol */
1171        sym = dso__first_symbol(dso, map->type);
1172        list_for_each_entry(new_map, &md.maps, node) {
1173                if (sym && sym->start >= new_map->start &&
1174                    sym->start < new_map->end) {
1175                        replacement_map = new_map;
1176                        break;
1177                }
1178        }
1179
1180        if (!replacement_map)
1181                replacement_map = list_entry(md.maps.next, struct map, node);
1182
1183        /* Add new maps */
1184        while (!list_empty(&md.maps)) {
1185                new_map = list_entry(md.maps.next, struct map, node);
1186                list_del_init(&new_map->node);
1187                if (new_map == replacement_map) {
1188                        map->start      = new_map->start;
1189                        map->end        = new_map->end;
1190                        map->pgoff      = new_map->pgoff;
1191                        map->map_ip     = new_map->map_ip;
1192                        map->unmap_ip   = new_map->unmap_ip;
1193                        /* Ensure maps are correctly ordered */
1194                        map__get(map);
1195                        map_groups__remove(kmaps, map);
1196                        map_groups__insert(kmaps, map);
1197                        map__put(map);
1198                } else {
1199                        map_groups__insert(kmaps, new_map);
1200                }
1201
1202                map__put(new_map);
1203        }
1204
1205        /*
1206         * Set the data type and long name so that kcore can be read via
1207         * dso__data_read_addr().
1208         */
1209        if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1210                dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
1211        else
1212                dso->binary_type = DSO_BINARY_TYPE__KCORE;
1213        dso__set_long_name(dso, strdup(kcore_filename), true);
1214
1215        close(fd);
1216
1217        if (map->type == MAP__FUNCTION)
1218                pr_debug("Using %s for kernel object code\n", kcore_filename);
1219        else
1220                pr_debug("Using %s for kernel data\n", kcore_filename);
1221
1222        return 0;
1223
1224out_err:
1225        while (!list_empty(&md.maps)) {
1226                map = list_entry(md.maps.next, struct map, node);
1227                list_del_init(&map->node);
1228                map__put(map);
1229        }
1230        close(fd);
1231        return -EINVAL;
1232}
1233
1234/*
1235 * If the kernel is relocated at boot time, kallsyms won't match.  Compute the
1236 * delta based on the relocation reference symbol.
1237 */
1238static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
1239{
1240        struct kmap *kmap = map__kmap(map);
1241        u64 addr;
1242
1243        if (!kmap)
1244                return -1;
1245
1246        if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1247                return 0;
1248
1249        if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
1250                return -1;
1251
1252        *delta = addr - kmap->ref_reloc_sym->addr;
1253        return 0;
1254}
1255
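/*
 * Load kernel symbols from a kallsyms file: read all symbols, compute the
 * boot-time relocation delta from the reference symbol, fix up symbol ends
 * and duplicates, then split the symbols per module, either against kcore
 * maps (unless @no_kcore or kcore is unavailable) or from kallsyms alone.
 */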
1256int __dso__load_kallsyms(struct dso *dso, const char *filename,
1257                         struct map *map, bool no_kcore)
1258{
1259        u64 delta = 0;
1260
1261        if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1262                return -1;
1263
1264        if (dso__load_all_kallsyms(dso, filename, map) < 0)
1265                return -1;
1266
1267        if (kallsyms__delta(map, filename, &delta))
1268                return -1;
1269
1270        symbols__fixup_end(&dso->symbols[map->type]);
1271        symbols__fixup_duplicate(&dso->symbols[map->type]);
1272
1273        if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1274                dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1275        else
1276                dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1277
1278        if (!no_kcore && !dso__load_kcore(dso, map, filename))
1279                return dso__split_kallsyms_for_kcore(dso, map);
1280        else
1281                return dso__split_kallsyms(dso, map, delta);
1282}
1283
1284int dso__load_kallsyms(struct dso *dso, const char *filename,
1285                       struct map *map)
1286{
1287        return __dso__load_kallsyms(dso, filename, map, false);
1288}
1289
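/*
 * Parse a /tmp/perf-*.map style file where each line is expected to be
 * "<start> <size> <name>" with hex start and size, creating one STB_GLOBAL
 * symbol per line; returns the number of symbols added or -1 on error.
 */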
1290static int dso__load_perf_map(struct dso *dso, struct map *map)
1291{
1292        char *line = NULL;
1293        size_t n;
1294        FILE *file;
1295        int nr_syms = 0;
1296
1297        file = fopen(dso->long_name, "r");
1298        if (file == NULL)
1299                goto out_failure;
1300
1301        while (!feof(file)) {
1302                u64 start, size;
1303                struct symbol *sym;
1304                int line_len, len;
1305
1306                line_len = getline(&line, &n, file);
1307                if (line_len < 0)
1308                        break;
1309
1310                if (!line)
1311                        goto out_failure;
1312
1313                line[--line_len] = '\0'; /* \n */
1314
1315                len = hex2u64(line, &start);
1316
1317                len++;
1318                if (len + 2 >= line_len)
1319                        continue;
1320
1321                len += hex2u64(line + len, &size);
1322
1323                len++;
1324                if (len + 2 >= line_len)
1325                        continue;
1326
1327                sym = symbol__new(start, size, STB_GLOBAL, line + len);
1328
1329                if (sym == NULL)
1330                        goto out_delete_line;
1331
1332                symbols__insert(&dso->symbols[map->type], sym);
1333                nr_syms++;
1334        }
1335
1336        free(line);
1337        fclose(file);
1338
1339        return nr_syms;
1340
1341out_delete_line:
1342        free(line);
1343out_failure:
1344        return -1;
1345}
1346
1347static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1348                                           enum dso_binary_type type)
1349{
1350        switch (type) {
1351        case DSO_BINARY_TYPE__JAVA_JIT:
1352        case DSO_BINARY_TYPE__DEBUGLINK:
1353        case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
1354        case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
1355        case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
1356        case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
1357        case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1358                return !kmod && dso->kernel == DSO_TYPE_USER;
1359
1360        case DSO_BINARY_TYPE__KALLSYMS:
1361        case DSO_BINARY_TYPE__VMLINUX:
1362        case DSO_BINARY_TYPE__KCORE:
1363                return dso->kernel == DSO_TYPE_KERNEL;
1364
1365        case DSO_BINARY_TYPE__GUEST_KALLSYMS:
1366        case DSO_BINARY_TYPE__GUEST_VMLINUX:
1367        case DSO_BINARY_TYPE__GUEST_KCORE:
1368                return dso->kernel == DSO_TYPE_GUEST_KERNEL;
1369
1370        case DSO_BINARY_TYPE__GUEST_KMODULE:
1371        case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1372        case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1373        case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1374                /*
1375                 * kernel modules know their symtab type - it's set when
1376                 * creating a module dso in machine__findnew_module_map().
1377                 */
1378                return kmod && dso->symtab_type == type;
1379
1380        case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1381                return true;
1382
1383        case DSO_BINARY_TYPE__NOT_FOUND:
1384        default:
1385                return false;
1386        }
1387}
1388
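/*
 * Main entry point for loading a DSO's symbol table. Kernel and guest
 * kernel DSOs are handed to the kallsyms/vmlinux loaders, /tmp/perf-* files
 * are parsed as JIT maps, and everything else iterates over the candidate
 * binary types in binary_type_symtab, keeping at most one source with a
 * symtab (syms_ss) and one runtime image (runtime_ss) to feed to
 * dso__load_sym().
 */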
1389int dso__load(struct dso *dso, struct map *map)
1390{
1391        char *name;
1392        int ret = -1;
1393        u_int i;
1394        struct machine *machine;
1395        char *root_dir = (char *) "";
1396        int ss_pos = 0;
1397        struct symsrc ss_[2];
1398        struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1399        bool kmod;
1400        unsigned char build_id[BUILD_ID_SIZE];
1401
1402        pthread_mutex_lock(&dso->lock);
1403
1404        /* check again under the dso->lock */
1405        if (dso__loaded(dso, map->type)) {
1406                ret = 1;
1407                goto out;
1408        }
1409
1410        if (dso->kernel) {
1411                if (dso->kernel == DSO_TYPE_KERNEL)
1412                        ret = dso__load_kernel_sym(dso, map);
1413                else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1414                        ret = dso__load_guest_kernel_sym(dso, map);
1415
1416                goto out;
1417        }
1418
1419        if (map->groups && map->groups->machine)
1420                machine = map->groups->machine;
1421        else
1422                machine = NULL;
1423
1424        dso->adjust_symbols = 0;
1425
1426        if (strncmp(dso->name, "/tmp/perf-", 10) == 0) {
1427                struct stat st;
1428
1429                if (lstat(dso->name, &st) < 0)
1430                        goto out;
1431
1432                if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) {
1433                        pr_warning("File %s not owned by current user or root, "
1434                                   "ignoring it (use -f to override).\n", dso->name);
1435                        goto out;
1436                }
1437
1438                ret = dso__load_perf_map(dso, map);
1439                dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1440                                             DSO_BINARY_TYPE__NOT_FOUND;
1441                goto out;
1442        }
1443
1444        if (machine)
1445                root_dir = machine->root_dir;
1446
1447        name = malloc(PATH_MAX);
1448        if (!name)
1449                goto out;
1450
1451        kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1452                dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1453                dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1454                dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1455
1456
1457        /*
1458         * Read the build id if possible. This is required for
 1459         * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work.
1460         */
1461        if (!dso->has_build_id &&
1462            is_regular_file(dso->long_name) &&
1463            filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
1464                dso__set_build_id(dso, build_id);
1465
1466        /*
1467         * Iterate over candidate debug images.
1468         * Keep track of "interesting" ones (those which have a symtab, dynsym,
1469         * and/or opd section) for processing.
1470         */
1471        for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
1472                struct symsrc *ss = &ss_[ss_pos];
1473                bool next_slot = false;
1474
1475                enum dso_binary_type symtab_type = binary_type_symtab[i];
1476
1477                if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
1478                        continue;
1479
1480                if (dso__read_binary_type_filename(dso, symtab_type,
1481                                                   root_dir, name, PATH_MAX))
1482                        continue;
1483
1484                if (!is_regular_file(name))
1485                        continue;
1486
1487                /* Name is now the name of the next image to try */
1488                if (symsrc__init(ss, dso, name, symtab_type) < 0)
1489                        continue;
1490
1491                if (!syms_ss && symsrc__has_symtab(ss)) {
1492                        syms_ss = ss;
1493                        next_slot = true;
1494                        if (!dso->symsrc_filename)
1495                                dso->symsrc_filename = strdup(name);
1496                }
1497
1498                if (!runtime_ss && symsrc__possibly_runtime(ss)) {
1499                        runtime_ss = ss;
1500                        next_slot = true;
1501                }
1502
1503                if (next_slot) {
1504                        ss_pos++;
1505
1506                        if (syms_ss && runtime_ss)
1507                                break;
1508                } else {
1509                        symsrc__destroy(ss);
1510                }
1511
1512        }
1513
1514        if (!runtime_ss && !syms_ss)
1515                goto out_free;
1516
1517        if (runtime_ss && !syms_ss) {
1518                syms_ss = runtime_ss;
1519        }
1520
1521        /* We'll have to hope for the best */
1522        if (!runtime_ss && syms_ss)
1523                runtime_ss = syms_ss;
1524
1525        if (syms_ss && syms_ss->type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
1526                if (dso__build_id_is_kmod(dso, name, PATH_MAX))
1527                        kmod = true;
1528
1529        if (syms_ss)
1530                ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
1531        else
1532                ret = -1;
1533
1534        if (ret > 0) {
1535                int nr_plt;
1536
1537                nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map);
1538                if (nr_plt > 0)
1539                        ret += nr_plt;
1540        }
1541
1542        for (; ss_pos > 0; ss_pos--)
1543                symsrc__destroy(&ss_[ss_pos - 1]);
1544out_free:
1545        free(name);
1546        if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1547                ret = 0;
1548out:
1549        dso__set_loaded(dso, map->type);
1550        pthread_mutex_unlock(&dso->lock);
1551
1552        return ret;
1553}
1554
1555struct map *map_groups__find_by_name(struct map_groups *mg,
1556                                     enum map_type type, const char *name)
1557{
1558        struct maps *maps = &mg->maps[type];
1559        struct map *map;
1560
1561        pthread_rwlock_rdlock(&maps->lock);
1562
1563        for (map = maps__first(maps); map; map = map__next(map)) {
1564                if (map->dso && strcmp(map->dso->short_name, name) == 0)
1565                        goto out_unlock;
1566        }
1567
1568        map = NULL;
1569
1570out_unlock:
1571        pthread_rwlock_unlock(&maps->lock);
1572        return map;
1573}
1574
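/*
 * Load symbols from a vmlinux image; relative paths are resolved against
 * symbol_conf.symfs. On success the dso's binary type and long name are
 * updated to record where the symbols came from.
 */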
1575int dso__load_vmlinux(struct dso *dso, struct map *map,
1576                      const char *vmlinux, bool vmlinux_allocated)
1577{
1578        int err = -1;
1579        struct symsrc ss;
1580        char symfs_vmlinux[PATH_MAX];
1581        enum dso_binary_type symtab_type;
1582
1583        if (vmlinux[0] == '/')
1584                snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
1585        else
1586                symbol__join_symfs(symfs_vmlinux, vmlinux);
1587
1588        if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1589                symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1590        else
1591                symtab_type = DSO_BINARY_TYPE__VMLINUX;
1592
1593        if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
1594                return -1;
1595
1596        err = dso__load_sym(dso, map, &ss, &ss, 0);
1597        symsrc__destroy(&ss);
1598
1599        if (err > 0) {
1600                if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1601                        dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1602                else
1603                        dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
1604                dso__set_long_name(dso, vmlinux, vmlinux_allocated);
1605                dso__set_loaded(dso, map->type);
1606                pr_debug("Using %s for symbols\n", symfs_vmlinux);
1607        }
1608
1609        return err;
1610}
1611
1612int dso__load_vmlinux_path(struct dso *dso, struct map *map)
1613{
1614        int i, err = 0;
1615        char *filename = NULL;
1616
1617        pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1618                 vmlinux_path__nr_entries + 1);
1619
1620        for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1621                err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
1622                if (err > 0)
1623                        goto out;
1624        }
1625
1626        if (!symbol_conf.ignore_vmlinux_buildid)
1627                filename = dso__build_id_filename(dso, NULL, 0);
1628        if (filename != NULL) {
1629                err = dso__load_vmlinux(dso, map, filename, true);
1630                if (err > 0)
1631                        goto out;
1632                free(filename);
1633        }
1634out:
1635        return err;
1636}
1637
1638static bool visible_dir_filter(const char *name, struct dirent *d)
1639{
1640        if (d->d_type != DT_DIR)
1641                return false;
1642        return lsdir_no_dot_filter(name, d);
1643}
1644
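/*
 * Scan the subdirectories of @dir for a kallsyms file whose addresses
 * validate against the current kernel map; on success @dir is overwritten
 * with the path to that kallsyms file.
 */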
1645static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
1646{
1647        char kallsyms_filename[PATH_MAX];
1648        int ret = -1;
1649        struct strlist *dirs;
1650        struct str_node *nd;
1651
1652        dirs = lsdir(dir, visible_dir_filter);
1653        if (!dirs)
1654                return -1;
1655
1656        strlist__for_each_entry(nd, dirs) {
1657                scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
1658                          "%s/%s/kallsyms", dir, nd->s);
1659                if (!validate_kcore_addresses(kallsyms_filename, map)) {
1660                        strlcpy(dir, kallsyms_filename, dir_sz);
1661                        ret = 0;
1662                        break;
1663                }
1664        }
1665
1666        strlist__delete(dirs);
1667
1668        return ret;
1669}
1670
1671/*
1672 * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
1673 * since access(R_OK) only checks the real UID/GID, while open() uses the effective
1674 * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
1675 */
1676static bool filename__readable(const char *file)
1677{
1678        int fd = open(file, O_RDONLY);
1679        if (fd < 0)
1680                return false;
1681        close(fd);
1682        return true;
1683}
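
/*
 * Worked example of the pitfall described above (standalone, hypothetical):
 * in a process whose real and effective credentials differ, the two checks
 * can disagree, e.g. for a root-owned, mode 0400 file:
 *
 *        access("/proc/kcore", R_OK);        judged against the real UID/GID
 *        filename__readable("/proc/kcore");  actually attempts the open(), so
 *                                            it reflects effective IDs and caps
 *
 * which is why the open() based helper is used for /proc/kcore below.
 */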
1684
1685static char *dso__find_kallsyms(struct dso *dso, struct map *map)
1686{
1687        u8 host_build_id[BUILD_ID_SIZE];
1688        char sbuild_id[SBUILD_ID_SIZE];
1689        bool is_host = false;
1690        char path[PATH_MAX];
1691
1692        if (!dso->has_build_id) {
1693                /*
1694                 * Last resort, if we don't have a build-id and couldn't find
1695                 * any vmlinux file, try the running kernel kallsyms table.
1696                 */
1697                goto proc_kallsyms;
1698        }
1699
1700        if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
1701                                 sizeof(host_build_id)) == 0)
1702                is_host = dso__build_id_equal(dso, host_build_id);
1703
1704        /* Try a fast path for /proc/kallsyms if possible */
1705        if (is_host) {
1706                /*
1707                 * Do not check the build-id cache unless we know we cannot use
1708                 * /proc/kcore or the module maps don't match /proc/kallsyms.
1709                 * To check readability of /proc/kcore, do not use access(R_OK),
1710                 * since reading /proc/kcore requires CAP_SYS_RAWIO, which
1711                 * access() cannot check for.
1712                 */
1713                if (filename__readable("/proc/kcore") &&
1714                    !validate_kcore_addresses("/proc/kallsyms", map))
1715                        goto proc_kallsyms;
1716        }
1717
1718        build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1719
1720        /* Find kallsyms in build-id cache with kcore */
1721        scnprintf(path, sizeof(path), "%s/%s/%s",
1722                  buildid_dir, DSO__NAME_KCORE, sbuild_id);
1723
1724        if (!find_matching_kcore(map, path, sizeof(path)))
1725                return strdup(path);
1726
1727        /* Use current /proc/kallsyms if possible */
1728        if (is_host) {
1729proc_kallsyms:
1730                return strdup("/proc/kallsyms");
1731        }
1732
1733        /* Finally, find a cache of kallsyms */
1734        if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
1735                pr_err("No kallsyms or vmlinux with build-id %s was found\n",
1736                       sbuild_id);
1737                return NULL;
1738        }
1739
1740        return strdup(path);
1741}
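
/*
 * Informal recap of the resolution order above, with an indicative cache
 * path spelled out from the scnprintf() format (layout shown only as an
 * illustration):
 *
 *        1) no build-id                        -> "/proc/kallsyms"
 *        2) host build-id + usable /proc/kcore -> "/proc/kallsyms"
 *        3) kcore copy in the build-id cache   ->
 *           <buildid_dir>/<DSO__NAME_KCORE>/<sbuild_id>/<subdir>/kallsyms
 *        4) host build-id, no usable kcore     -> "/proc/kallsyms"
 *        5) otherwise                          -> build_id_cache__kallsyms_path()
 */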
1742
1743static int dso__load_kernel_sym(struct dso *dso, struct map *map)
1744{
1745        int err;
1746        const char *kallsyms_filename = NULL;
1747        char *kallsyms_allocated_filename = NULL;
1748        /*
1749         * Step 1: if the user specified a kallsyms or vmlinux filename, use
1750         * it and only it, reporting errors to the user if it cannot be used.
1751         *
1752         * For instance, try to analyse an ARM perf.data file _without_ a
1753         * build-id, or if the user specifies the wrong path to the right
1754         * vmlinux file, obviously we can't fall back to another vmlinux (an
1755         * x86_64 one, on the machine where analysis is being performed, say),
1756         * or worse, /proc/kallsyms.
1757         *
1758         * If the specified file _has_ a build-id and there is a build-id
1759         * section in the perf.data file, we will still do the expected
1760         * validation in dso__load_vmlinux and will bail out if they don't
1761         * match.
1762         */
1763        if (symbol_conf.kallsyms_name != NULL) {
1764                kallsyms_filename = symbol_conf.kallsyms_name;
1765                goto do_kallsyms;
1766        }
1767
1768        if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
1769                return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
1770        }
1771
1772        if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
1773                err = dso__load_vmlinux_path(dso, map);
1774                if (err > 0)
1775                        return err;
1776        }
1777
1778        /* do not try local files if a symfs was given */
1779        if (symbol_conf.symfs[0] != 0)
1780                return -1;
1781
1782        kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
1783        if (!kallsyms_allocated_filename)
1784                return -1;
1785
1786        kallsyms_filename = kallsyms_allocated_filename;
1787
1788do_kallsyms:
1789        err = dso__load_kallsyms(dso, kallsyms_filename, map);
1790        if (err > 0)
1791                pr_debug("Using %s for symbols\n", kallsyms_filename);
1792        free(kallsyms_allocated_filename);
1793
1794        if (err > 0 && !dso__is_kcore(dso)) {
1795                dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
1796                dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
1797                map__fixup_start(map);
1798                map__fixup_end(map);
1799        }
1800
1801        return err;
1802}
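
/*
 * The "use it and only it" rule in step 1 relies on symbol_conf having been
 * filled in before symbol loading runs.  A simplified, hypothetical sketch
 * of that setup (the path and surrounding option parsing are made up):
 *
 *        symbol_conf.vmlinux_name = "/tmp/vmlinux.arm";  e.g. from a -k option
 *        if (symbol__init(NULL) < 0)
 *                return -1;
 *        ...
 *        dso__load_kernel_sym() will now try only that file.
 */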
1803
1804static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
1805{
1806        int err;
1807        const char *kallsyms_filename = NULL;
1808        struct machine *machine;
1809        char path[PATH_MAX];
1810
1811        if (!map->groups) {
1812                pr_debug("Guest kernel map has no pointer to groups\n");
1813                return -1;
1814        }
1815        machine = map->groups->machine;
1816
1817        if (machine__is_default_guest(machine)) {
1818                /*
1819                 * If the user specified a vmlinux filename, use it and only
1820                 * it, reporting errors to the user if it cannot be used.
1821                 * Otherwise, use the guest kallsyms file given on the command line.
1822                 */
1823                if (symbol_conf.default_guest_vmlinux_name != NULL) {
1824                        err = dso__load_vmlinux(dso, map,
1825                                                symbol_conf.default_guest_vmlinux_name,
1826                                                false);
1827                        return err;
1828                }
1829
1830                kallsyms_filename = symbol_conf.default_guest_kallsyms;
1831                if (!kallsyms_filename)
1832                        return -1;
1833        } else {
1834                scnprintf(path, sizeof(path), "%s/proc/kallsyms", machine->root_dir);
1835                kallsyms_filename = path;
1836        }
1837
1838        err = dso__load_kallsyms(dso, kallsyms_filename, map);
1839        if (err > 0)
1840                pr_debug("Using %s for symbols\n", kallsyms_filename);
1841        if (err > 0 && !dso__is_kcore(dso)) {
1842                dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1843                machine__mmap_name(machine, path, sizeof(path));
1844                dso__set_long_name(dso, strdup(path), true);
1845                map__fixup_start(map);
1846                map__fixup_end(map);
1847        }
1848
1849        return err;
1850}
1851
1852static void vmlinux_path__exit(void)
1853{
1854        while (--vmlinux_path__nr_entries >= 0)
1855                zfree(&vmlinux_path[vmlinux_path__nr_entries]);
1856        vmlinux_path__nr_entries = 0;
1857
1858        zfree(&vmlinux_path);
1859}
1860
1861static const char * const vmlinux_paths[] = {
1862        "vmlinux",
1863        "/boot/vmlinux"
1864};
1865
1866static const char * const vmlinux_paths_upd[] = {
1867        "/boot/vmlinux-%s",
1868        "/usr/lib/debug/boot/vmlinux-%s",
1869        "/lib/modules/%s/build/vmlinux",
1870        "/usr/lib/debug/lib/modules/%s/vmlinux",
1871        "/usr/lib/debug/boot/vmlinux-%s.debug"
1872};
1873
1874static int vmlinux_path__add(const char *new_entry)
1875{
1876        vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
1877        if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
1878                return -1;
1879        ++vmlinux_path__nr_entries;
1880
1881        return 0;
1882}
1883
1884static int vmlinux_path__init(struct perf_env *env)
1885{
1886        struct utsname uts;
1887        char bf[PATH_MAX];
1888        char *kernel_version;
1889        unsigned int i;
1890
1891        vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
1892                              ARRAY_SIZE(vmlinux_paths_upd)));
1893        if (vmlinux_path == NULL)
1894                return -1;
1895
1896        for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
1897                if (vmlinux_path__add(vmlinux_paths[i]) < 0)
1898                        goto out_fail;
1899
1900        /* only try kernel version if no symfs was given */
1901        if (symbol_conf.symfs[0] != 0)
1902                return 0;
1903
1904        if (env) {
1905                kernel_version = env->os_release;
1906        } else {
1907                if (uname(&uts) < 0)
1908                        goto out_fail;
1909
1910                kernel_version = uts.release;
1911        }
1912
1913        for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
1914                snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
1915                if (vmlinux_path__add(bf) < 0)
1916                        goto out_fail;
1917        }
1918
1919        return 0;
1920
1921out_fail:
1922        vmlinux_path__exit();
1923        return -1;
1924}
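
/*
 * Worked example: with no symfs and a kernel version string of
 * "4.9.0-generic" (made up for illustration), vmlinux_path ends up as:
 *
 *        vmlinux
 *        /boot/vmlinux
 *        /boot/vmlinux-4.9.0-generic
 *        /usr/lib/debug/boot/vmlinux-4.9.0-generic
 *        /lib/modules/4.9.0-generic/build/vmlinux
 *        /usr/lib/debug/lib/modules/4.9.0-generic/vmlinux
 *        /usr/lib/debug/boot/vmlinux-4.9.0-generic.debug
 *
 * i.e. the fixed entries followed by each template expanded with either
 * env->os_release (recorded data) or uts.release (live session).
 */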
1925
1926int setup_list(struct strlist **list, const char *list_str,
1927               const char *list_name)
1928{
1929        if (list_str == NULL)
1930                return 0;
1931
1932        *list = strlist__new(list_str, NULL);
1933        if (!*list) {
1934                pr_err("problems parsing %s list\n", list_name);
1935                return -1;
1936        }
1937
1938        symbol_conf.has_filter = true;
1939        return 0;
1940}
1941
1942int setup_intlist(struct intlist **list, const char *list_str,
1943                  const char *list_name)
1944{
1945        if (list_str == NULL)
1946                return 0;
1947
1948        *list = intlist__new(list_str);
1949        if (!*list) {
1950                pr_err("problems parsing %s list\n", list_name);
1951                return -1;
1952        }
1953        return 0;
1954}
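
/*
 * Usage sketch (hypothetical values): both helpers leave *list untouched
 * when the string is NULL and otherwise parse a comma separated list, so
 * callers can chain them the way symbol__init() does below:
 *
 *        struct strlist *dsos = NULL;
 *        struct intlist *pids = NULL;
 *
 *        if (setup_list(&dsos, "libc-2.24.so,[kernel.kallsyms]", "dso") < 0)
 *                return -1;
 *        if (setup_intlist(&pids, "1,1042", "pid") < 0)
 *                return -1;
 */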
1955
1956static bool symbol__read_kptr_restrict(void)
1957{
1958        bool value = false;
1959        FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
1960
1961        if (fp != NULL) {
1962                char line[8];
1963
1964                if (fgets(line, sizeof(line), fp) != NULL)
1965                        value = ((geteuid() != 0) || (getuid() != 0)) ?
1966                                        (atoi(line) != 0) :
1967                                        (atoi(line) == 2);
1968
1969                fclose(fp);
1970        }
1971
1972        return value;
1973}
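
/*
 * Summary of the check above, derived directly from the expression (not
 * from separate documentation):
 *
 *        kptr_restrict    non-root user    root
 *        0                usable           usable
 *        1                restricted       usable
 *        2                restricted       restricted
 *
 * i.e. for a non-root user any non-zero setting marks kernel pointers as
 * restricted, while for root only the value 2 does.
 */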
1974
1975int symbol__annotation_init(void)
1976{
1977        if (symbol_conf.initialized) {
1978                pr_err("Annotation needs to be initialized before symbol__init()\n");
1979                return -1;
1980        }
1981
1982        if (symbol_conf.init_annotation) {
1983                pr_warning("Annotation being initialized multiple times\n");
1984                return 0;
1985        }
1986
1987        symbol_conf.priv_size += sizeof(struct annotation);
1988        symbol_conf.init_annotation = true;
1989        return 0;
1990}
1991
1992int symbol__init(struct perf_env *env)
1993{
1994        const char *symfs;
1995
1996        if (symbol_conf.initialized)
1997                return 0;
1998
1999        symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
2000
2001        symbol__elf_init();
2002
2003        if (symbol_conf.sort_by_name)
2004                symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
2005                                          sizeof(struct symbol));
2006
2007        if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
2008                return -1;
2009
2010        if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
2011                pr_err("'.' is the only invalid --field-separator argument\n");
2012                return -1;
2013        }
2014
2015        if (setup_list(&symbol_conf.dso_list,
2016                       symbol_conf.dso_list_str, "dso") < 0)
2017                return -1;
2018
2019        if (setup_list(&symbol_conf.comm_list,
2020                       symbol_conf.comm_list_str, "comm") < 0)
2021                goto out_free_dso_list;
2022
2023        if (setup_intlist(&symbol_conf.pid_list,
2024                       symbol_conf.pid_list_str, "pid") < 0)
2025                goto out_free_comm_list;
2026
2027        if (setup_intlist(&symbol_conf.tid_list,
2028                       symbol_conf.tid_list_str, "tid") < 0)
2029                goto out_free_pid_list;
2030
2031        if (setup_list(&symbol_conf.sym_list,
2032                       symbol_conf.sym_list_str, "symbol") < 0)
2033                goto out_free_tid_list;
2034
2035        if (setup_list(&symbol_conf.bt_stop_list,
2036                       symbol_conf.bt_stop_list_str, "symbol") < 0)
2037                goto out_free_sym_list;
2038
2039        /*
2040         * A symfs path of "/" is identical to "", so
2041         * reset it here for simplicity.
2042         */
2043        symfs = realpath(symbol_conf.symfs, NULL);
2044        if (symfs == NULL)
2045                symfs = symbol_conf.symfs;
2046        if (strcmp(symfs, "/") == 0)
2047                symbol_conf.symfs = "";
2048        if (symfs != symbol_conf.symfs)
2049                free((void *)symfs);
2050
2051        symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
2052
2053        symbol_conf.initialized = true;
2054        return 0;
2055
2056out_free_sym_list:
2057        strlist__delete(symbol_conf.sym_list);
2058out_free_tid_list:
2059        intlist__delete(symbol_conf.tid_list);
2060out_free_pid_list:
2061        intlist__delete(symbol_conf.pid_list);
2062out_free_comm_list:
2063        strlist__delete(symbol_conf.comm_list);
2064out_free_dso_list:
2065        strlist__delete(symbol_conf.dso_list);
2066        return -1;
2067}
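
/*
 * Typical lifecycle sketch (simplified; error handling, option parsing and
 * the annotate flag are hypothetical): annotation setup must come first
 * because it grows symbol_conf.priv_size, which is fixed once
 * symbol__init() has run:
 *
 *        if (annotate)
 *                symbol__annotation_init();
 *        if (symbol__init(env) < 0)        env may be NULL for a live session
 *                return -1;
 *        ...
 *        symbol__exit();
 */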
2068
2069void symbol__exit(void)
2070{
2071        if (!symbol_conf.initialized)
2072                return;
2073        strlist__delete(symbol_conf.bt_stop_list);
2074        strlist__delete(symbol_conf.sym_list);
2075        strlist__delete(symbol_conf.dso_list);
2076        strlist__delete(symbol_conf.comm_list);
2077        intlist__delete(symbol_conf.tid_list);
2078        intlist__delete(symbol_conf.pid_list);
2079        vmlinux_path__exit();
2080        symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2081        symbol_conf.bt_stop_list = NULL;
2082        symbol_conf.initialized = false;
2083}
2084
2085int symbol__config_symfs(const struct option *opt __maybe_unused,
2086                         const char *dir, int unset __maybe_unused)
2087{
2088        char *bf = NULL;
2089        int ret;
2090
2091        symbol_conf.symfs = strdup(dir);
2092        if (symbol_conf.symfs == NULL)
2093                return -ENOMEM;
2094
2095        /* Skip the locally configured cache if a symfs is given, and
2096         * point the build-id dir at symfs/.debug.
2097         */
2098        ret = asprintf(&bf, "%s/%s", dir, ".debug");
2099        if (ret < 0)
2100                return -ENOMEM;
2101
2102        set_buildid_dir(bf);
2103
2104        free(bf);
2105        return 0;
2106}
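
/*
 * This function is meant to be wired up as a parse-options callback; a
 * sketch of such wiring (the help text here is illustrative, not quoted
 * from any particular builtin):
 *
 *        OPT_CALLBACK(0, "symfs", NULL, "directory",
 *                     "Look for files with symbols relative to this directory",
 *                     symbol__config_symfs),
 */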
2107