linux/tools/perf/util/symbol.c
   1// SPDX-License-Identifier: GPL-2.0
   2#include <dirent.h>
   3#include <errno.h>
   4#include <stdlib.h>
   5#include <stdio.h>
   6#include <string.h>
   7#include <linux/kernel.h>
   8#include <linux/mman.h>
   9#include <sys/types.h>
  10#include <sys/stat.h>
  11#include <sys/param.h>
  12#include <fcntl.h>
  13#include <unistd.h>
  14#include <inttypes.h>
  15#include "annotate.h"
  16#include "build-id.h"
  17#include "util.h"
  18#include "debug.h"
  19#include "machine.h"
  20#include "symbol.h"
  21#include "strlist.h"
  22#include "intlist.h"
  23#include "namespaces.h"
  24#include "header.h"
  25#include "path.h"
  26#include "sane_ctype.h"
  27
  28#include <elf.h>
  29#include <limits.h>
  30#include <symbol/kallsyms.h>
  31#include <sys/utsname.h>
  32
  33static int dso__load_kernel_sym(struct dso *dso, struct map *map);
  34static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
  35static bool symbol__is_idle(const char *name);
  36
  37int vmlinux_path__nr_entries;
  38char **vmlinux_path;
  39
  40struct symbol_conf symbol_conf = {
  41        .use_modules            = true,
  42        .try_vmlinux_path       = true,
  43        .demangle               = true,
  44        .demangle_kernel        = false,
  45        .cumulate_callchain     = true,
  46        .show_hist_headers      = true,
  47        .symfs                  = "",
  48        .event_group            = true,
  49        .inline_name            = true,
  50};
  51
  52static enum dso_binary_type binary_type_symtab[] = {
  53        DSO_BINARY_TYPE__KALLSYMS,
  54        DSO_BINARY_TYPE__GUEST_KALLSYMS,
  55        DSO_BINARY_TYPE__JAVA_JIT,
  56        DSO_BINARY_TYPE__DEBUGLINK,
  57        DSO_BINARY_TYPE__BUILD_ID_CACHE,
  58        DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
  59        DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
  60        DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
  61        DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
  62        DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
  63        DSO_BINARY_TYPE__GUEST_KMODULE,
  64        DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
  65        DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
  66        DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
  67        DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
  68        DSO_BINARY_TYPE__NOT_FOUND,
  69};
  70
  71#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
  72
  73static bool symbol_type__filter(char symbol_type)
  74{
  75        symbol_type = toupper(symbol_type);
  76        return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
  77}
  78
  79static int prefix_underscores_count(const char *str)
  80{
  81        const char *tail = str;
  82
  83        while (*tail == '_')
  84                tail++;
  85
  86        return tail - str;
  87}
  88
  89const char * __weak arch__normalize_symbol_name(const char *name)
  90{
  91        return name;
  92}
  93
  94int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
  95{
  96        return strcmp(namea, nameb);
  97}
  98
  99int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
 100                                        unsigned int n)
 101{
 102        return strncmp(namea, nameb, n);
 103}
 104
 105int __weak arch__choose_best_symbol(struct symbol *syma,
 106                                    struct symbol *symb __maybe_unused)
 107{
 108        /* Avoid "SyS" kernel syscall aliases */
 109        if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
 110                return SYMBOL_B;
 111        if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
 112                return SYMBOL_B;
 113
 114        return SYMBOL_A;
 115}
 116
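/*
 * Rank two symbols that share the same start address: prefer the one with
 * a non-zero length, then a non-weak binding, then a global binding, then
 * fewer leading underscores, then the longer name, and finally let the
 * arch specific hook break the tie.
 */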
 117static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
 118{
 119        s64 a;
 120        s64 b;
 121        size_t na, nb;
 122
  123        /* Prefer a symbol with non-zero length */
 124        a = syma->end - syma->start;
 125        b = symb->end - symb->start;
 126        if ((b == 0) && (a > 0))
 127                return SYMBOL_A;
 128        else if ((a == 0) && (b > 0))
 129                return SYMBOL_B;
 130
  131        /* Prefer a non-weak symbol over a weak one */
 132        a = syma->binding == STB_WEAK;
 133        b = symb->binding == STB_WEAK;
 134        if (b && !a)
 135                return SYMBOL_A;
 136        if (a && !b)
 137                return SYMBOL_B;
 138
  139        /* Prefer a global symbol over a non-global one */
 140        a = syma->binding == STB_GLOBAL;
 141        b = symb->binding == STB_GLOBAL;
 142        if (a && !b)
 143                return SYMBOL_A;
 144        if (b && !a)
 145                return SYMBOL_B;
 146
  147        /* Prefer a symbol with fewer leading underscores */
 148        a = prefix_underscores_count(syma->name);
 149        b = prefix_underscores_count(symb->name);
 150        if (b > a)
 151                return SYMBOL_A;
 152        else if (a > b)
 153                return SYMBOL_B;
 154
 155        /* Choose the symbol with the longest name */
 156        na = strlen(syma->name);
 157        nb = strlen(symb->name);
 158        if (na > nb)
 159                return SYMBOL_A;
 160        else if (na < nb)
 161                return SYMBOL_B;
 162
 163        return arch__choose_best_symbol(syma, symb);
 164}
 165
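/*
 * Unless symbol_conf.allow_aliases is set, keep only the "best" symbol
 * (as decided by choose_best_symbol()) among those that start at the same
 * address and delete the rest from the tree.
 */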
 166void symbols__fixup_duplicate(struct rb_root *symbols)
 167{
 168        struct rb_node *nd;
 169        struct symbol *curr, *next;
 170
 171        if (symbol_conf.allow_aliases)
 172                return;
 173
 174        nd = rb_first(symbols);
 175
 176        while (nd) {
 177                curr = rb_entry(nd, struct symbol, rb_node);
 178again:
 179                nd = rb_next(&curr->rb_node);
 180                next = rb_entry(nd, struct symbol, rb_node);
 181
 182                if (!nd)
 183                        break;
 184
 185                if (curr->start != next->start)
 186                        continue;
 187
 188                if (choose_best_symbol(curr, next) == SYMBOL_A) {
 189                        rb_erase(&next->rb_node, symbols);
 190                        symbol__delete(next);
 191                        goto again;
 192                } else {
 193                        nd = rb_next(&curr->rb_node);
 194                        rb_erase(&curr->rb_node, symbols);
 195                        symbol__delete(curr);
 196                }
 197        }
 198}
 199
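/*
 * Sources like kallsyms provide no symbol sizes, so symbols get created
 * with zero length and are fixed up here: a zero-length symbol is extended
 * to the start of the next one, and the last entry only gets a rough,
 * page-aligned guess.
 */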
 200void symbols__fixup_end(struct rb_root *symbols)
 201{
 202        struct rb_node *nd, *prevnd = rb_first(symbols);
 203        struct symbol *curr, *prev;
 204
 205        if (prevnd == NULL)
 206                return;
 207
 208        curr = rb_entry(prevnd, struct symbol, rb_node);
 209
 210        for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
 211                prev = curr;
 212                curr = rb_entry(nd, struct symbol, rb_node);
 213
 214                if (prev->end == prev->start && prev->end != curr->start)
 215                        prev->end = curr->start;
 216        }
 217
 218        /* Last entry */
 219        if (curr->end == curr->start)
 220                curr->end = roundup(curr->start, 4096) + 4096;
 221}
 222
 223void map_groups__fixup_end(struct map_groups *mg)
 224{
 225        struct maps *maps = &mg->maps;
 226        struct map *next, *curr;
 227
 228        down_write(&maps->lock);
 229
 230        curr = maps__first(maps);
 231        if (curr == NULL)
 232                goto out_unlock;
 233
 234        for (next = map__next(curr); next; next = map__next(curr)) {
 235                if (!curr->end)
 236                        curr->end = next->start;
 237                curr = next;
 238        }
 239
  240        /*
  241         * We still don't have the actual symbols, so guess the
  242         * last map's final address.
  243         */
 244        if (!curr->end)
 245                curr->end = ~0ULL;
 246
 247out_unlock:
 248        up_write(&maps->lock);
 249}
 250
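/*
 * Allocate a new symbol.  When symbol_conf.priv_size is non-zero, a
 * private area (e.g. struct annotation) is placed in front of the
 * returned struct symbol, and the name is copied inline after it.
 */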
 251struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
 252{
 253        size_t namelen = strlen(name) + 1;
 254        struct symbol *sym = calloc(1, (symbol_conf.priv_size +
 255                                        sizeof(*sym) + namelen));
 256        if (sym == NULL)
 257                return NULL;
 258
 259        if (symbol_conf.priv_size) {
 260                if (symbol_conf.init_annotation) {
 261                        struct annotation *notes = (void *)sym;
 262                        pthread_mutex_init(&notes->lock, NULL);
 263                }
 264                sym = ((void *)sym) + symbol_conf.priv_size;
 265        }
 266
 267        sym->start   = start;
 268        sym->end     = len ? start + len : start;
 269        sym->type    = type;
 270        sym->binding = binding;
 271        sym->namelen = namelen - 1;
 272
 273        pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
 274                  __func__, name, start, sym->end);
 275        memcpy(sym->name, name, namelen);
 276
 277        return sym;
 278}
 279
 280void symbol__delete(struct symbol *sym)
 281{
 282        free(((void *)sym) - symbol_conf.priv_size);
 283}
 284
 285void symbols__delete(struct rb_root *symbols)
 286{
 287        struct symbol *pos;
 288        struct rb_node *next = rb_first(symbols);
 289
 290        while (next) {
 291                pos = rb_entry(next, struct symbol, rb_node);
 292                next = rb_next(&pos->rb_node);
 293                rb_erase(&pos->rb_node, symbols);
 294                symbol__delete(pos);
 295        }
 296}
 297
 298void __symbols__insert(struct rb_root *symbols, struct symbol *sym, bool kernel)
 299{
 300        struct rb_node **p = &symbols->rb_node;
 301        struct rb_node *parent = NULL;
 302        const u64 ip = sym->start;
 303        struct symbol *s;
 304
 305        if (kernel) {
 306                const char *name = sym->name;
  307                /*
  308                 * ppc64 uses function descriptors and prefixes every
  309                 * function entry symbol name with a '.'. Remove it.
  310                 */
 311                if (name[0] == '.')
 312                        name++;
 313                sym->idle = symbol__is_idle(name);
 314        }
 315
 316        while (*p != NULL) {
 317                parent = *p;
 318                s = rb_entry(parent, struct symbol, rb_node);
 319                if (ip < s->start)
 320                        p = &(*p)->rb_left;
 321                else
 322                        p = &(*p)->rb_right;
 323        }
 324        rb_link_node(&sym->rb_node, parent, p);
 325        rb_insert_color(&sym->rb_node, symbols);
 326}
 327
 328void symbols__insert(struct rb_root *symbols, struct symbol *sym)
 329{
 330        __symbols__insert(symbols, sym, false);
 331}
 332
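/*
 * Find the symbol covering @ip: a symbol matches when
 * start <= ip < end, or when ip == start for zero-length symbols.
 */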
 333static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
 334{
 335        struct rb_node *n;
 336
 337        if (symbols == NULL)
 338                return NULL;
 339
 340        n = symbols->rb_node;
 341
 342        while (n) {
 343                struct symbol *s = rb_entry(n, struct symbol, rb_node);
 344
 345                if (ip < s->start)
 346                        n = n->rb_left;
 347                else if (ip > s->end || (ip == s->end && ip != s->start))
 348                        n = n->rb_right;
 349                else
 350                        return s;
 351        }
 352
 353        return NULL;
 354}
 355
 356static struct symbol *symbols__first(struct rb_root *symbols)
 357{
 358        struct rb_node *n = rb_first(symbols);
 359
 360        if (n)
 361                return rb_entry(n, struct symbol, rb_node);
 362
 363        return NULL;
 364}
 365
 366static struct symbol *symbols__last(struct rb_root *symbols)
 367{
 368        struct rb_node *n = rb_last(symbols);
 369
 370        if (n)
 371                return rb_entry(n, struct symbol, rb_node);
 372
 373        return NULL;
 374}
 375
 376static struct symbol *symbols__next(struct symbol *sym)
 377{
 378        struct rb_node *n = rb_next(&sym->rb_node);
 379
 380        if (n)
 381                return rb_entry(n, struct symbol, rb_node);
 382
 383        return NULL;
 384}
 385
 386static void symbols__insert_by_name(struct rb_root *symbols, struct symbol *sym)
 387{
 388        struct rb_node **p = &symbols->rb_node;
 389        struct rb_node *parent = NULL;
 390        struct symbol_name_rb_node *symn, *s;
 391
 392        symn = container_of(sym, struct symbol_name_rb_node, sym);
 393
 394        while (*p != NULL) {
 395                parent = *p;
 396                s = rb_entry(parent, struct symbol_name_rb_node, rb_node);
 397                if (strcmp(sym->name, s->sym.name) < 0)
 398                        p = &(*p)->rb_left;
 399                else
 400                        p = &(*p)->rb_right;
 401        }
 402        rb_link_node(&symn->rb_node, parent, p);
 403        rb_insert_color(&symn->rb_node, symbols);
 404}
 405
 406static void symbols__sort_by_name(struct rb_root *symbols,
 407                                  struct rb_root *source)
 408{
 409        struct rb_node *nd;
 410
 411        for (nd = rb_first(source); nd; nd = rb_next(nd)) {
 412                struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
 413                symbols__insert_by_name(symbols, pos);
 414        }
 415}
 416
 417int symbol__match_symbol_name(const char *name, const char *str,
 418                              enum symbol_tag_include includes)
 419{
 420        const char *versioning;
 421
 422        if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
 423            (versioning = strstr(name, "@@"))) {
 424                int len = strlen(str);
 425
 426                if (len < versioning - name)
 427                        len = versioning - name;
 428
 429                return arch__compare_symbol_names_n(name, str, len);
 430        } else
 431                return arch__compare_symbol_names(name, str);
 432}
 433
 434static struct symbol *symbols__find_by_name(struct rb_root *symbols,
 435                                            const char *name,
 436                                            enum symbol_tag_include includes)
 437{
 438        struct rb_node *n;
 439        struct symbol_name_rb_node *s = NULL;
 440
 441        if (symbols == NULL)
 442                return NULL;
 443
 444        n = symbols->rb_node;
 445
 446        while (n) {
 447                int cmp;
 448
 449                s = rb_entry(n, struct symbol_name_rb_node, rb_node);
 450                cmp = symbol__match_symbol_name(s->sym.name, name, includes);
 451
 452                if (cmp > 0)
 453                        n = n->rb_left;
 454                else if (cmp < 0)
 455                        n = n->rb_right;
 456                else
 457                        break;
 458        }
 459
 460        if (n == NULL)
 461                return NULL;
 462
 463        if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY)
 464                /* return first symbol that has same name (if any) */
 465                for (n = rb_prev(n); n; n = rb_prev(n)) {
 466                        struct symbol_name_rb_node *tmp;
 467
 468                        tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
 469                        if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
 470                                break;
 471
 472                        s = tmp;
 473                }
 474
 475        return &s->sym;
 476}
 477
 478void dso__reset_find_symbol_cache(struct dso *dso)
 479{
 480        dso->last_find_result.addr   = 0;
 481        dso->last_find_result.symbol = NULL;
 482}
 483
 484void dso__insert_symbol(struct dso *dso, struct symbol *sym)
 485{
 486        __symbols__insert(&dso->symbols, sym, dso->kernel);
 487
 488        /* update the symbol cache if necessary */
 489        if (dso->last_find_result.addr >= sym->start &&
 490            (dso->last_find_result.addr < sym->end ||
 491            sym->start == sym->end)) {
 492                dso->last_find_result.symbol = sym;
 493        }
 494}
 495
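/*
 * Address lookups go through a one-entry cache (last_find_result), so
 * repeated lookups of the same address skip the rb-tree walk.
 */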
 496struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
 497{
 498        if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
 499                dso->last_find_result.addr   = addr;
 500                dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
 501        }
 502
 503        return dso->last_find_result.symbol;
 504}
 505
 506struct symbol *dso__first_symbol(struct dso *dso)
 507{
 508        return symbols__first(&dso->symbols);
 509}
 510
 511struct symbol *dso__last_symbol(struct dso *dso)
 512{
 513        return symbols__last(&dso->symbols);
 514}
 515
 516struct symbol *dso__next_symbol(struct symbol *sym)
 517{
 518        return symbols__next(sym);
 519}
 520
 521struct symbol *symbol__next_by_name(struct symbol *sym)
 522{
 523        struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
 524        struct rb_node *n = rb_next(&s->rb_node);
 525
 526        return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
 527}
 528
  529/*
  530 * Returns the first symbol that matches @name.
  531 */
 532struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name)
 533{
 534        struct symbol *s = symbols__find_by_name(&dso->symbol_names, name,
 535                                                 SYMBOL_TAG_INCLUDE__NONE);
 536        if (!s)
 537                s = symbols__find_by_name(&dso->symbol_names, name,
 538                                          SYMBOL_TAG_INCLUDE__DEFAULT_ONLY);
 539        return s;
 540}
 541
 542void dso__sort_by_name(struct dso *dso)
 543{
 544        dso__set_sorted_by_name(dso);
 545        return symbols__sort_by_name(&dso->symbol_names, &dso->symbols);
 546}
 547
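/*
 * Parse a /proc/modules style file, where each line looks like
 *   <name> <size> <refcount> <deps> <state> 0x<address>
 * and pass the name (wrapped in brackets), start address and size of
 * each module to @process_module.
 */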
 548int modules__parse(const char *filename, void *arg,
 549                   int (*process_module)(void *arg, const char *name,
 550                                         u64 start, u64 size))
 551{
 552        char *line = NULL;
 553        size_t n;
 554        FILE *file;
 555        int err = 0;
 556
 557        file = fopen(filename, "r");
 558        if (file == NULL)
 559                return -1;
 560
 561        while (1) {
 562                char name[PATH_MAX];
 563                u64 start, size;
 564                char *sep, *endptr;
 565                ssize_t line_len;
 566
 567                line_len = getline(&line, &n, file);
 568                if (line_len < 0) {
 569                        if (feof(file))
 570                                break;
 571                        err = -1;
 572                        goto out;
 573                }
 574
 575                if (!line) {
 576                        err = -1;
 577                        goto out;
 578                }
 579
 580                line[--line_len] = '\0'; /* \n */
 581
 582                sep = strrchr(line, 'x');
 583                if (sep == NULL)
 584                        continue;
 585
 586                hex2u64(sep + 1, &start);
 587
 588                sep = strchr(line, ' ');
 589                if (sep == NULL)
 590                        continue;
 591
 592                *sep = '\0';
 593
 594                scnprintf(name, sizeof(name), "[%s]", line);
 595
 596                size = strtoul(sep + 1, &endptr, 0);
 597                if (*endptr != ' ' && *endptr != '\t')
 598                        continue;
 599
 600                err = process_module(arg, name, start, size);
 601                if (err)
 602                        break;
 603        }
 604out:
 605        free(line);
 606        fclose(file);
 607        return err;
 608}
 609
 610/*
 611 * These are symbols in the kernel image, so make sure that
 612 * sym is from a kernel DSO.
 613 */
 614static bool symbol__is_idle(const char *name)
 615{
 616        const char * const idle_symbols[] = {
 617                "cpu_idle",
 618                "cpu_startup_entry",
 619                "intel_idle",
 620                "default_idle",
 621                "native_safe_halt",
 622                "enter_idle",
 623                "exit_idle",
 624                "mwait_idle",
 625                "mwait_idle_with_hints",
 626                "poll_idle",
 627                "ppc64_runlatch_off",
 628                "pseries_dedicated_idle_sleep",
 629                NULL
 630        };
 631        int i;
 632
 633        for (i = 0; idle_symbols[i]; i++) {
 634                if (!strcmp(idle_symbols[i], name))
 635                        return true;
 636        }
 637
 638        return false;
 639}
 640
 641static int map__process_kallsym_symbol(void *arg, const char *name,
 642                                       char type, u64 start)
 643{
 644        struct symbol *sym;
 645        struct dso *dso = arg;
 646        struct rb_root *root = &dso->symbols;
 647
 648        if (!symbol_type__filter(type))
 649                return 0;
 650
 651        /*
 652         * module symbols are not sorted so we add all
 653         * symbols, setting length to 0, and rely on
 654         * symbols__fixup_end() to fix it up.
 655         */
 656        sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
 657        if (sym == NULL)
 658                return -ENOMEM;
 659        /*
 660         * We will pass the symbols to the filter later, in
 661         * map__split_kallsyms, when we have split the maps per module
 662         */
 663        __symbols__insert(root, sym, !strchr(name, '['));
 664
 665        return 0;
 666}
 667
 668/*
 669 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 670 * so that we can in the next step set the symbol ->end address and then
 671 * call kernel_maps__split_kallsyms.
 672 */
 673static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
 674{
 675        return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
 676}
 677
 678static int map_groups__split_kallsyms_for_kcore(struct map_groups *kmaps, struct dso *dso)
 679{
 680        struct map *curr_map;
 681        struct symbol *pos;
 682        int count = 0;
 683        struct rb_root old_root = dso->symbols;
 684        struct rb_root *root = &dso->symbols;
 685        struct rb_node *next = rb_first(root);
 686
 687        if (!kmaps)
 688                return -1;
 689
 690        *root = RB_ROOT;
 691
 692        while (next) {
 693                char *module;
 694
 695                pos = rb_entry(next, struct symbol, rb_node);
 696                next = rb_next(&pos->rb_node);
 697
 698                rb_erase_init(&pos->rb_node, &old_root);
 699
 700                module = strchr(pos->name, '\t');
 701                if (module)
 702                        *module = '\0';
 703
 704                curr_map = map_groups__find(kmaps, pos->start);
 705
 706                if (!curr_map) {
 707                        symbol__delete(pos);
 708                        continue;
 709                }
 710
 711                pos->start -= curr_map->start - curr_map->pgoff;
 712                if (pos->end)
 713                        pos->end -= curr_map->start - curr_map->pgoff;
 714                symbols__insert(&curr_map->dso->symbols, pos);
 715                ++count;
 716        }
 717
 718        /* Symbols have been adjusted */
 719        dso->adjust_symbols = 1;
 720
 721        return count;
 722}
 723
 724/*
 725 * Split the symbols into maps, making sure there are no overlaps, i.e. the
  726 * kernel range is broken into several maps, named [kernel].N, as we don't have
  727 * the original ELF section names that vmlinux has.
 728 */
 729static int map_groups__split_kallsyms(struct map_groups *kmaps, struct dso *dso, u64 delta,
 730                                      struct map *initial_map)
 731{
 732        struct machine *machine;
 733        struct map *curr_map = initial_map;
 734        struct symbol *pos;
 735        int count = 0, moved = 0;
 736        struct rb_root *root = &dso->symbols;
 737        struct rb_node *next = rb_first(root);
 738        int kernel_range = 0;
 739        bool x86_64;
 740
 741        if (!kmaps)
 742                return -1;
 743
 744        machine = kmaps->machine;
 745
 746        x86_64 = machine__is(machine, "x86_64");
 747
 748        while (next) {
 749                char *module;
 750
 751                pos = rb_entry(next, struct symbol, rb_node);
 752                next = rb_next(&pos->rb_node);
 753
 754                module = strchr(pos->name, '\t');
 755                if (module) {
 756                        if (!symbol_conf.use_modules)
 757                                goto discard_symbol;
 758
 759                        *module++ = '\0';
 760
 761                        if (strcmp(curr_map->dso->short_name, module)) {
 762                                if (curr_map != initial_map &&
 763                                    dso->kernel == DSO_TYPE_GUEST_KERNEL &&
 764                                    machine__is_default_guest(machine)) {
 765                                        /*
 766                                         * We assume all symbols of a module are
  767                                         * contiguous in kallsyms, so curr_map
 768                                         * points to a module and all its
 769                                         * symbols are in its kmap. Mark it as
 770                                         * loaded.
 771                                         */
 772                                        dso__set_loaded(curr_map->dso);
 773                                }
 774
 775                                curr_map = map_groups__find_by_name(kmaps, module);
 776                                if (curr_map == NULL) {
 777                                        pr_debug("%s/proc/{kallsyms,modules} "
 778                                                 "inconsistency while looking "
 779                                                 "for \"%s\" module!\n",
 780                                                 machine->root_dir, module);
 781                                        curr_map = initial_map;
 782                                        goto discard_symbol;
 783                                }
 784
 785                                if (curr_map->dso->loaded &&
 786                                    !machine__is_default_guest(machine))
 787                                        goto discard_symbol;
 788                        }
 789                        /*
 790                         * So that we look just like we get from .ko files,
 791                         * i.e. not prelinked, relative to initial_map->start.
 792                         */
 793                        pos->start = curr_map->map_ip(curr_map, pos->start);
 794                        pos->end   = curr_map->map_ip(curr_map, pos->end);
 795                } else if (x86_64 && is_entry_trampoline(pos->name)) {
 796                        /*
 797                         * These symbols are not needed anymore since the
  798                         * trampoline maps refer to the text section and its
 799                         * symbols instead. Avoid having to deal with
 800                         * relocations, and the assumption that the first symbol
 801                         * is the start of kernel text, by simply removing the
 802                         * symbols at this point.
 803                         */
 804                        goto discard_symbol;
 805                } else if (curr_map != initial_map) {
 806                        char dso_name[PATH_MAX];
 807                        struct dso *ndso;
 808
 809                        if (delta) {
 810                                /* Kernel was relocated at boot time */
 811                                pos->start -= delta;
 812                                pos->end -= delta;
 813                        }
 814
 815                        if (count == 0) {
 816                                curr_map = initial_map;
 817                                goto add_symbol;
 818                        }
 819
 820                        if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
 821                                snprintf(dso_name, sizeof(dso_name),
 822                                        "[guest.kernel].%d",
 823                                        kernel_range++);
 824                        else
 825                                snprintf(dso_name, sizeof(dso_name),
 826                                        "[kernel].%d",
 827                                        kernel_range++);
 828
 829                        ndso = dso__new(dso_name);
 830                        if (ndso == NULL)
 831                                return -1;
 832
 833                        ndso->kernel = dso->kernel;
 834
 835                        curr_map = map__new2(pos->start, ndso);
 836                        if (curr_map == NULL) {
 837                                dso__put(ndso);
 838                                return -1;
 839                        }
 840
 841                        curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
 842                        map_groups__insert(kmaps, curr_map);
 843                        ++kernel_range;
 844                } else if (delta) {
 845                        /* Kernel was relocated at boot time */
 846                        pos->start -= delta;
 847                        pos->end -= delta;
 848                }
 849add_symbol:
 850                if (curr_map != initial_map) {
 851                        rb_erase(&pos->rb_node, root);
 852                        symbols__insert(&curr_map->dso->symbols, pos);
 853                        ++moved;
 854                } else
 855                        ++count;
 856
 857                continue;
 858discard_symbol:
 859                rb_erase(&pos->rb_node, root);
 860                symbol__delete(pos);
 861        }
 862
 863        if (curr_map != initial_map &&
 864            dso->kernel == DSO_TYPE_GUEST_KERNEL &&
 865            machine__is_default_guest(kmaps->machine)) {
 866                dso__set_loaded(curr_map->dso);
 867        }
 868
 869        return count + moved;
 870}
 871
 872bool symbol__restricted_filename(const char *filename,
 873                                 const char *restricted_filename)
 874{
 875        bool restricted = false;
 876
 877        if (symbol_conf.kptr_restrict) {
 878                char *r = realpath(filename, NULL);
 879
 880                if (r != NULL) {
 881                        restricted = strcmp(r, restricted_filename) == 0;
 882                        free(r);
 883                        return restricted;
 884                }
 885        }
 886
 887        return restricted;
 888}
 889
 890struct module_info {
 891        struct rb_node rb_node;
 892        char *name;
 893        u64 start;
 894};
 895
 896static void add_module(struct module_info *mi, struct rb_root *modules)
 897{
 898        struct rb_node **p = &modules->rb_node;
 899        struct rb_node *parent = NULL;
 900        struct module_info *m;
 901
 902        while (*p != NULL) {
 903                parent = *p;
 904                m = rb_entry(parent, struct module_info, rb_node);
 905                if (strcmp(mi->name, m->name) < 0)
 906                        p = &(*p)->rb_left;
 907                else
 908                        p = &(*p)->rb_right;
 909        }
 910        rb_link_node(&mi->rb_node, parent, p);
 911        rb_insert_color(&mi->rb_node, modules);
 912}
 913
 914static void delete_modules(struct rb_root *modules)
 915{
 916        struct module_info *mi;
 917        struct rb_node *next = rb_first(modules);
 918
 919        while (next) {
 920                mi = rb_entry(next, struct module_info, rb_node);
 921                next = rb_next(&mi->rb_node);
 922                rb_erase(&mi->rb_node, modules);
 923                zfree(&mi->name);
 924                free(mi);
 925        }
 926}
 927
 928static struct module_info *find_module(const char *name,
 929                                       struct rb_root *modules)
 930{
 931        struct rb_node *n = modules->rb_node;
 932
 933        while (n) {
 934                struct module_info *m;
 935                int cmp;
 936
 937                m = rb_entry(n, struct module_info, rb_node);
 938                cmp = strcmp(name, m->name);
 939                if (cmp < 0)
 940                        n = n->rb_left;
 941                else if (cmp > 0)
 942                        n = n->rb_right;
 943                else
 944                        return m;
 945        }
 946
 947        return NULL;
 948}
 949
 950static int __read_proc_modules(void *arg, const char *name, u64 start,
 951                               u64 size __maybe_unused)
 952{
 953        struct rb_root *modules = arg;
 954        struct module_info *mi;
 955
 956        mi = zalloc(sizeof(struct module_info));
 957        if (!mi)
 958                return -ENOMEM;
 959
 960        mi->name = strdup(name);
 961        mi->start = start;
 962
 963        if (!mi->name) {
 964                free(mi);
 965                return -ENOMEM;
 966        }
 967
 968        add_module(mi, modules);
 969
 970        return 0;
 971}
 972
 973static int read_proc_modules(const char *filename, struct rb_root *modules)
 974{
 975        if (symbol__restricted_filename(filename, "/proc/modules"))
 976                return -1;
 977
 978        if (modules__parse(filename, modules, __read_proc_modules)) {
 979                delete_modules(modules);
 980                return -1;
 981        }
 982
 983        return 0;
 984}
 985
 986int compare_proc_modules(const char *from, const char *to)
 987{
 988        struct rb_root from_modules = RB_ROOT;
 989        struct rb_root to_modules = RB_ROOT;
 990        struct rb_node *from_node, *to_node;
 991        struct module_info *from_m, *to_m;
 992        int ret = -1;
 993
 994        if (read_proc_modules(from, &from_modules))
 995                return -1;
 996
 997        if (read_proc_modules(to, &to_modules))
 998                goto out_delete_from;
 999
1000        from_node = rb_first(&from_modules);
1001        to_node = rb_first(&to_modules);
1002        while (from_node) {
1003                if (!to_node)
1004                        break;
1005
1006                from_m = rb_entry(from_node, struct module_info, rb_node);
1007                to_m = rb_entry(to_node, struct module_info, rb_node);
1008
1009                if (from_m->start != to_m->start ||
1010                    strcmp(from_m->name, to_m->name))
1011                        break;
1012
1013                from_node = rb_next(from_node);
1014                to_node = rb_next(to_node);
1015        }
1016
1017        if (!from_node && !to_node)
1018                ret = 0;
1019
1020        delete_modules(&to_modules);
1021out_delete_from:
1022        delete_modules(&from_modules);
1023
1024        return ret;
1025}
1026
1027struct map *map_groups__first(struct map_groups *mg)
1028{
1029        return maps__first(&mg->maps);
1030}
1031
1032static int do_validate_kcore_modules(const char *filename,
1033                                  struct map_groups *kmaps)
1034{
1035        struct rb_root modules = RB_ROOT;
1036        struct map *old_map;
1037        int err;
1038
1039        err = read_proc_modules(filename, &modules);
1040        if (err)
1041                return err;
1042
1043        old_map = map_groups__first(kmaps);
1044        while (old_map) {
1045                struct map *next = map_groups__next(old_map);
1046                struct module_info *mi;
1047
1048                if (!__map__is_kmodule(old_map)) {
1049                        old_map = next;
1050                        continue;
1051                }
1052
1053                /* Module must be in memory at the same address */
1054                mi = find_module(old_map->dso->short_name, &modules);
1055                if (!mi || mi->start != old_map->start) {
1056                        err = -EINVAL;
1057                        goto out;
1058                }
1059
1060                old_map = next;
1061        }
1062out:
1063        delete_modules(&modules);
1064        return err;
1065}
1066
1067/*
 1068 * If kallsyms is referenced by name then we look for the given base name in
 1069 * the same directory.
1070 */
1071static bool filename_from_kallsyms_filename(char *filename,
1072                                            const char *base_name,
1073                                            const char *kallsyms_filename)
1074{
1075        char *name;
1076
1077        strcpy(filename, kallsyms_filename);
1078        name = strrchr(filename, '/');
1079        if (!name)
1080                return false;
1081
1082        name += 1;
1083
1084        if (!strcmp(name, "kallsyms")) {
1085                strcpy(name, base_name);
1086                return true;
1087        }
1088
1089        return false;
1090}
1091
1092static int validate_kcore_modules(const char *kallsyms_filename,
1093                                  struct map *map)
1094{
1095        struct map_groups *kmaps = map__kmaps(map);
1096        char modules_filename[PATH_MAX];
1097
1098        if (!kmaps)
1099                return -EINVAL;
1100
1101        if (!filename_from_kallsyms_filename(modules_filename, "modules",
1102                                             kallsyms_filename))
1103                return -EINVAL;
1104
1105        if (do_validate_kcore_modules(modules_filename, kmaps))
1106                return -EINVAL;
1107
1108        return 0;
1109}
1110
1111static int validate_kcore_addresses(const char *kallsyms_filename,
1112                                    struct map *map)
1113{
1114        struct kmap *kmap = map__kmap(map);
1115
1116        if (!kmap)
1117                return -EINVAL;
1118
1119        if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
1120                u64 start;
1121
1122                if (kallsyms__get_function_start(kallsyms_filename,
1123                                                 kmap->ref_reloc_sym->name, &start))
1124                        return -ENOENT;
1125                if (start != kmap->ref_reloc_sym->addr)
1126                        return -EINVAL;
1127        }
1128
1129        return validate_kcore_modules(kallsyms_filename, map);
1130}
1131
1132struct kcore_mapfn_data {
1133        struct dso *dso;
1134        struct list_head maps;
1135};
1136
1137static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1138{
1139        struct kcore_mapfn_data *md = data;
1140        struct map *map;
1141
1142        map = map__new2(start, md->dso);
1143        if (map == NULL)
1144                return -ENOMEM;
1145
1146        map->end = map->start + len;
1147        map->pgoff = pgoff;
1148
1149        list_add(&map->node, &md->maps);
1150
1151        return 0;
1152}
1153
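/*
 * Replace the kallsyms-derived kernel maps with maps built from the
 * program headers of the kcore file found next to @kallsyms_filename,
 * provided the modules and the reference relocation symbol are still at
 * the addresses kallsyms reported.
 */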
1154static int dso__load_kcore(struct dso *dso, struct map *map,
1155                           const char *kallsyms_filename)
1156{
1157        struct map_groups *kmaps = map__kmaps(map);
1158        struct kcore_mapfn_data md;
1159        struct map *old_map, *new_map, *replacement_map = NULL;
1160        struct machine *machine;
1161        bool is_64_bit;
1162        int err, fd;
1163        char kcore_filename[PATH_MAX];
1164        u64 stext;
1165
1166        if (!kmaps)
1167                return -EINVAL;
1168
1169        machine = kmaps->machine;
1170
1171        /* This function requires that the map is the kernel map */
1172        if (!__map__is_kernel(map))
1173                return -EINVAL;
1174
1175        if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
1176                                             kallsyms_filename))
1177                return -EINVAL;
1178
1179        /* Modules and kernel must be present at their original addresses */
1180        if (validate_kcore_addresses(kallsyms_filename, map))
1181                return -EINVAL;
1182
1183        md.dso = dso;
1184        INIT_LIST_HEAD(&md.maps);
1185
1186        fd = open(kcore_filename, O_RDONLY);
1187        if (fd < 0) {
1188                pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
1189                         kcore_filename);
1190                return -EINVAL;
1191        }
1192
1193        /* Read new maps into temporary lists */
1194        err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md,
1195                              &is_64_bit);
1196        if (err)
1197                goto out_err;
1198        dso->is_64_bit = is_64_bit;
1199
1200        if (list_empty(&md.maps)) {
1201                err = -EINVAL;
1202                goto out_err;
1203        }
1204
1205        /* Remove old maps */
1206        old_map = map_groups__first(kmaps);
1207        while (old_map) {
1208                struct map *next = map_groups__next(old_map);
1209
1210                if (old_map != map)
1211                        map_groups__remove(kmaps, old_map);
1212                old_map = next;
1213        }
1214        machine->trampolines_mapped = false;
1215
1216        /* Find the kernel map using the '_stext' symbol */
1217        if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
1218                list_for_each_entry(new_map, &md.maps, node) {
1219                        if (stext >= new_map->start && stext < new_map->end) {
1220                                replacement_map = new_map;
1221                                break;
1222                        }
1223                }
1224        }
1225
1226        if (!replacement_map)
1227                replacement_map = list_entry(md.maps.next, struct map, node);
1228
1229        /* Add new maps */
1230        while (!list_empty(&md.maps)) {
1231                new_map = list_entry(md.maps.next, struct map, node);
1232                list_del_init(&new_map->node);
1233                if (new_map == replacement_map) {
1234                        map->start      = new_map->start;
1235                        map->end        = new_map->end;
1236                        map->pgoff      = new_map->pgoff;
1237                        map->map_ip     = new_map->map_ip;
1238                        map->unmap_ip   = new_map->unmap_ip;
1239                        /* Ensure maps are correctly ordered */
1240                        map__get(map);
1241                        map_groups__remove(kmaps, map);
1242                        map_groups__insert(kmaps, map);
1243                        map__put(map);
1244                } else {
1245                        map_groups__insert(kmaps, new_map);
1246                }
1247
1248                map__put(new_map);
1249        }
1250
1251        if (machine__is(machine, "x86_64")) {
1252                u64 addr;
1253
1254                /*
1255                 * If one of the corresponding symbols is there, assume the
1256                 * entry trampoline maps are too.
1257                 */
1258                if (!kallsyms__get_function_start(kallsyms_filename,
1259                                                  ENTRY_TRAMPOLINE_NAME,
1260                                                  &addr))
1261                        machine->trampolines_mapped = true;
1262        }
1263
1264        /*
1265         * Set the data type and long name so that kcore can be read via
1266         * dso__data_read_addr().
1267         */
1268        if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1269                dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
1270        else
1271                dso->binary_type = DSO_BINARY_TYPE__KCORE;
1272        dso__set_long_name(dso, strdup(kcore_filename), true);
1273
1274        close(fd);
1275
1276        if (map->prot & PROT_EXEC)
1277                pr_debug("Using %s for kernel object code\n", kcore_filename);
1278        else
1279                pr_debug("Using %s for kernel data\n", kcore_filename);
1280
1281        return 0;
1282
1283out_err:
1284        while (!list_empty(&md.maps)) {
1285                map = list_entry(md.maps.next, struct map, node);
1286                list_del_init(&map->node);
1287                map__put(map);
1288        }
1289        close(fd);
1290        return -EINVAL;
1291}
1292
1293/*
1294 * If the kernel is relocated at boot time, kallsyms won't match.  Compute the
1295 * delta based on the relocation reference symbol.
1296 */
1297static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
1298{
1299        u64 addr;
1300
1301        if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1302                return 0;
1303
1304        if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
1305                return -1;
1306
1307        *delta = addr - kmap->ref_reloc_sym->addr;
1308        return 0;
1309}
1310
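/*
 * Load kernel symbols from a kallsyms style file, adjust for any
 * boot-time relocation and then split them per module, either against
 * the kcore maps or into [kernel].N / [guest.kernel].N maps.
 */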
1311int __dso__load_kallsyms(struct dso *dso, const char *filename,
1312                         struct map *map, bool no_kcore)
1313{
1314        struct kmap *kmap = map__kmap(map);
1315        u64 delta = 0;
1316
1317        if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1318                return -1;
1319
1320        if (!kmap || !kmap->kmaps)
1321                return -1;
1322
1323        if (dso__load_all_kallsyms(dso, filename) < 0)
1324                return -1;
1325
1326        if (kallsyms__delta(kmap, filename, &delta))
1327                return -1;
1328
1329        symbols__fixup_end(&dso->symbols);
1330        symbols__fixup_duplicate(&dso->symbols);
1331
1332        if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1333                dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1334        else
1335                dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1336
1337        if (!no_kcore && !dso__load_kcore(dso, map, filename))
1338                return map_groups__split_kallsyms_for_kcore(kmap->kmaps, dso);
1339        else
1340                return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map);
1341}
1342
1343int dso__load_kallsyms(struct dso *dso, const char *filename,
1344                       struct map *map)
1345{
1346        return __dso__load_kallsyms(dso, filename, map, false);
1347}
1348
1349static int dso__load_perf_map(const char *map_path, struct dso *dso)
1350{
1351        char *line = NULL;
1352        size_t n;
1353        FILE *file;
1354        int nr_syms = 0;
1355
1356        file = fopen(map_path, "r");
1357        if (file == NULL)
1358                goto out_failure;
1359
1360        while (!feof(file)) {
1361                u64 start, size;
1362                struct symbol *sym;
1363                int line_len, len;
1364
1365                line_len = getline(&line, &n, file);
1366                if (line_len < 0)
1367                        break;
1368
1369                if (!line)
1370                        goto out_failure;
1371
1372                line[--line_len] = '\0'; /* \n */
1373
1374                len = hex2u64(line, &start);
1375
1376                len++;
1377                if (len + 2 >= line_len)
1378                        continue;
1379
1380                len += hex2u64(line + len, &size);
1381
1382                len++;
1383                if (len + 2 >= line_len)
1384                        continue;
1385
1386                sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);
1387
1388                if (sym == NULL)
1389                        goto out_delete_line;
1390
1391                symbols__insert(&dso->symbols, sym);
1392                nr_syms++;
1393        }
1394
1395        free(line);
1396        fclose(file);
1397
1398        return nr_syms;
1399
1400out_delete_line:
1401        free(line);
1402out_failure:
1403        return -1;
1404}
1405
1406static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1407                                           enum dso_binary_type type)
1408{
1409        switch (type) {
1410        case DSO_BINARY_TYPE__JAVA_JIT:
1411        case DSO_BINARY_TYPE__DEBUGLINK:
1412        case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
1413        case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
1414        case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
1415        case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
1416        case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1417                return !kmod && dso->kernel == DSO_TYPE_USER;
1418
1419        case DSO_BINARY_TYPE__KALLSYMS:
1420        case DSO_BINARY_TYPE__VMLINUX:
1421        case DSO_BINARY_TYPE__KCORE:
1422                return dso->kernel == DSO_TYPE_KERNEL;
1423
1424        case DSO_BINARY_TYPE__GUEST_KALLSYMS:
1425        case DSO_BINARY_TYPE__GUEST_VMLINUX:
1426        case DSO_BINARY_TYPE__GUEST_KCORE:
1427                return dso->kernel == DSO_TYPE_GUEST_KERNEL;
1428
1429        case DSO_BINARY_TYPE__GUEST_KMODULE:
1430        case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1431        case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1432        case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1433                /*
1434                 * kernel modules know their symtab type - it's set when
1435                 * creating a module dso in machine__findnew_module_map().
1436                 */
1437                return kmod && dso->symtab_type == type;
1438
1439        case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1440        case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
1441                return true;
1442
1443        case DSO_BINARY_TYPE__NOT_FOUND:
1444        default:
1445                return false;
1446        }
1447}
1448
1449/* Checks for the existence of the perf-<pid>.map file in two different
 1450 * locations.  First, if the process is in a separate mount namespace, check in
 1451 * that namespace using the pid of the innermost pid namespace.  If it's not in a
1452 * namespace, or the file can't be found there, try in the mount namespace of
1453 * the tracing process using our view of its pid.
1454 */
1455static int dso__find_perf_map(char *filebuf, size_t bufsz,
1456                              struct nsinfo **nsip)
1457{
1458        struct nscookie nsc;
1459        struct nsinfo *nsi;
1460        struct nsinfo *nnsi;
1461        int rc = -1;
1462
1463        nsi = *nsip;
1464
1465        if (nsi->need_setns) {
1466                snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
1467                nsinfo__mountns_enter(nsi, &nsc);
1468                rc = access(filebuf, R_OK);
1469                nsinfo__mountns_exit(&nsc);
1470                if (rc == 0)
1471                        return rc;
1472        }
1473
1474        nnsi = nsinfo__copy(nsi);
1475        if (nnsi) {
1476                nsinfo__put(nsi);
1477
1478                nnsi->need_setns = false;
1479                snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
1480                *nsip = nnsi;
1481                rc = 0;
1482        }
1483
1484        return rc;
1485}
1486
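/*
 * Top level symbol loader: kernel DSOs go through the kallsyms/vmlinux
 * loaders, /tmp/perf-<pid>.map files are parsed directly, and everything
 * else is probed against the candidate types in binary_type_symtab[].
 */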
1487int dso__load(struct dso *dso, struct map *map)
1488{
1489        char *name;
1490        int ret = -1;
1491        u_int i;
1492        struct machine *machine;
1493        char *root_dir = (char *) "";
1494        int ss_pos = 0;
1495        struct symsrc ss_[2];
1496        struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1497        bool kmod;
1498        bool perfmap;
1499        unsigned char build_id[BUILD_ID_SIZE];
1500        struct nscookie nsc;
1501        char newmapname[PATH_MAX];
1502        const char *map_path = dso->long_name;
1503
1504        perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
1505        if (perfmap) {
1506                if (dso->nsinfo && (dso__find_perf_map(newmapname,
1507                    sizeof(newmapname), &dso->nsinfo) == 0)) {
1508                        map_path = newmapname;
1509                }
1510        }
1511
1512        nsinfo__mountns_enter(dso->nsinfo, &nsc);
1513        pthread_mutex_lock(&dso->lock);
1514
1515        /* check again under the dso->lock */
1516        if (dso__loaded(dso)) {
1517                ret = 1;
1518                goto out;
1519        }
1520
1521        if (map->groups && map->groups->machine)
1522                machine = map->groups->machine;
1523        else
1524                machine = NULL;
1525
1526        if (dso->kernel) {
1527                if (dso->kernel == DSO_TYPE_KERNEL)
1528                        ret = dso__load_kernel_sym(dso, map);
1529                else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1530                        ret = dso__load_guest_kernel_sym(dso, map);
1531
1532                if (machine__is(machine, "x86_64"))
1533                        machine__map_x86_64_entry_trampolines(machine, dso);
1534                goto out;
1535        }
1536
1537        dso->adjust_symbols = 0;
1538
1539        if (perfmap) {
1540                struct stat st;
1541
1542                if (lstat(map_path, &st) < 0)
1543                        goto out;
1544
1545                if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) {
1546                        pr_warning("File %s not owned by current user or root, "
1547                                   "ignoring it (use -f to override).\n", map_path);
1548                        goto out;
1549                }
1550
1551                ret = dso__load_perf_map(map_path, dso);
1552                dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1553                                             DSO_BINARY_TYPE__NOT_FOUND;
1554                goto out;
1555        }
1556
1557        if (machine)
1558                root_dir = machine->root_dir;
1559
1560        name = malloc(PATH_MAX);
1561        if (!name)
1562                goto out;
1563
1564        kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1565                dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1566                dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1567                dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1568
1569
1570        /*
1571         * Read the build id if possible. This is required for
1572         * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
1573         */
1574        if (!dso->has_build_id &&
1575            is_regular_file(dso->long_name)) {
1576            __symbol__join_symfs(name, PATH_MAX, dso->long_name);
1577            if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0)
1578                dso__set_build_id(dso, build_id);
1579        }
1580
1581        /*
1582         * Iterate over candidate debug images.
1583         * Keep track of "interesting" ones (those which have a symtab, dynsym,
1584         * and/or opd section) for processing.
1585         */
1586        for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
1587                struct symsrc *ss = &ss_[ss_pos];
1588                bool next_slot = false;
1589                bool is_reg;
1590                bool nsexit;
1591                int sirc = -1;
1592
1593                enum dso_binary_type symtab_type = binary_type_symtab[i];
1594
1595                nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
1596                    symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);
1597
1598                if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
1599                        continue;
1600
1601                if (dso__read_binary_type_filename(dso, symtab_type,
1602                                                   root_dir, name, PATH_MAX))
1603                        continue;
1604
1605                if (nsexit)
1606                        nsinfo__mountns_exit(&nsc);
1607
1608                is_reg = is_regular_file(name);
1609                if (is_reg)
1610                        sirc = symsrc__init(ss, dso, name, symtab_type);
1611
1612                if (nsexit)
1613                        nsinfo__mountns_enter(dso->nsinfo, &nsc);
1614
1615                if (!is_reg || sirc < 0)
1616                        continue;
1617
1618                if (!syms_ss && symsrc__has_symtab(ss)) {
1619                        syms_ss = ss;
1620                        next_slot = true;
1621                        if (!dso->symsrc_filename)
1622                                dso->symsrc_filename = strdup(name);
1623                }
1624
1625                if (!runtime_ss && symsrc__possibly_runtime(ss)) {
1626                        runtime_ss = ss;
1627                        next_slot = true;
1628                }
1629
1630                if (next_slot) {
1631                        ss_pos++;
1632
1633                        if (syms_ss && runtime_ss)
1634                                break;
1635                } else {
1636                        symsrc__destroy(ss);
1637                }
1638
1639        }
1640
1641        if (!runtime_ss && !syms_ss)
1642                goto out_free;
1643
1644        if (runtime_ss && !syms_ss)
1645                syms_ss = runtime_ss;
1647
1648        /* We'll have to hope for the best */
1649        if (!runtime_ss && syms_ss)
1650                runtime_ss = syms_ss;
1651
1652        if (syms_ss)
1653                ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
1654        else
1655                ret = -1;
1656
1657        if (ret > 0) {
1658                int nr_plt;
1659
1660                nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
1661                if (nr_plt > 0)
1662                        ret += nr_plt;
1663        }
1664
1665        for (; ss_pos > 0; ss_pos--)
1666                symsrc__destroy(&ss_[ss_pos - 1]);
1667out_free:
1668        free(name);
1669        if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
1670                ret = 0;
1671out:
1672        dso__set_loaded(dso);
1673        pthread_mutex_unlock(&dso->lock);
1674        nsinfo__mountns_exit(&nsc);
1675
1676        return ret;
1677}
1678
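    /*
     * Look up a map by the short name of its DSO: walk the maps list under
     * the reader lock and return the first match, or NULL if none is found.
     */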
1679struct map *map_groups__find_by_name(struct map_groups *mg, const char *name)
1680{
1681        struct maps *maps = &mg->maps;
1682        struct map *map;
1683
1684        down_read(&maps->lock);
1685
1686        for (map = maps__first(maps); map; map = map__next(map)) {
1687                if (map->dso && strcmp(map->dso->short_name, name) == 0)
1688                        goto out_unlock;
1689        }
1690
1691        map = NULL;
1692
1693out_unlock:
1694        up_read(&maps->lock);
1695        return map;
1696}
1697
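    /*
     * Load kernel symbols from one specific vmlinux image. Relative paths are
     * resolved under the configured symfs, absolute paths are used as-is. On
     * success the DSO's binary type and long name are updated to point at the
     * image that was actually used.
     */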
1698int dso__load_vmlinux(struct dso *dso, struct map *map,
1699                      const char *vmlinux, bool vmlinux_allocated)
1700{
1701        int err = -1;
1702        struct symsrc ss;
1703        char symfs_vmlinux[PATH_MAX];
1704        enum dso_binary_type symtab_type;
1705
1706        if (vmlinux[0] == '/')
1707                snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
1708        else
1709                symbol__join_symfs(symfs_vmlinux, vmlinux);
1710
1711        if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1712                symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1713        else
1714                symtab_type = DSO_BINARY_TYPE__VMLINUX;
1715
1716        if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
1717                return -1;
1718
1719        err = dso__load_sym(dso, map, &ss, &ss, 0);
1720        symsrc__destroy(&ss);
1721
1722        if (err > 0) {
1723                if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
1724                        dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
1725                else
1726                        dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
1727                dso__set_long_name(dso, vmlinux, vmlinux_allocated);
1728                dso__set_loaded(dso);
1729                pr_debug("Using %s for symbols\n", symfs_vmlinux);
1730        }
1731
1732        return err;
1733}
1734
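    /*
     * Try each entry of the global vmlinux_path list in turn, then fall back
     * to a vmlinux located via the build-id cache unless
     * symbol_conf.ignore_vmlinux_buildid is set. Returns > 0 for the first
     * image that yields symbols.
     */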
1735int dso__load_vmlinux_path(struct dso *dso, struct map *map)
1736{
1737        int i, err = 0;
1738        char *filename = NULL;
1739
1740        pr_debug("Looking at the vmlinux_path (%d entries long)\n",
1741                 vmlinux_path__nr_entries + 1);
1742
1743        for (i = 0; i < vmlinux_path__nr_entries; ++i) {
1744                err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
1745                if (err > 0)
1746                        goto out;
1747        }
1748
1749        if (!symbol_conf.ignore_vmlinux_buildid)
1750                filename = dso__build_id_filename(dso, NULL, 0, false);
1751        if (filename != NULL) {
1752                err = dso__load_vmlinux(dso, map, filename, true);
1753                if (err > 0)
1754                        goto out;
1755                free(filename);
1756        }
1757out:
1758        return err;
1759}
1760
1761static bool visible_dir_filter(const char *name, struct dirent *d)
1762{
1763        if (d->d_type != DT_DIR)
1764                return false;
1765        return lsdir_no_dot_filter(name, d);
1766}
1767
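    /*
     * Scan the subdirectories of 'dir' for a kallsyms file whose addresses
     * validate against the given map. On success the matching kallsyms path
     * is copied back into 'dir' and 0 is returned, -1 otherwise.
     */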
1768static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
1769{
1770        char kallsyms_filename[PATH_MAX];
1771        int ret = -1;
1772        struct strlist *dirs;
1773        struct str_node *nd;
1774
1775        dirs = lsdir(dir, visible_dir_filter);
1776        if (!dirs)
1777                return -1;
1778
1779        strlist__for_each_entry(nd, dirs) {
1780                scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
1781                          "%s/%s/kallsyms", dir, nd->s);
1782                if (!validate_kcore_addresses(kallsyms_filename, map)) {
1783                        strlcpy(dir, kallsyms_filename, dir_sz);
1784                        ret = 0;
1785                        break;
1786                }
1787        }
1788
1789        strlist__delete(dirs);
1790
1791        return ret;
1792}
1793
1794/*
1795 * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
1796 * since access(R_OK) only checks the real UID/GID, but open() uses the effective
1797 * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
1798 */
1799static bool filename__readable(const char *file)
1800{
1801        int fd = open(file, O_RDONLY);
1802        if (fd < 0)
1803                return false;
1804        close(fd);
1805        return true;
1806}
1807
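    /*
     * Pick a kallsyms source for the kernel DSO: prefer /proc/kallsyms when
     * the DSO's build-id matches the running kernel and /proc/kcore is
     * usable, otherwise look for a kcore/kallsyms copy in the build-id
     * cache. Returns a malloc'd path or NULL if nothing suitable is found.
     */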
1808static char *dso__find_kallsyms(struct dso *dso, struct map *map)
1809{
1810        u8 host_build_id[BUILD_ID_SIZE];
1811        char sbuild_id[SBUILD_ID_SIZE];
1812        bool is_host = false;
1813        char path[PATH_MAX];
1814
1815        if (!dso->has_build_id) {
1816                /*
1817                 * Last resort, if we don't have a build-id and couldn't find
1818                 * any vmlinux file, try the running kernel kallsyms table.
1819                 */
1820                goto proc_kallsyms;
1821        }
1822
1823        if (sysfs__read_build_id("/sys/kernel/notes", host_build_id,
1824                                 sizeof(host_build_id)) == 0)
1825                is_host = dso__build_id_equal(dso, host_build_id);
1826
1827        /* Try a fast path for /proc/kallsyms if possible */
1828        if (is_host) {
1829                /*
1830                 * Do not check the build-id cache, unless we know we cannot use
1831                 * /proc/kcore or the module maps don't match /proc/kallsyms.
1832                 * To check readability of /proc/kcore, do not use access(R_OK),
1833                 * since /proc/kcore requires CAP_SYS_RAWIO to read and access()
1834                 * cannot check for that.
1835                 */
1836                if (filename__readable("/proc/kcore") &&
1837                    !validate_kcore_addresses("/proc/kallsyms", map))
1838                        goto proc_kallsyms;
1839        }
1840
1841        build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1842
1843        /* Find kallsyms in build-id cache with kcore */
1844        scnprintf(path, sizeof(path), "%s/%s/%s",
1845                  buildid_dir, DSO__NAME_KCORE, sbuild_id);
1846
1847        if (!find_matching_kcore(map, path, sizeof(path)))
1848                return strdup(path);
1849
1850        /* Use current /proc/kallsyms if possible */
1851        if (is_host) {
1852proc_kallsyms:
1853                return strdup("/proc/kallsyms");
1854        }
1855
1856        /* Finally, find a cache of kallsyms */
1857        if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
1858                pr_err("No kallsyms or vmlinux with build-id %s was found\n",
1859                       sbuild_id);
1860                return NULL;
1861        }
1862
1863        return strdup(path);
1864}
1865
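    /*
     * Resolve symbols for the host kernel DSO: a user-supplied kallsyms or
     * vmlinux is tried first (see the comment below), then the vmlinux_path
     * list, and finally whatever kallsyms dso__find_kallsyms() comes up with.
     */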
1866static int dso__load_kernel_sym(struct dso *dso, struct map *map)
1867{
1868        int err;
1869        const char *kallsyms_filename = NULL;
1870        char *kallsyms_allocated_filename = NULL;
1871        /*
1872         * Step 1: if the user specified a kallsyms or vmlinux filename, use
1873         * it and only it, reporting errors to the user if it cannot be used.
1874         *
1875         * For instance, try to analyse an ARM perf.data file _without_ a
1876         * build-id, or if the user specifies the wrong path to the right
1877         * vmlinux file, obviously we can't fall back to another vmlinux (an
1878         * x86_64 one, on the machine where analysis is being performed, say),
1879         * or worse, /proc/kallsyms.
1880         *
1881         * If the specified file _has_ a build-id and there is a build-id
1882         * section in the perf.data file, we will still do the expected
1883         * validation in dso__load_vmlinux and will bail out if they don't
1884         * match.
1885         */
1886        if (symbol_conf.kallsyms_name != NULL) {
1887                kallsyms_filename = symbol_conf.kallsyms_name;
1888                goto do_kallsyms;
1889        }
1890
1891        if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
1892                return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
1893        }
1894
1895        if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
1896                err = dso__load_vmlinux_path(dso, map);
1897                if (err > 0)
1898                        return err;
1899        }
1900
1901        /* do not try local files if a symfs was given */
1902        if (symbol_conf.symfs[0] != 0)
1903                return -1;
1904
1905        kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
1906        if (!kallsyms_allocated_filename)
1907                return -1;
1908
1909        kallsyms_filename = kallsyms_allocated_filename;
1910
1911do_kallsyms:
1912        err = dso__load_kallsyms(dso, kallsyms_filename, map);
1913        if (err > 0)
1914                pr_debug("Using %s for symbols\n", kallsyms_filename);
1915        free(kallsyms_allocated_filename);
1916
1917        if (err > 0 && !dso__is_kcore(dso)) {
1918                dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
1919                dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
1920                map__fixup_start(map);
1921                map__fixup_end(map);
1922        }
1923
1924        return err;
1925}
1926
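    /*
     * Resolve symbols for a guest kernel DSO. The default guest uses the
     * vmlinux or kallsyms file named on the command line; other guests read
     * kallsyms from under the machine's root_dir.
     */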
1927static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
1928{
1929        int err;
1930        const char *kallsyms_filename = NULL;
1931        struct machine *machine;
1932        char path[PATH_MAX];
1933
1934        if (!map->groups) {
1935                pr_debug("Guest kernel map has no map groups, cannot load symbols\n");
1936                return -1;
1937        }
1938        machine = map->groups->machine;
1939
1940        if (machine__is_default_guest(machine)) {
1941                /*
1942                 * If the user specified a vmlinux filename, use it and only
1943                 * it, reporting errors to the user if it cannot be used.
1944                 * Otherwise use the guest kallsyms file given on the command line.
1945                 */
1946                if (symbol_conf.default_guest_vmlinux_name != NULL) {
1947                        err = dso__load_vmlinux(dso, map,
1948                                                symbol_conf.default_guest_vmlinux_name,
1949                                                false);
1950                        return err;
1951                }
1952
1953                kallsyms_filename = symbol_conf.default_guest_kallsyms;
1954                if (!kallsyms_filename)
1955                        return -1;
1956        } else {
1957                scnprintf(path, sizeof(path), "%s/proc/kallsyms", machine->root_dir);
1958                kallsyms_filename = path;
1959        }
1960
1961        err = dso__load_kallsyms(dso, kallsyms_filename, map);
1962        if (err > 0)
1963                pr_debug("Using %s for symbols\n", kallsyms_filename);
1964        if (err > 0 && !dso__is_kcore(dso)) {
1965                dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1966                dso__set_long_name(dso, machine->mmap_name, false);
1967                map__fixup_start(map);
1968                map__fixup_end(map);
1969        }
1970
1971        return err;
1972}
1973
1974static void vmlinux_path__exit(void)
1975{
1976        while (--vmlinux_path__nr_entries >= 0)
1977                zfree(&vmlinux_path[vmlinux_path__nr_entries]);
1978        vmlinux_path__nr_entries = 0;
1979
1980        zfree(&vmlinux_path);
1981}
1982
1983static const char * const vmlinux_paths[] = {
1984        "vmlinux",
1985        "/boot/vmlinux"
1986};
1987
1988static const char * const vmlinux_paths_upd[] = {
1989        "/boot/vmlinux-%s",
1990        "/usr/lib/debug/boot/vmlinux-%s",
1991        "/lib/modules/%s/build/vmlinux",
1992        "/usr/lib/debug/lib/modules/%s/vmlinux",
1993        "/usr/lib/debug/boot/vmlinux-%s.debug"
1994};
1995
1996static int vmlinux_path__add(const char *new_entry)
1997{
1998        vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
1999        if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2000                return -1;
2001        ++vmlinux_path__nr_entries;
2002
2003        return 0;
2004}
2005
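    /*
     * Build the global vmlinux_path list: the fixed entries above plus, when
     * no symfs is in use, the kernel-version templates expanded with
     * env->os_release if available or uname() otherwise.
     */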
2006static int vmlinux_path__init(struct perf_env *env)
2007{
2008        struct utsname uts;
2009        char bf[PATH_MAX];
2010        char *kernel_version;
2011        unsigned int i;
2012
2013        vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
2014                              ARRAY_SIZE(vmlinux_paths_upd)));
2015        if (vmlinux_path == NULL)
2016                return -1;
2017
2018        for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
2019                if (vmlinux_path__add(vmlinux_paths[i]) < 0)
2020                        goto out_fail;
2021
2022        /* only try kernel version if no symfs was given */
2023        if (symbol_conf.symfs[0] != 0)
2024                return 0;
2025
2026        if (env) {
2027                kernel_version = env->os_release;
2028        } else {
2029                if (uname(&uts) < 0)
2030                        goto out_fail;
2031
2032                kernel_version = uts.release;
2033        }
2034
2035        for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
2036                snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
2037                if (vmlinux_path__add(bf) < 0)
2038                        goto out_fail;
2039        }
2040
2041        return 0;
2042
2043out_fail:
2044        vmlinux_path__exit();
2045        return -1;
2046}
2047
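    /*
     * Parse a user-supplied list string into a strlist and flag that symbol
     * filtering is active. A NULL string is not an error.
     */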
2048int setup_list(struct strlist **list, const char *list_str,
2049                      const char *list_name)
2050{
2051        if (list_str == NULL)
2052                return 0;
2053
2054        *list = strlist__new(list_str, NULL);
2055        if (!*list) {
2056                pr_err("problems parsing %s list\n", list_name);
2057                return -1;
2058        }
2059
2060        symbol_conf.has_filter = true;
2061        return 0;
2062}
2063
2064int setup_intlist(struct intlist **list, const char *list_str,
2065                  const char *list_name)
2066{
2067        if (list_str == NULL)
2068                return 0;
2069
2070        *list = intlist__new(list_str);
2071        if (!*list) {
2072                pr_err("problems parsing %s list\n", list_name);
2073                return -1;
2074        }
2075        return 0;
2076}
2077
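    /*
     * Returns true when kallsyms addresses will be hidden from us: for a
     * process whose real or effective UID is non-zero, any non-zero
     * kptr_restrict hides them; for root, only kptr_restrict == 2 does.
     */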
2078static bool symbol__read_kptr_restrict(void)
2079{
2080        bool value = false;
2081        FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
2082
2083        if (fp != NULL) {
2084                char line[8];
2085
2086                if (fgets(line, sizeof(line), fp) != NULL)
2087                        value = ((geteuid() != 0) || (getuid() != 0)) ?
2088                                        (atoi(line) != 0) :
2089                                        (atoi(line) == 2);
2090
2091                fclose(fp);
2092        }
2093
2094        return value;
2095}
2096
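    /*
     * Reserve room for struct annotation in the per-symbol private area.
     * Must run before symbol__init(), which consumes priv_size.
     */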
2097int symbol__annotation_init(void)
2098{
2099        if (symbol_conf.init_annotation)
2100                return 0;
2101
2102        if (symbol_conf.initialized) {
2103                pr_err("Annotation needs to be initialized before symbol__init()\n");
2104                return -1;
2105        }
2106
2107        symbol_conf.priv_size += sizeof(struct annotation);
2108        symbol_conf.init_annotation = true;
2109        return 0;
2110}
2111
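    /*
     * One-time symbol subsystem setup: align priv_size, initialize the ELF
     * layer, build the vmlinux_path list, parse the filter lists (dso, comm,
     * pid, tid, symbol, bt_stop), normalize the symfs path and read
     * kptr_restrict.
     */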
2112int symbol__init(struct perf_env *env)
2113{
2114        const char *symfs;
2115
2116        if (symbol_conf.initialized)
2117                return 0;
2118
2119        symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
2120
2121        symbol__elf_init();
2122
2123        if (symbol_conf.sort_by_name)
2124                symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
2125                                          sizeof(struct symbol));
2126
2127        if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
2128                return -1;
2129
2130        if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
2131                pr_err("'.' is the only invalid --field-separator argument\n");
2132                return -1;
2133        }
2134
2135        if (setup_list(&symbol_conf.dso_list,
2136                       symbol_conf.dso_list_str, "dso") < 0)
2137                return -1;
2138
2139        if (setup_list(&symbol_conf.comm_list,
2140                       symbol_conf.comm_list_str, "comm") < 0)
2141                goto out_free_dso_list;
2142
2143        if (setup_intlist(&symbol_conf.pid_list,
2144                       symbol_conf.pid_list_str, "pid") < 0)
2145                goto out_free_comm_list;
2146
2147        if (setup_intlist(&symbol_conf.tid_list,
2148                       symbol_conf.tid_list_str, "tid") < 0)
2149                goto out_free_pid_list;
2150
2151        if (setup_list(&symbol_conf.sym_list,
2152                       symbol_conf.sym_list_str, "symbol") < 0)
2153                goto out_free_tid_list;
2154
2155        if (setup_list(&symbol_conf.bt_stop_list,
2156                       symbol_conf.bt_stop_list_str, "symbol") < 0)
2157                goto out_free_sym_list;
2158
2159        /*
2160         * A symfs path of "/" is identical to "", so
2161         * reset it here for simplicity.
2162         */
2163        symfs = realpath(symbol_conf.symfs, NULL);
2164        if (symfs == NULL)
2165                symfs = symbol_conf.symfs;
2166        if (strcmp(symfs, "/") == 0)
2167                symbol_conf.symfs = "";
2168        if (symfs != symbol_conf.symfs)
2169                free((void *)symfs);
2170
2171        symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
2172
2173        symbol_conf.initialized = true;
2174        return 0;
2175
2176out_free_sym_list:
2177        strlist__delete(symbol_conf.sym_list);
2178out_free_tid_list:
2179        intlist__delete(symbol_conf.tid_list);
2180out_free_pid_list:
2181        intlist__delete(symbol_conf.pid_list);
2182out_free_comm_list:
2183        strlist__delete(symbol_conf.comm_list);
2184out_free_dso_list:
2185        strlist__delete(symbol_conf.dso_list);
2186        return -1;
2187}
2188
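    /*
     * Tear down everything symbol__init() set up: the filter lists and the
     * vmlinux_path list.
     */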
2189void symbol__exit(void)
2190{
2191        if (!symbol_conf.initialized)
2192                return;
2193        strlist__delete(symbol_conf.bt_stop_list);
2194        strlist__delete(symbol_conf.sym_list);
2195        strlist__delete(symbol_conf.dso_list);
2196        strlist__delete(symbol_conf.comm_list);
2197        intlist__delete(symbol_conf.tid_list);
2198        intlist__delete(symbol_conf.pid_list);
2199        vmlinux_path__exit();
2200        symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2201        symbol_conf.bt_stop_list = NULL;
2202        symbol_conf.initialized = false;
2203}
2204
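    /*
     * Option callback for --symfs: remember the directory and redirect the
     * build-id cache to <symfs>/.debug.
     */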
2205int symbol__config_symfs(const struct option *opt __maybe_unused,
2206                         const char *dir, int unset __maybe_unused)
2207{
2208        char *bf = NULL;
2209        int ret;
2210
2211        symbol_conf.symfs = strdup(dir);
2212        if (symbol_conf.symfs == NULL)
2213                return -ENOMEM;
2214
2215        /* Skip the locally configured build-id cache if a symfs is given,
2216         * and point the build-id dir at symfs/.debug instead.
2217         */
2218        ret = asprintf(&bf, "%s/%s", dir, ".debug");
2219        if (ret < 0)
2220                return -ENOMEM;
2221
2222        set_buildid_dir(bf);
2223
2224        free(bf);
2225        return 0;
2226}
2227
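    /*
     * struct mem_info is reference counted: mem_info__new() starts at one,
     * mem_info__get() takes a reference and mem_info__put() drops one,
     * freeing the object when the count reaches zero.
     */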
2228struct mem_info *mem_info__get(struct mem_info *mi)
2229{
2230        if (mi)
2231                refcount_inc(&mi->refcnt);
2232        return mi;
2233}
2234
2235void mem_info__put(struct mem_info *mi)
2236{
2237        if (mi && refcount_dec_and_test(&mi->refcnt))
2238                free(mi);
2239}
2240
2241struct mem_info *mem_info__new(void)
2242{
2243        struct mem_info *mi = zalloc(sizeof(*mi));
2244
2245        if (mi)
2246                refcount_set(&mi->refcnt, 1);
2247        return mi;
2248}
2249