linux/tools/perf/util/machine.c
#include "debug.h"
#include "event.h"
#include "machine.h"
#include "map.h"
#include "strlist.h"
#include "thread.h"
#include <stdbool.h>

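/*
 * Initialize a machine: empty kernel map groups, DSO lists and thread
 * tree, then record its pid and a copy of root_dir.  Guest machines
 * (pid != HOST_KERNEL_ID) also get a "[guest/<pid>]" pseudo thread so
 * their samples have a thread to be attributed to.
 */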
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
        map_groups__init(&machine->kmaps);
        RB_CLEAR_NODE(&machine->rb_node);
        INIT_LIST_HEAD(&machine->user_dsos);
        INIT_LIST_HEAD(&machine->kernel_dsos);

        machine->threads = RB_ROOT;
        INIT_LIST_HEAD(&machine->dead_threads);
        machine->last_match = NULL;

        machine->kmaps.machine = machine;
        machine->pid = pid;

        machine->root_dir = strdup(root_dir);
        if (machine->root_dir == NULL)
                return -ENOMEM;

        if (pid != HOST_KERNEL_ID) {
                struct thread *thread = machine__findnew_thread(machine, pid);
                char comm[64];

                if (thread == NULL)
                        return -ENOMEM;

                snprintf(comm, sizeof(comm), "[guest/%d]", pid);
                thread__set_comm(thread, comm);
        }

        return 0;
}

static void dsos__delete(struct list_head *dsos)
{
        struct dso *pos, *n;

        list_for_each_entry_safe(pos, n, dsos, node) {
                list_del(&pos->node);
                dso__delete(pos);
        }
}

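/*
 * Undo machine__init(): release the kernel map groups, both DSO lists
 * and the root_dir copy.  machine__delete() additionally frees the
 * struct machine itself.
 */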
void machine__exit(struct machine *machine)
{
        map_groups__exit(&machine->kmaps);
        dsos__delete(&machine->user_dsos);
        dsos__delete(&machine->kernel_dsos);
        free(machine->root_dir);
        machine->root_dir = NULL;
}

void machine__delete(struct machine *machine)
{
        machine__exit(machine);
        free(machine);
}

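/*
 * Allocate and initialize a new machine and insert it into the rb tree
 * of machines, keyed by pid.  Returns NULL if the allocation or
 * machine__init() fails.
 */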
struct machine *machines__add(struct rb_root *machines, pid_t pid,
                              const char *root_dir)
{
        struct rb_node **p = &machines->rb_node;
        struct rb_node *parent = NULL;
        struct machine *pos, *machine = malloc(sizeof(*machine));

        if (machine == NULL)
                return NULL;

        if (machine__init(machine, root_dir, pid) != 0) {
                free(machine);
                return NULL;
        }

        while (*p != NULL) {
                parent = *p;
                pos = rb_entry(parent, struct machine, rb_node);
                if (pid < pos->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&machine->rb_node, parent, p);
        rb_insert_color(&machine->rb_node, machines);

        return machine;
}

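/*
 * Look up a machine by pid in the rb tree.  If there is no exact match,
 * fall back to the default machine (pid 0), when one was encountered
 * during the walk.
 */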
struct machine *machines__find(struct rb_root *machines, pid_t pid)
{
        struct rb_node **p = &machines->rb_node;
        struct rb_node *parent = NULL;
        struct machine *machine;
        struct machine *default_machine = NULL;

        while (*p != NULL) {
                parent = *p;
                machine = rb_entry(parent, struct machine, rb_node);
                if (pid < machine->pid)
                        p = &(*p)->rb_left;
                else if (pid > machine->pid)
                        p = &(*p)->rb_right;
                else
                        return machine;
                if (!machine->pid)
                        default_machine = machine;
        }

        return default_machine;
}

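/*
 * Find the machine for @pid, creating it if it does not exist yet.  For
 * guest pids a root_dir below symbol_conf.guestmount is used; if that
 * directory is not readable the failure is reported only once per path
 * and NULL is returned.
 */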
struct machine *machines__findnew(struct rb_root *machines, pid_t pid)
{
        char path[PATH_MAX];
        const char *root_dir = "";
        struct machine *machine = machines__find(machines, pid);

        if (machine && (machine->pid == pid))
                goto out;

        if ((pid != HOST_KERNEL_ID) &&
            (pid != DEFAULT_GUEST_KERNEL_ID) &&
            (symbol_conf.guestmount)) {
                sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
                if (access(path, R_OK)) {
                        static struct strlist *seen;

                        if (!seen)
                                seen = strlist__new(true, NULL);

                        if (!strlist__has_entry(seen, path)) {
                                pr_err("Can't access file %s\n", path);
                                strlist__add(seen, path);
                        }
                        machine = NULL;
                        goto out;
                }
                root_dir = path;
        }

        machine = machines__add(machines, pid, root_dir);
out:
        return machine;
}

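/* Invoke @process on every machine in the tree, in pid order. */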
void machines__process(struct rb_root *machines,
                       machine__process_t process, void *data)
{
        struct rb_node *nd;

        for (nd = rb_first(machines); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
                process(pos, data);
        }
}

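/*
 * Format the synthetic name used for the kernel mmap of this machine:
 * "[kernel.kallsyms]" for the host, "[guest.kernel.kallsyms]" for the
 * default guest and "[guest.kernel.kallsyms.<pid>]" for other guests.
 */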
char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
        if (machine__is_host(machine))
                snprintf(bf, size, "[%s]", "kernel.kallsyms");
        else if (machine__is_default_guest(machine))
                snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
        else {
                snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
                         machine->pid);
        }

        return bf;
}

void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size)
{
        struct rb_node *node;
        struct machine *machine;

        for (node = rb_first(machines); node; node = rb_next(node)) {
                machine = rb_entry(node, struct machine, rb_node);
                machine->id_hdr_size = id_hdr_size;
        }
}

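/*
 * Look up a thread by pid in this machine's thread rb tree.  A one entry
 * last_match cache short-circuits the common case of consecutive lookups
 * for the same pid.  When @create is true a missing thread is allocated
 * and inserted at the position where the search ended.
 */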
static struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid,
                                                bool create)
{
        struct rb_node **p = &machine->threads.rb_node;
        struct rb_node *parent = NULL;
        struct thread *th;

        /*
         * Front-end cache - PID lookups come in blocks,
         * so most of the time we don't have to look up
         * the full rbtree:
         */
        if (machine->last_match && machine->last_match->pid == pid)
                return machine->last_match;

        while (*p != NULL) {
                parent = *p;
                th = rb_entry(parent, struct thread, rb_node);

                if (th->pid == pid) {
                        machine->last_match = th;
                        return th;
                }

                if (pid < th->pid)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        if (!create)
                return NULL;

        th = thread__new(pid);
        if (th != NULL) {
                rb_link_node(&th->rb_node, parent, p);
                rb_insert_color(&th->rb_node, &machine->threads);
                machine->last_match = th;
        }

        return th;
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid)
{
        return __machine__findnew_thread(machine, pid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid)
{
        return __machine__findnew_thread(machine, pid, false);
}

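/*
 * PERF_RECORD_COMM: look up (or create) the thread and update its comm
 * with the name carried in the event.
 */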
int machine__process_comm_event(struct machine *machine, union perf_event *event)
{
        struct thread *thread = machine__findnew_thread(machine, event->comm.tid);

        if (dump_trace)
                perf_event__fprintf_comm(event, stdout);

        if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
                dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
                return -1;
        }

        return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
                                union perf_event *event)
{
        dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
                    event->lost.id, event->lost.lost);
        return 0;
}

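/*
 * Set the start/end addresses of all the kernel vmlinux maps from the
 * kernel MMAP event.
 */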
static void machine__set_kernel_mmap_len(struct machine *machine,
                                         union perf_event *event)
{
        int i;

        for (i = 0; i < MAP__NR_TYPES; i++) {
                machine->vmlinux_maps[i]->start = event->mmap.start;
                machine->vmlinux_maps[i]->end   = (event->mmap.start +
                                                   event->mmap.len);
                /*
                 * Be a bit paranoid here: some perf.data files come with
                 * a zero sized synthesized MMAP event for the kernel.
                 */
                if (machine->vmlinux_maps[i]->end == 0)
                        machine->vmlinux_maps[i]->end = ~0ULL;
        }
}

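/*
 * Handle MMAP events for kernel space: module mmaps (filenames starting
 * with '/' or a non-kernel '[...]' name) create a new module map, while
 * the kernel mmap itself ("[kernel.kallsyms]" and friends) creates the
 * kernel maps and records the ref reloc symbol from a non-zero pgoff.
 */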
static int machine__process_kernel_mmap_event(struct machine *machine,
                                              union perf_event *event)
{
        struct map *map;
        char kmmap_prefix[PATH_MAX];
        enum dso_kernel_type kernel_type;
        bool is_kernel_mmap;

        machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
        if (machine__is_host(machine))
                kernel_type = DSO_TYPE_KERNEL;
        else
                kernel_type = DSO_TYPE_GUEST_KERNEL;

        is_kernel_mmap = memcmp(event->mmap.filename,
                                kmmap_prefix,
                                strlen(kmmap_prefix) - 1) == 0;
        if (event->mmap.filename[0] == '/' ||
            (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

                char short_module_name[1024];
                char *name, *dot;

                if (event->mmap.filename[0] == '/') {
                        name = strrchr(event->mmap.filename, '/');
                        if (name == NULL)
                                goto out_problem;

                        ++name; /* skip / */
                        dot = strrchr(name, '.');
                        if (dot == NULL)
                                goto out_problem;
                        snprintf(short_module_name, sizeof(short_module_name),
                                        "[%.*s]", (int)(dot - name), name);
                        strxfrchar(short_module_name, '-', '_');
                } else
                        strcpy(short_module_name, event->mmap.filename);

                map = machine__new_module(machine, event->mmap.start,
                                          event->mmap.filename);
                if (map == NULL)
                        goto out_problem;

                name = strdup(short_module_name);
                if (name == NULL)
                        goto out_problem;

                map->dso->short_name = name;
                map->dso->sname_alloc = 1;
                map->end = map->start + event->mmap.len;
        } else if (is_kernel_mmap) {
                const char *symbol_name = (event->mmap.filename +
                                strlen(kmmap_prefix));
                /*
                 * Should be there already, from the build-id table in
                 * the header.
                 */
                struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
                                                     kmmap_prefix);
                if (kernel == NULL)
                        goto out_problem;

                kernel->kernel = kernel_type;
                if (__machine__create_kernel_maps(machine, kernel) < 0)
                        goto out_problem;

                machine__set_kernel_mmap_len(machine, event);

                /*
                 * Avoid using a zero address (kptr_restrict) for the ref reloc
                 * symbol. Effectively having zero here means that at record
                 * time /proc/sys/kernel/kptr_restrict was non-zero.
                 */
                if (event->mmap.pgoff != 0) {
                        maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
                                                         symbol_name,
                                                         event->mmap.pgoff);
                }

                if (machine__is_default_guest(machine)) {
                        /*
                         * Preload the DSO of the guest kernel and its modules.
                         */
                        dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
                                  NULL);
                }
        }
        return 0;
out_problem:
        return -1;
}

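/*
 * PERF_RECORD_MMAP: kernel and guest-kernel mmaps are routed to
 * machine__process_kernel_mmap_event(); user mmaps create a new map for
 * the owning thread.  Problems are logged and the event is skipped.
 */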
int machine__process_mmap_event(struct machine *machine, union perf_event *event)
{
        u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        struct thread *thread;
        struct map *map;
        int ret = 0;

        if (dump_trace)
                perf_event__fprintf_mmap(event, stdout);

        if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
            cpumode == PERF_RECORD_MISC_KERNEL) {
                ret = machine__process_kernel_mmap_event(machine, event);
                if (ret < 0)
                        goto out_problem;
                return 0;
        }

        thread = machine__findnew_thread(machine, event->mmap.pid);
        if (thread == NULL)
                goto out_problem;
        map = map__new(&machine->user_dsos, event->mmap.start,
                        event->mmap.len, event->mmap.pgoff,
                        event->mmap.pid, event->mmap.filename,
                        MAP__FUNCTION);
        if (map == NULL)
                goto out_problem;

        thread__insert_map(thread, map);
        return 0;

out_problem:
        dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
        return 0;
}

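/*
 * PERF_RECORD_FORK: look up (or create) both the child and parent
 * threads and let the child inherit the parent's maps.
 */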
int machine__process_fork_event(struct machine *machine, union perf_event *event)
{
        struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
        struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);

        if (dump_trace)
                perf_event__fprintf_task(event, stdout);

        if (thread == NULL || parent == NULL ||
            thread__fork(thread, parent) < 0) {
                dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
                return -1;
        }

        return 0;
}

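/*
 * PERF_RECORD_EXIT: remove the exiting thread from this machine's
 * thread tree, if it is known.
 */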
int machine__process_exit_event(struct machine *machine, union perf_event *event)
{
        struct thread *thread = machine__find_thread(machine, event->fork.tid);

        if (dump_trace)
                perf_event__fprintf_task(event, stdout);

        if (thread != NULL)
                machine__remove_thread(machine, thread);

        return 0;
}

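/*
 * Dispatch a single perf event to the handler matching its header type.
 * Unknown types are rejected with -1.
 */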
int machine__process_event(struct machine *machine, union perf_event *event)
{
        int ret;

        switch (event->header.type) {
        case PERF_RECORD_COMM:
                ret = machine__process_comm_event(machine, event); break;
        case PERF_RECORD_MMAP:
                ret = machine__process_mmap_event(machine, event); break;
        case PERF_RECORD_FORK:
                ret = machine__process_fork_event(machine, event); break;
        case PERF_RECORD_EXIT:
                ret = machine__process_exit_event(machine, event); break;
        case PERF_RECORD_LOST:
                ret = machine__process_lost_event(machine, event); break;
        default:
                ret = -1;
                break;
        }

        return ret;
}