/*
 * linux/tools/perf/util/event.c
 */
   1#include <dirent.h>
   2#include <errno.h>
   3#include <inttypes.h>
   4#include <linux/kernel.h>
   5#include <linux/types.h>
   6#include <sys/types.h>
   7#include <sys/stat.h>
   8#include <unistd.h>
   9#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
  10#include <api/fs/fs.h>
  11#include <linux/perf_event.h>
  12#include "event.h"
  13#include "debug.h"
  14#include "hist.h"
  15#include "machine.h"
  16#include "sort.h"
  17#include "string2.h"
  18#include "strlist.h"
  19#include "thread.h"
  20#include "thread_map.h"
  21#include "sane_ctype.h"
  22#include "symbol/kallsyms.h"
  23#include "asm/bug.h"
  24#include "stat.h"
  25
/*
 * Human-readable names for the perf_event record types, indexed by their
 * PERF_RECORD_* id.  Index 0 is not a real record type; it is labelled
 * "TOTAL" (used as an aggregate row).  Gaps in the id space stay NULL and
 * are reported as "UNKNOWN" by perf_event__name().
 */
static const char *perf_event__names[] = {
        [0]                                     = "TOTAL",
        [PERF_RECORD_MMAP]                      = "MMAP",
        [PERF_RECORD_MMAP2]                     = "MMAP2",
        [PERF_RECORD_LOST]                      = "LOST",
        [PERF_RECORD_COMM]                      = "COMM",
        [PERF_RECORD_EXIT]                      = "EXIT",
        [PERF_RECORD_THROTTLE]                  = "THROTTLE",
        [PERF_RECORD_UNTHROTTLE]                = "UNTHROTTLE",
        [PERF_RECORD_FORK]                      = "FORK",
        [PERF_RECORD_READ]                      = "READ",
        [PERF_RECORD_SAMPLE]                    = "SAMPLE",
        [PERF_RECORD_AUX]                       = "AUX",
        [PERF_RECORD_ITRACE_START]              = "ITRACE_START",
        [PERF_RECORD_LOST_SAMPLES]              = "LOST_SAMPLES",
        [PERF_RECORD_SWITCH]                    = "SWITCH",
        [PERF_RECORD_SWITCH_CPU_WIDE]           = "SWITCH_CPU_WIDE",
        [PERF_RECORD_NAMESPACES]                = "NAMESPACES",
        [PERF_RECORD_HEADER_ATTR]               = "ATTR",
        [PERF_RECORD_HEADER_EVENT_TYPE]         = "EVENT_TYPE",
        [PERF_RECORD_HEADER_TRACING_DATA]       = "TRACING_DATA",
        [PERF_RECORD_HEADER_BUILD_ID]           = "BUILD_ID",
        [PERF_RECORD_FINISHED_ROUND]            = "FINISHED_ROUND",
        [PERF_RECORD_ID_INDEX]                  = "ID_INDEX",
        [PERF_RECORD_AUXTRACE_INFO]             = "AUXTRACE_INFO",
        [PERF_RECORD_AUXTRACE]                  = "AUXTRACE",
        [PERF_RECORD_AUXTRACE_ERROR]            = "AUXTRACE_ERROR",
        [PERF_RECORD_THREAD_MAP]                = "THREAD_MAP",
        [PERF_RECORD_CPU_MAP]                   = "CPU_MAP",
        [PERF_RECORD_STAT_CONFIG]               = "STAT_CONFIG",
        [PERF_RECORD_STAT]                      = "STAT",
        [PERF_RECORD_STAT_ROUND]                = "STAT_ROUND",
        [PERF_RECORD_EVENT_UPDATE]              = "EVENT_UPDATE",
        [PERF_RECORD_TIME_CONV]                 = "TIME_CONV",
};
  61
/*
 * Namespace names indexed by the *_NS_INDEX constants.  These are the
 * exact directory entry names under /proc/<pid>/ns/ and are used by
 * perf_event__synthesize_namespaces() to build those paths.
 */
static const char *perf_ns__names[] = {
        [NET_NS_INDEX]          = "net",
        [UTS_NS_INDEX]          = "uts",
        [IPC_NS_INDEX]          = "ipc",
        [PID_NS_INDEX]          = "pid",
        [USER_NS_INDEX]         = "user",
        [MNT_NS_INDEX]          = "mnt",
        [CGROUP_NS_INDEX]       = "cgroup",
};
  71
  72const char *perf_event__name(unsigned int id)
  73{
  74        if (id >= ARRAY_SIZE(perf_event__names))
  75                return "INVALID";
  76        if (!perf_event__names[id])
  77                return "UNKNOWN";
  78        return perf_event__names[id];
  79}
  80
  81static const char *perf_ns__name(unsigned int id)
  82{
  83        if (id >= ARRAY_SIZE(perf_ns__names))
  84                return "UNKNOWN";
  85        return perf_ns__names[id];
  86}
  87
  88static int perf_tool__process_synth_event(struct perf_tool *tool,
  89                                          union perf_event *event,
  90                                          struct machine *machine,
  91                                          perf_event__handler_t process)
  92{
  93        struct perf_sample synth_sample = {
  94        .pid       = -1,
  95        .tid       = -1,
  96        .time      = -1,
  97        .stream_id = -1,
  98        .cpu       = -1,
  99        .period    = 1,
 100        .cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
 101        };
 102
 103        return process(tool, event, &synth_sample, machine);
 104};
 105
 106/*
 107 * Assumes that the first 4095 bytes of /proc/pid/stat contains
 108 * the comm, tgid and ppid.
 109 */
 110static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
 111                                    pid_t *tgid, pid_t *ppid)
 112{
 113        char filename[PATH_MAX];
 114        char bf[4096];
 115        int fd;
 116        size_t size = 0;
 117        ssize_t n;
 118        char *name, *tgids, *ppids;
 119
 120        *tgid = -1;
 121        *ppid = -1;
 122
 123        snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
 124
 125        fd = open(filename, O_RDONLY);
 126        if (fd < 0) {
 127                pr_debug("couldn't open %s\n", filename);
 128                return -1;
 129        }
 130
 131        n = read(fd, bf, sizeof(bf) - 1);
 132        close(fd);
 133        if (n <= 0) {
 134                pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n",
 135                           pid);
 136                return -1;
 137        }
 138        bf[n] = '\0';
 139
 140        name = strstr(bf, "Name:");
 141        tgids = strstr(bf, "Tgid:");
 142        ppids = strstr(bf, "PPid:");
 143
 144        if (name) {
 145                char *nl;
 146
 147                name += 5;  /* strlen("Name:") */
 148                name = ltrim(name);
 149
 150                nl = strchr(name, '\n');
 151                if (nl)
 152                        *nl = '\0';
 153
 154                size = strlen(name);
 155                if (size >= len)
 156                        size = len - 1;
 157                memcpy(comm, name, size);
 158                comm[size] = '\0';
 159        } else {
 160                pr_debug("Name: string not found for pid %d\n", pid);
 161        }
 162
 163        if (tgids) {
 164                tgids += 5;  /* strlen("Tgid:") */
 165                *tgid = atoi(tgids);
 166        } else {
 167                pr_debug("Tgid: string not found for pid %d\n", pid);
 168        }
 169
 170        if (ppids) {
 171                ppids += 5;  /* strlen("PPid:") */
 172                *ppid = atoi(ppids);
 173        } else {
 174                pr_debug("PPid: string not found for pid %d\n", pid);
 175        }
 176
 177        return 0;
 178}
 179
/*
 * Fill @event with a PERF_RECORD_COMM for @pid, returning the thread
 * group id in *tgid and the parent pid in *ppid (-1 when unknown).
 *
 * For the host machine the comm/tgid/ppid come from /proc; for a guest
 * machine only the tgid (the guest's pid on the host) is known and the
 * comm is left empty.  Returns 0 on success, -1 on failure.
 */
static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
                                    struct machine *machine,
                                    pid_t *tgid, pid_t *ppid)
{
        size_t size;

        *ppid = -1;

        memset(&event->comm, 0, sizeof(event->comm));

        if (machine__is_host(machine)) {
                if (perf_event__get_comm_ids(pid, event->comm.comm,
                                             sizeof(event->comm.comm),
                                             tgid, ppid) != 0) {
                        return -1;
                }
        } else {
                *tgid = machine->pid;
        }

        if (*tgid < 0)
                return -1;

        event->comm.pid = *tgid;
        event->comm.header.type = PERF_RECORD_COMM;

        /*
         * The comm[] array is the last member; shrink the record to the
         * u64-aligned length actually used, then append (zeroed) room for
         * the machine's sample-id trailer.
         */
        size = strlen(event->comm.comm) + 1;
        size = PERF_ALIGN(size, sizeof(u64));
        memset(event->comm.comm + size, 0, machine->id_hdr_size);
        event->comm.header.size = (sizeof(event->comm) -
                                (sizeof(event->comm.comm) - size) +
                                machine->id_hdr_size);
        event->comm.tid = pid;

        return 0;
}
 216
 217pid_t perf_event__synthesize_comm(struct perf_tool *tool,
 218                                         union perf_event *event, pid_t pid,
 219                                         perf_event__handler_t process,
 220                                         struct machine *machine)
 221{
 222        pid_t tgid, ppid;
 223
 224        if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
 225                return -1;
 226
 227        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
 228                return -1;
 229
 230        return tgid;
 231}
 232
 233static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
 234                                         struct perf_ns_link_info *ns_link_info)
 235{
 236        struct stat64 st;
 237        char proc_ns[128];
 238
 239        sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
 240        if (stat64(proc_ns, &st) == 0) {
 241                ns_link_info->dev = st.st_dev;
 242                ns_link_info->ino = st.st_ino;
 243        }
 244}
 245
/*
 * Synthesize and deliver a PERF_RECORD_NAMESPACES event for @pid/@tgid,
 * containing one perf_ns_link_info (dev/ino) entry per namespace in
 * perf_ns__names[].  Quietly does nothing unless the tool opted in via
 * tool->namespace_events.  Returns 0 on success, -1 on failure.
 */
int perf_event__synthesize_namespaces(struct perf_tool *tool,
                                      union perf_event *event,
                                      pid_t pid, pid_t tgid,
                                      perf_event__handler_t process,
                                      struct machine *machine)
{
        u32 idx;
        struct perf_ns_link_info *ns_link_info;

        if (!tool || !tool->namespace_events)
                return 0;

        /*
         * Zero the fixed part, the NR_NAMESPACES link_info entries that
         * follow it, and the machine's sample-id trailer.  Entries whose
         * stat fails in perf_event__get_ns_link_info() thus stay zero.
         */
        memset(&event->namespaces, 0, (sizeof(event->namespaces) +
               (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
               machine->id_hdr_size));

        event->namespaces.pid = tgid;
        event->namespaces.tid = pid;

        event->namespaces.nr_namespaces = NR_NAMESPACES;

        ns_link_info = event->namespaces.link_info;

        for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
                perf_event__get_ns_link_info(pid, perf_ns__name(idx),
                                             &ns_link_info[idx]);

        event->namespaces.header.type = PERF_RECORD_NAMESPACES;

        /* Total on-the-wire size: fixed part + link_info array + id trailer. */
        event->namespaces.header.size = (sizeof(event->namespaces) +
                        (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                        machine->id_hdr_size);

        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
                return -1;

        return 0;
}
 284
 285static int perf_event__synthesize_fork(struct perf_tool *tool,
 286                                       union perf_event *event,
 287                                       pid_t pid, pid_t tgid, pid_t ppid,
 288                                       perf_event__handler_t process,
 289                                       struct machine *machine)
 290{
 291        memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
 292
 293        /*
 294         * for main thread set parent to ppid from status file. For other
 295         * threads set parent pid to main thread. ie., assume main thread
 296         * spawns all threads in a process
 297        */
 298        if (tgid == pid) {
 299                event->fork.ppid = ppid;
 300                event->fork.ptid = ppid;
 301        } else {
 302                event->fork.ppid = tgid;
 303                event->fork.ptid = tgid;
 304        }
 305        event->fork.pid  = tgid;
 306        event->fork.tid  = pid;
 307        event->fork.header.type = PERF_RECORD_FORK;
 308
 309        event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
 310
 311        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
 312                return -1;
 313
 314        return 0;
 315}
 316
/*
 * Synthesize one PERF_RECORD_MMAP2 event per mapping of thread @pid by
 * parsing /proc/<pid>/task/<pid>/maps (prefixed by machine->root_dir for
 * non-host machines).  Executable mappings are always emitted; readable
 * non-executable mappings only when @mmap_data is set.  If parsing takes
 * longer than @proc_map_timeout (ms), the current event is flagged with
 * PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT, emitted, and parsing stops.
 * Returns 0 on success, -1 on error.
 */
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid,
                                       perf_event__handler_t process,
                                       struct machine *machine,
                                       bool mmap_data,
                                       unsigned int proc_map_timeout)
{
        char filename[PATH_MAX];
        FILE *fp;
        unsigned long long t;
        bool truncation = false;
        unsigned long long timeout = proc_map_timeout * 1000000ULL; /* ms -> ns */
        int rc = 0;
        const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
        int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

        if (machine__is_default_guest(machine))
                return 0;

        snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
                 machine->root_dir, pid, pid);

        fp = fopen(filename, "r");
        if (fp == NULL) {
                /*
                 * We raced with a task exiting - just return:
                 */
                pr_debug("couldn't open %s\n", filename);
                return -1;
        }

        event->header.type = PERF_RECORD_MMAP2;
        t = rdclock();

        while (1) {
                char bf[BUFSIZ];
                char prot[5];
                char execname[PATH_MAX];
                char anonstr[] = "//anon";
                unsigned int ino;
                size_t size;
                ssize_t n;

                if (fgets(bf, sizeof(bf), fp) == NULL)
                        break;

                if ((rdclock() - t) > timeout) {
                        pr_warning("Reading %s time out. "
                                   "You may want to increase "
                                   "the time limit by --proc-map-timeout\n",
                                   filename);
                        truncation = true;
                        /* jump past the filters: still emit this last event */
                        goto out;
                }

                /* ensure null termination since stack will be reused. */
                strcpy(execname, "");

                /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
                n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
                       &event->mmap2.start, &event->mmap2.len, prot,
                       &event->mmap2.pgoff, &event->mmap2.maj,
                       &event->mmap2.min,
                       &ino, execname);

                /*
                 * Anon maps don't have the execname.
                 */
                if (n < 7)
                        continue;

                event->mmap2.ino = (u64)ino;

                /*
                 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
                 */
                if (machine__is_host(machine))
                        event->header.misc = PERF_RECORD_MISC_USER;
                else
                        event->header.misc = PERF_RECORD_MISC_GUEST_USER;

                /* map protection and flags bits */
                event->mmap2.prot = 0;
                event->mmap2.flags = 0;
                if (prot[0] == 'r')
                        event->mmap2.prot |= PROT_READ;
                if (prot[1] == 'w')
                        event->mmap2.prot |= PROT_WRITE;
                if (prot[2] == 'x')
                        event->mmap2.prot |= PROT_EXEC;

                if (prot[3] == 's')
                        event->mmap2.flags |= MAP_SHARED;
                else
                        event->mmap2.flags |= MAP_PRIVATE;

                /* skip non-executable maps unless mmap_data wants readable ones */
                if (prot[2] != 'x') {
                        if (!mmap_data || prot[0] != 'r')
                                continue;

                        event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
                }

/*
 * Note: "out" is *inside* the loop so that on timeout the event parsed
 * so far is still flagged, finalized and emitted before we break out.
 */
out:
                if (truncation)
                        event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

                if (!strcmp(execname, ""))
                        strcpy(execname, anonstr);

                /* maps backed by hugetlbfs files are reported as anon + MAP_HUGETLB */
                if (hugetlbfs_mnt_len &&
                    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
                        strcpy(execname, anonstr);
                        event->mmap2.flags |= MAP_HUGETLB;
                }

                size = strlen(execname) + 1;
                memcpy(event->mmap2.filename, execname, size);
                size = PERF_ALIGN(size, sizeof(u64));
                /*
                 * sscanf stored the map's end address in .len; turn it into a
                 * length.  NOTE(review): this reads .start via the mmap view of
                 * the union rather than mmap2 — presumably the field offsets
                 * coincide; confirm against the event layout in event.h.
                 */
                event->mmap2.len -= event->mmap.start;
                event->mmap2.header.size = (sizeof(event->mmap2) -
                                        (sizeof(event->mmap2.filename) - size));
                memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
                event->mmap2.header.size += machine->id_hdr_size;
                event->mmap2.pid = tgid;
                event->mmap2.tid = pid;

                if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
                        rc = -1;
                        break;
                }

                if (truncation)
                        break;
        }

        fclose(fp);
        return rc;
}
 457
/*
 * Synthesize one PERF_RECORD_MMAP event per kernel module map of
 * @machine (the kernel map itself is skipped).  Returns 0 on success,
 * -1 on allocation failure or when @process reports an error.
 */
int perf_event__synthesize_modules(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine)
{
        int rc = 0;
        struct map *pos;
        struct map_groups *kmaps = &machine->kmaps;
        struct maps *maps = &kmaps->maps[MAP__FUNCTION];
        union perf_event *event = zalloc((sizeof(event->mmap) +
                                          machine->id_hdr_size));
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for kernel modules\n");
                return -1;
        }

        event->header.type = PERF_RECORD_MMAP;

        /*
         * kernel uses 0 for user space maps, see kernel/perf_event.c
         * __perf_event_mmap
         */
        if (machine__is_host(machine))
                event->header.misc = PERF_RECORD_MISC_KERNEL;
        else
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

        for (pos = maps__first(maps); pos; pos = map__next(pos)) {
                size_t size;

                /* only modules here; the kernel map gets its own event */
                if (__map__is_kernel(pos))
                        continue;

                /* shrink the record to the u64-aligned filename length used */
                size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
                event->mmap.header.type = PERF_RECORD_MMAP;
                event->mmap.header.size = (sizeof(event->mmap) -
                                        (sizeof(event->mmap.filename) - size));
                memset(event->mmap.filename + size, 0, machine->id_hdr_size);
                event->mmap.header.size += machine->id_hdr_size;
                event->mmap.start = pos->start;
                event->mmap.len   = pos->end - pos->start;
                event->mmap.pid   = machine->pid;

                memcpy(event->mmap.filename, pos->dso->long_name,
                       pos->dso->long_name_len + 1);
                if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
                        rc = -1;
                        break;
                }
        }

        free(event);
        return rc;
}
 512
/*
 * Synthesize the events describing one process.
 *
 * With !@full only a COMM + NAMESPACES + MMAP2 set is emitted for @pid
 * itself.  With @full, /proc/<pid>/task is walked and FORK + COMM +
 * NAMESPACES events are emitted per thread, plus the MMAP2 events for
 * the thread group leader (maps are shared by all threads).
 *
 * The four *_event buffers are caller-allocated scratch space reused
 * across calls.  Returns 0 on success, -1 on failure.
 */
static int __event__synthesize_thread(union perf_event *comm_event,
                                      union perf_event *mmap_event,
                                      union perf_event *fork_event,
                                      union perf_event *namespaces_event,
                                      pid_t pid, int full,
                                      perf_event__handler_t process,
                                      struct perf_tool *tool,
                                      struct machine *machine,
                                      bool mmap_data,
                                      unsigned int proc_map_timeout)
{
        char filename[PATH_MAX];
        DIR *tasks;
        struct dirent *dirent;
        pid_t tgid, ppid;
        int rc = 0;

        /* special case: only send one comm event using passed in pid */
        if (!full) {
                tgid = perf_event__synthesize_comm(tool, comm_event, pid,
                                                   process, machine);

                if (tgid == -1)
                        return -1;

                if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
                                                      tgid, process, machine) < 0)
                        return -1;


                return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
                                                          process, machine, mmap_data,
                                                          proc_map_timeout);
        }

        if (machine__is_default_guest(machine))
                return 0;

        snprintf(filename, sizeof(filename), "%s/proc/%d/task",
                 machine->root_dir, pid);

        tasks = opendir(filename);
        if (tasks == NULL) {
                /* likely raced with the task exiting - not an error */
                pr_debug("couldn't open %s\n", filename);
                return 0;
        }

        while ((dirent = readdir(tasks)) != NULL) {
                char *end;
                pid_t _pid;

                /* skip ".", ".." and anything else non-numeric */
                _pid = strtol(dirent->d_name, &end, 10);
                if (*end)
                        continue;

                rc = -1;
                if (perf_event__prepare_comm(comm_event, _pid, machine,
                                             &tgid, &ppid) != 0)
                        break;

                if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
                                                ppid, process, machine) < 0)
                        break;

                if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
                                                      tgid, process, machine) < 0)
                        break;

                /*
                 * Send the prepared comm event
                 */
                if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
                        break;

                rc = 0;
                if (_pid == pid) {
                        /* process the parent's maps too */
                        rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
                                                process, machine, mmap_data, proc_map_timeout);
                        if (rc)
                                break;
                }
        }

        closedir(tasks);
        return rc;
}
 600
/*
 * Synthesize COMM/NAMESPACES/MMAP2 events for every thread in @threads.
 * If a thread's group leader is not itself in the map, events for the
 * leader are synthesized as well, so the consumer can resolve the
 * thread group.  Returns 0 on success, -1 on allocation or processing
 * failure.
 */
int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine,
                                      bool mmap_data,
                                      unsigned int proc_map_timeout)
{
        union perf_event *comm_event, *mmap_event, *fork_event;
        union perf_event *namespaces_event;
        int err = -1, thread, j;

        /* scratch buffers reused for every thread; goto-ladder cleanup below */
        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)
                goto out;

        mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
        if (mmap_event == NULL)
                goto out_free_comm;

        fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
        if (fork_event == NULL)
                goto out_free_mmap;

        namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
                                  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                                  machine->id_hdr_size);
        if (namespaces_event == NULL)
                goto out_free_fork;

        err = 0;
        for (thread = 0; thread < threads->nr; ++thread) {
                if (__event__synthesize_thread(comm_event, mmap_event,
                                               fork_event, namespaces_event,
                                               thread_map__pid(threads, thread), 0,
                                               process, tool, machine,
                                               mmap_data, proc_map_timeout)) {
                        err = -1;
                        break;
                }

                /*
                 * comm.pid is set to thread group id by
                 * perf_event__synthesize_comm
                 */
                if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
                        bool need_leader = true;

                        /* is thread group leader in thread_map? */
                        for (j = 0; j < threads->nr; ++j) {
                                if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
                                        need_leader = false;
                                        break;
                                }
                        }

                        /* if not, generate events for it */
                        if (need_leader &&
                            __event__synthesize_thread(comm_event, mmap_event,
                                                       fork_event, namespaces_event,
                                                       comm_event->comm.pid, 0,
                                                       process, tool, machine,
                                                       mmap_data, proc_map_timeout)) {
                                err = -1;
                                break;
                        }
                }
        }
        free(namespaces_event);
out_free_fork:
        free(fork_event);
out_free_mmap:
        free(mmap_event);
out_free_comm:
        free(comm_event);
out:
        return err;
}
 678
/*
 * Walk <root_dir>/proc and synthesize FORK/COMM/NAMESPACES/MMAP2 events
 * for every running process on @machine.  Per-thread failures are
 * ignored (we may race with threads exiting); only allocation failure
 * or a missing proc directory yields -1.
 */
int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine,
                                   bool mmap_data,
                                   unsigned int proc_map_timeout)
{
        DIR *proc;
        char proc_path[PATH_MAX];
        struct dirent *dirent;
        union perf_event *comm_event, *mmap_event, *fork_event;
        union perf_event *namespaces_event;
        int err = -1;

        if (machine__is_default_guest(machine))
                return 0;

        /* scratch buffers reused for every pid; goto-ladder cleanup below */
        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)
                goto out;

        mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
        if (mmap_event == NULL)
                goto out_free_comm;

        fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
        if (fork_event == NULL)
                goto out_free_mmap;

        namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
                                  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                                  machine->id_hdr_size);
        if (namespaces_event == NULL)
                goto out_free_fork;

        snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
        proc = opendir(proc_path);

        if (proc == NULL)
                goto out_free_namespaces;

        while ((dirent = readdir(proc)) != NULL) {
                char *end;
                pid_t pid = strtol(dirent->d_name, &end, 10);

                if (*end) /* only interested in proper numerical dirents */
                        continue;
                /*
                 * We may race with exiting thread, so don't stop just because
                 * one thread couldn't be synthesized.
                 */
                __event__synthesize_thread(comm_event, mmap_event, fork_event,
                                           namespaces_event, pid, 1, process,
                                           tool, machine, mmap_data,
                                           proc_map_timeout);
        }

        err = 0;
        closedir(proc);
out_free_namespaces:
        free(namespaces_event);
out_free_fork:
        free(fork_event);
out_free_mmap:
        free(mmap_event);
out_free_comm:
        free(comm_event);
out:
        return err;
}
 748
/*
 * Callback state for find_symbol_cb(): the symbol @name being searched
 * for, and the @start address it resolved to once found.
 */
struct process_symbol_args {
        const char *name;
        u64        start;
};
 753
 754static int find_symbol_cb(void *arg, const char *name, char type,
 755                          u64 start)
 756{
 757        struct process_symbol_args *args = arg;
 758
 759        /*
 760         * Must be a function or at least an alias, as in PARISC64, where "_text" is
 761         * an 'A' to the same address as "_stext".
 762         */
 763        if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
 764              type == 'A') || strcmp(name, args->name))
 765                return 0;
 766
 767        args->start = start;
 768        return 1;
 769}
 770
 771int kallsyms__get_function_start(const char *kallsyms_filename,
 772                                 const char *symbol_name, u64 *addr)
 773{
 774        struct process_symbol_args args = { .name = symbol_name, };
 775
 776        if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
 777                return -1;
 778
 779        *addr = args.start;
 780        return 0;
 781}
 782
/*
 * Synthesize the PERF_RECORD_MMAP event describing @machine's kernel
 * text map, named "<mmap_name><ref_reloc_sym>" (e.g. "[kernel.kallsyms]_text")
 * with pgoff set to the reference relocation symbol's address.  Returns
 * the @process result, or -1 when kptr_restrict is set, the kernel map
 * is missing, or allocation fails.
 */
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
        size_t size;
        const char *mmap_name;
        char name_buff[PATH_MAX];
        struct map *map = machine__kernel_map(machine);
        struct kmap *kmap;
        int err;
        union perf_event *event;

        /* kernel addresses are hidden from us - nothing useful to emit */
        if (symbol_conf.kptr_restrict)
                return -1;
        if (map == NULL)
                return -1;

        /*
         * We should get this from /sys/kernel/sections/.text, but till that is
         * available use this, and after it is use this as a fallback for older
         * kernels.
         */
        event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for kernel modules\n");
                return -1;
        }

        mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
        if (machine__is_host(machine)) {
                /*
                 * kernel uses PERF_RECORD_MISC_USER for user space maps,
                 * see kernel/perf_event.c __perf_event_mmap
                 */
                event->header.misc = PERF_RECORD_MISC_KERNEL;
        } else {
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
        }

        /*
         * NOTE(review): kmap and kmap->ref_reloc_sym are dereferenced
         * unchecked here - presumably guaranteed non-NULL for the kernel
         * map by this point; confirm against map__kmap()'s contract.
         */
        kmap = map__kmap(map);
        size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
                        "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
        size = PERF_ALIGN(size, sizeof(u64));
        event->mmap.header.type = PERF_RECORD_MMAP;
        event->mmap.header.size = (sizeof(event->mmap) -
                        (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
        event->mmap.pgoff = kmap->ref_reloc_sym->addr;
        event->mmap.start = map->start;
        event->mmap.len   = map->end - event->mmap.start;
        event->mmap.pid   = machine->pid;

        err = perf_tool__process_synth_event(tool, event, machine, process);
        free(event);

        return err;
}
 840
 841int perf_event__synthesize_thread_map2(struct perf_tool *tool,
 842                                      struct thread_map *threads,
 843                                      perf_event__handler_t process,
 844                                      struct machine *machine)
 845{
 846        union perf_event *event;
 847        int i, err, size;
 848
 849        size  = sizeof(event->thread_map);
 850        size += threads->nr * sizeof(event->thread_map.entries[0]);
 851
 852        event = zalloc(size);
 853        if (!event)
 854                return -ENOMEM;
 855
 856        event->header.type = PERF_RECORD_THREAD_MAP;
 857        event->header.size = size;
 858        event->thread_map.nr = threads->nr;
 859
 860        for (i = 0; i < threads->nr; i++) {
 861                struct thread_map_event_entry *entry = &event->thread_map.entries[i];
 862                char *comm = thread_map__comm(threads, i);
 863
 864                if (!comm)
 865                        comm = (char *) "";
 866
 867                entry->pid = thread_map__pid(threads, i);
 868                strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
 869        }
 870
 871        err = process(tool, event, NULL, machine);
 872
 873        free(event);
 874        return err;
 875}
 876
 877static void synthesize_cpus(struct cpu_map_entries *cpus,
 878                            struct cpu_map *map)
 879{
 880        int i;
 881
 882        cpus->nr = map->nr;
 883
 884        for (i = 0; i < map->nr; i++)
 885                cpus->cpu[i] = map->map[i];
 886}
 887
 888static void synthesize_mask(struct cpu_map_mask *mask,
 889                            struct cpu_map *map, int max)
 890{
 891        int i;
 892
 893        mask->nr = BITS_TO_LONGS(max);
 894        mask->long_size = sizeof(long);
 895
 896        for (i = 0; i < map->nr; i++)
 897                set_bit(map->map[i], mask->mask);
 898}
 899
/* Bytes needed for the array encoding: header plus one u16 per cpu. */
static size_t cpus_size(struct cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}
 904
/*
 * Bytes needed for the mask encoding. Also stores in *max the highest
 * cpu bit + 1 seen in 'map', for later use by synthesize_mask().
 */
static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}
 921
/*
 * Allocate a zeroed buffer large enough for a cpu_map_data payload using
 * whichever of the array or mask encodings of 'map' is smaller (an empty
 * "dummy" map always gets the array form). On return *size has been
 * incremented by the payload size, *type holds the chosen encoding, and
 * *max the highest cpu bit + 1 (filled in by mask_size()).
 */
void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u64)
	 *
	 *   mask  = size of 'struct cpu_map_mask' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type  = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type  = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}
 954
 955void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
 956                              u16 type, int max)
 957{
 958        data->type = type;
 959
 960        switch (type) {
 961        case PERF_CPU_MAP__CPUS:
 962                synthesize_cpus((struct cpu_map_entries *) data->data, map);
 963                break;
 964        case PERF_CPU_MAP__MASK:
 965                synthesize_mask((struct cpu_map_mask *) data->data, map, max);
 966        default:
 967                break;
 968        };
 969}
 970
 971static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
 972{
 973        size_t size = sizeof(struct cpu_map_event);
 974        struct cpu_map_event *event;
 975        int max;
 976        u16 type;
 977
 978        event = cpu_map_data__alloc(map, &size, &type, &max);
 979        if (!event)
 980                return NULL;
 981
 982        event->header.type = PERF_RECORD_CPU_MAP;
 983        event->header.size = size;
 984        event->data.type   = type;
 985
 986        cpu_map_data__synthesize(&event->data, map, type, max);
 987        return event;
 988}
 989
 990int perf_event__synthesize_cpu_map(struct perf_tool *tool,
 991                                   struct cpu_map *map,
 992                                   perf_event__handler_t process,
 993                                   struct machine *machine)
 994{
 995        struct cpu_map_event *event;
 996        int err;
 997
 998        event = cpu_map_event__new(map);
 999        if (!event)
1000                return -ENOMEM;
1001
1002        err = process(tool, (union perf_event *) event, NULL, machine);
1003
1004        free(event);
1005        return err;
1006}
1007
/*
 * Synthesize a PERF_RECORD_STAT_CONFIG event carrying the stat config
 * terms (aggr_mode, interval, scale) as tag/value pairs, decoded again
 * by perf_event__read_stat_config().
 *
 * Returns the result of 'process', or -ENOMEM on allocation failure.
 */
int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size  = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

/* Append one tag/value pair to event->data[] */
#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	/* Every PERF_STAT_CONFIG_TERM__* must have been ADD()ed above. */
	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}
1045
1046int perf_event__synthesize_stat(struct perf_tool *tool,
1047                                u32 cpu, u32 thread, u64 id,
1048                                struct perf_counts_values *count,
1049                                perf_event__handler_t process,
1050                                struct machine *machine)
1051{
1052        struct stat_event event;
1053
1054        event.header.type = PERF_RECORD_STAT;
1055        event.header.size = sizeof(event);
1056        event.header.misc = 0;
1057
1058        event.id        = id;
1059        event.cpu       = cpu;
1060        event.thread    = thread;
1061        event.val       = count->val;
1062        event.ena       = count->ena;
1063        event.run       = count->run;
1064
1065        return process(tool, (union perf_event *) &event, NULL, machine);
1066}
1067
1068int perf_event__synthesize_stat_round(struct perf_tool *tool,
1069                                      u64 evtime, u64 type,
1070                                      perf_event__handler_t process,
1071                                      struct machine *machine)
1072{
1073        struct stat_round_event event;
1074
1075        event.header.type = PERF_RECORD_STAT_ROUND;
1076        event.header.size = sizeof(event);
1077        event.header.misc = 0;
1078
1079        event.time = evtime;
1080        event.type = type;
1081
1082        return process(tool, (union perf_event *) &event, NULL, machine);
1083}
1084
/*
 * Decode a PERF_RECORD_STAT_CONFIG event back into a perf_stat_config,
 * the inverse of perf_event__synthesize_stat_config(). Unknown tags are
 * warned about and skipped, so files from newer tools remain readable.
 */
void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
/* Map one tag back to its perf_stat_config field */
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,	scale)
		CASE(INTERVAL,	interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}
1108
1109size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
1110{
1111        const char *s;
1112
1113        if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
1114                s = " exec";
1115        else
1116                s = "";
1117
1118        return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
1119}
1120
1121size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
1122{
1123        size_t ret = 0;
1124        struct perf_ns_link_info *ns_link_info;
1125        u32 nr_namespaces, idx;
1126
1127        ns_link_info = event->namespaces.link_info;
1128        nr_namespaces = event->namespaces.nr_namespaces;
1129
1130        ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
1131                       event->namespaces.pid,
1132                       event->namespaces.tid,
1133                       nr_namespaces);
1134
1135        for (idx = 0; idx < nr_namespaces; idx++) {
1136                if (idx && (idx % 4 == 0))
1137                        ret += fprintf(fp, "\n\t\t ");
1138
1139                ret  += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
1140                                perf_ns__name(idx), (u64)ns_link_info[idx].dev,
1141                                (u64)ns_link_info[idx].ino,
1142                                ((idx + 1) != nr_namespaces) ? ", " : "]\n");
1143        }
1144
1145        return ret;
1146}
1147
/* Tool callback: forward PERF_RECORD_COMM events to the machine layer. */
int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}
1155
/* Tool callback: forward PERF_RECORD_NAMESPACES events to the machine layer. */
int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	return machine__process_namespaces_event(machine, event, sample);
}
1163
/* Tool callback: forward PERF_RECORD_LOST events to the machine layer. */
int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}
1171
/* Tool callback: forward PERF_RECORD_AUX events to the machine layer. */
int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}
1179
/* Tool callback: forward PERF_RECORD_ITRACE_START events to the machine layer. */
int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}
1187
/* Tool callback: forward PERF_RECORD_LOST_SAMPLES events to the machine layer. */
int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}
1195
/* Tool callback: forward PERF_RECORD_SWITCH* events to the machine layer. */
int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}
1203
/*
 * Print a PERF_RECORD_MMAP event: pid/tid, [start(len) @ pgoff], then
 * 'r' for data mappings (PERF_RECORD_MISC_MMAP_DATA) or 'x' otherwise,
 * and the filename.
 */
size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}
1212
/*
 * Print a PERF_RECORD_MMAP2 event: like MMAP plus device maj:min,
 * inode/generation, decoded rwx protection and shared/private flag.
 */
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}
1227
1228size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
1229{
1230        struct thread_map *threads = thread_map__new_event(&event->thread_map);
1231        size_t ret;
1232
1233        ret = fprintf(fp, " nr: ");
1234
1235        if (threads)
1236                ret += thread_map__fprintf(threads, fp);
1237        else
1238                ret += fprintf(fp, "failed to get threads from event\n");
1239
1240        thread_map__put(threads);
1241        return ret;
1242}
1243
1244size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
1245{
1246        struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
1247        size_t ret;
1248
1249        ret = fprintf(fp, ": ");
1250
1251        if (cpus)
1252                ret += cpu_map__fprintf(cpus, fp);
1253        else
1254                ret += fprintf(fp, "failed to get cpumap from event\n");
1255
1256        cpu_map__put(cpus);
1257        return ret;
1258}
1259
/* Tool callback: forward PERF_RECORD_MMAP events to the machine layer. */
int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}
1267
/* Tool callback: forward PERF_RECORD_MMAP2 events to the machine layer. */
int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}
1275
/* Print a PERF_RECORD_FORK/EXIT event as (pid:tid):(ppid:ptid). */
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}
1282
/* Tool callback: forward PERF_RECORD_FORK events to the machine layer. */
int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}
1290
/* Tool callback: forward PERF_RECORD_EXIT events to the machine layer. */
int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}
1298
/*
 * Print a PERF_RECORD_AUX event: offset, size and decoded flags
 * (T = truncated, O = overwrite, P = partial).
 */
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
		       event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "");
}
1308
/* Print a PERF_RECORD_ITRACE_START event: traced pid and tid. */
size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}
1314
1315size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
1316{
1317        bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1318        const char *in_out = out ? "OUT" : "IN ";
1319
1320        if (event->header.type == PERF_RECORD_SWITCH)
1321                return fprintf(fp, " %s\n", in_out);
1322
1323        return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
1324                       in_out, out ? "next" : "prev",
1325                       event->context_switch.next_prev_pid,
1326                       event->context_switch.next_prev_tid);
1327}
1328
/*
 * Print "PERF_RECORD_<name>" followed by per-type details for the record
 * types that have a pretty-printer; other types just get a newline.
 */
size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_NAMESPACES:
		ret += perf_event__fprintf_namespaces(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}
1367
/* Generic tool callback: dispatch any event via machine__process_event(). */
int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}
1375
/*
 * Resolve 'addr' to a map within the thread's map groups, filling in 'al'.
 * The cpumode selects between host/guest kernel/user maps; al->map is left
 * NULL when nothing matches (or there is no machine), and al->filtered gets
 * HIST_FILTER__GUEST/HOST bits for cpumodes this session is not handling.
 */
void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		/* Hypervisor, or a cpumode this session isn't interested in. */
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		/* Translate to the map's dso-relative address space. */
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}
1451
1452void thread__find_addr_location(struct thread *thread,
1453                                u8 cpumode, enum map_type type, u64 addr,
1454                                struct addr_location *al)
1455{
1456        thread__find_addr_map(thread, cpumode, type, addr, al);
1457        if (al->map != NULL)
1458                al->sym = map__find_symbol(al->map, al->addr);
1459        else
1460                al->sym = NULL;
1461}
1462
1463/*
1464 * Callers need to drop the reference to al->thread, obtained in
1465 * machine__findnew_thread()
1466 */
1467int machine__resolve(struct machine *machine, struct addr_location *al,
1468                     struct perf_sample *sample)
1469{
1470        struct thread *thread = machine__findnew_thread(machine, sample->pid,
1471                                                        sample->tid);
1472
1473        if (thread == NULL)
1474                return -1;
1475
1476        dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
1477        /*
1478         * Have we already created the kernel maps for this machine?
1479         *
1480         * This should have happened earlier, when we processed the kernel MMAP
1481         * events, but for older perf.data files there was no such thing, so do
1482         * it now.
1483         */
1484        if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
1485            machine__kernel_map(machine) == NULL)
1486                machine__create_kernel_maps(machine);
1487
1488        thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
1489        dump_printf(" ...... dso: %s\n",
1490                    al->map ? al->map->dso->long_name :
1491                        al->level == 'H' ? "[hypervisor]" : "<not found>");
1492
1493        if (thread__is_filtered(thread))
1494                al->filtered |= (1 << HIST_FILTER__THREAD);
1495
1496        al->sym = NULL;
1497        al->cpu = sample->cpu;
1498        al->socket = -1;
1499
1500        if (al->cpu >= 0) {
1501                struct perf_env *env = machine->env;
1502
1503                if (env && env->cpu)
1504                        al->socket = env->cpu[al->cpu].socket_id;
1505        }
1506
1507        if (al->map) {
1508                struct dso *dso = al->map->dso;
1509
1510                if (symbol_conf.dso_list &&
1511                    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
1512                                                  dso->short_name) ||
1513                               (dso->short_name != dso->long_name &&
1514                                strlist__has_entry(symbol_conf.dso_list,
1515                                                   dso->long_name))))) {
1516                        al->filtered |= (1 << HIST_FILTER__DSO);
1517                }
1518
1519                al->sym = map__find_symbol(al->map, al->addr);
1520        }
1521
1522        if (symbol_conf.sym_list &&
1523                (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
1524                                                al->sym->name))) {
1525                al->filtered |= (1 << HIST_FILTER__SYMBOL);
1526        }
1527
1528        return 0;
1529}
1530
1531/*
1532 * The preprocess_sample method will return with reference counts for the
1533 * in it, when done using (and perhaps getting ref counts if needing to
1534 * keep a pointer to one of those entries) it must be paired with
1535 * addr_location__put(), so that the refcounts can be decremented.
1536 */
1537void addr_location__put(struct addr_location *al)
1538{
1539        thread__zput(al->thread);
1540}
1541
/*
 * Intel BTS is requested as the hardware branch-instructions event with
 * a sample period of 1.
 *
 * Note: this previously tested 'attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS',
 * but PERF_COUNT_HW_* are enum values, not flag bits, so the bitwise test
 * also matched unrelated configs sharing bit 2 (e.g. branch-misses = 5,
 * bus-cycles = 6). An equality test is the correct check.
 */
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	       attr->sample_period == 1;
}
1548
/*
 * Does sample->addr for this event type refer to an address that can be
 * resolved to a symbol? True for the software page-fault events and for
 * BTS branch records.
 */
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (is_bts_event(attr))
		return true;

	if (attr->type != PERF_TYPE_SOFTWARE)
		return false;

	switch (attr->config) {
	case PERF_COUNT_SW_PAGE_FAULTS:
	case PERF_COUNT_SW_PAGE_FAULTS_MIN:
	case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
		return true;
	default:
		return false;
	}
}
1562
1563void thread__resolve(struct thread *thread, struct addr_location *al,
1564                     struct perf_sample *sample)
1565{
1566        thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
1567        if (!al->map)
1568                thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
1569                                      sample->addr, al);
1570
1571        al->cpu = sample->cpu;
1572        al->sym = NULL;
1573
1574        if (al->map)
1575                al->sym = map__find_symbol(al->map, al->addr);
1576}
1577