linux/tools/perf/util/synthetic-events.c
// SPDX-License-Identifier: GPL-2.0-only

#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/map_symbol.h"
#include "util/branch.h"
#include "util/memswap.h"
#include "util/namespaces.h"
#include "util/session.h"
#include "util/stat.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include "util/cgroup.h"
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <asm/bug.h>
#include <perf/evsel.h>
#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include <internal/lib.h> // page_size
#include <internal/threadmap.h>
#include <perf/threadmap.h>
#include <symbol/kallsyms.h>
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

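/*
 * Default timeout, in milliseconds, for parsing one /proc/<pid>/maps file;
 * it is scaled to nanoseconds against rdclock() in
 * perf_event__synthesize_mmap_events() below.
 */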
#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500

unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;

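/*
 * Deliver a synthesized event to 'process' along with a minimal fake sample
 * (unknown pid/tid/time/cpu, period 1), so consumers that expect a sample
 * with every event can treat synthesized events uniformly.
 */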
int perf_tool__process_synth_event(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct machine *machine,
                                   perf_event__handler_t process)
{
        struct perf_sample synth_sample = {
                .pid       = -1,
                .tid       = -1,
                .time      = -1,
                .stream_id = -1,
                .cpu       = -1,
                .period    = 1,
                .cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
        };

        return process(tool, event, &synth_sample, machine);
}

/*
 * Assumes that the first 4095 bytes of /proc/pid/status contains
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
                                    pid_t *tgid, pid_t *ppid)
{
        char filename[PATH_MAX];
        char bf[4096];
        int fd;
        size_t size = 0;
        ssize_t n;
        char *name, *tgids, *ppids;

        *tgid = -1;
        *ppid = -1;

        snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

        fd = open(filename, O_RDONLY);
        if (fd < 0) {
                pr_debug("couldn't open %s\n", filename);
                return -1;
        }

        n = read(fd, bf, sizeof(bf) - 1);
        close(fd);
        if (n <= 0) {
                pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
                           pid);
                return -1;
        }
        bf[n] = '\0';

        name = strstr(bf, "Name:");
        tgids = strstr(bf, "Tgid:");
        ppids = strstr(bf, "PPid:");

        if (name) {
                char *nl;

                name = skip_spaces(name + 5);  /* strlen("Name:") */
                nl = strchr(name, '\n');
                if (nl)
                        *nl = '\0';

                size = strlen(name);
                if (size >= len)
                        size = len - 1;
                memcpy(comm, name, size);
                comm[size] = '\0';
        } else {
                pr_debug("Name: string not found for pid %d\n", pid);
        }

        if (tgids) {
                tgids += 5;  /* strlen("Tgid:") */
                *tgid = atoi(tgids);
        } else {
                pr_debug("Tgid: string not found for pid %d\n", pid);
        }

        if (ppids) {
                ppids += 5;  /* strlen("PPid:") */
                *ppid = atoi(ppids);
        } else {
                pr_debug("PPid: string not found for pid %d\n", pid);
        }

        return 0;
}

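/*
 * Fill in a PERF_RECORD_COMM event for 'pid': on the host machine the
 * comm/tgid/ppid come from /proc/<pid>/status, for a guest the tgid falls
 * back to the guest machine's pid.
 */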
static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
                                    struct machine *machine,
                                    pid_t *tgid, pid_t *ppid)
{
        size_t size;

        *ppid = -1;

        memset(&event->comm, 0, sizeof(event->comm));

        if (machine__is_host(machine)) {
                if (perf_event__get_comm_ids(pid, event->comm.comm,
                                             sizeof(event->comm.comm),
                                             tgid, ppid) != 0) {
                        return -1;
                }
        } else {
                *tgid = machine->pid;
        }

        if (*tgid < 0)
                return -1;

        event->comm.pid = *tgid;
        event->comm.header.type = PERF_RECORD_COMM;

        size = strlen(event->comm.comm) + 1;
        size = PERF_ALIGN(size, sizeof(u64));
        memset(event->comm.comm + size, 0, machine->id_hdr_size);
        event->comm.header.size = (sizeof(event->comm) -
                                (sizeof(event->comm.comm) - size) +
                                machine->id_hdr_size);
        event->comm.tid = pid;

        return 0;
}

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
                                         union perf_event *event, pid_t pid,
                                         perf_event__handler_t process,
                                         struct machine *machine)
{
        pid_t tgid, ppid;

        if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
                return -1;

        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
                return -1;

        return tgid;
}

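/*
 * A namespace is identified by the (st_dev, st_ino) pair of its
 * /proc/<pid>/ns/<name> link; stat64() recovers both fields.
 */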
static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
                                         struct perf_ns_link_info *ns_link_info)
{
        struct stat64 st;
        char proc_ns[128];

        sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
        if (stat64(proc_ns, &st) == 0) {
                ns_link_info->dev = st.st_dev;
                ns_link_info->ino = st.st_ino;
        }
}

int perf_event__synthesize_namespaces(struct perf_tool *tool,
                                      union perf_event *event,
                                      pid_t pid, pid_t tgid,
                                      perf_event__handler_t process,
                                      struct machine *machine)
{
        u32 idx;
        struct perf_ns_link_info *ns_link_info;

        if (!tool || !tool->namespace_events)
                return 0;

        memset(&event->namespaces, 0, (sizeof(event->namespaces) +
               (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
               machine->id_hdr_size));

        event->namespaces.pid = tgid;
        event->namespaces.tid = pid;

        event->namespaces.nr_namespaces = NR_NAMESPACES;

        ns_link_info = event->namespaces.link_info;

        for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
                perf_event__get_ns_link_info(pid, perf_ns__name(idx),
                                             &ns_link_info[idx]);

        event->namespaces.header.type = PERF_RECORD_NAMESPACES;

        event->namespaces.header.size = (sizeof(event->namespaces) +
                        (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                        machine->id_hdr_size);

        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
                return -1;

        return 0;
}

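/*
 * Synthesize a PERF_RECORD_FORK so that analysis tools can reconstruct the
 * parent/child relationships of tasks that existed before recording began.
 */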
static int perf_event__synthesize_fork(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid, pid_t ppid,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
        memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

        /*
         * For the main thread, set the parent to the ppid from the status
         * file. For other threads, set the parent pid to the main thread,
         * i.e. assume the main thread spawns all threads in a process.
         */
        if (tgid == pid) {
                event->fork.ppid = ppid;
                event->fork.ptid = ppid;
        } else {
                event->fork.ppid = tgid;
                event->fork.ptid = tgid;
        }
        event->fork.pid  = tgid;
        event->fork.tid  = pid;
        event->fork.header.type = PERF_RECORD_FORK;
        event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;

        event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
                return -1;

        return 0;
}

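/*
 * Walk /proc/<pid>/task/<pid>/maps and emit one PERF_RECORD_MMAP2 per map,
 * so the existing mappings of already-running tasks show up in the trace.
 * Parsing is bounded by proc_map_timeout; on timeout the last event is
 * flagged with PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT.
 */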
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid,
                                       perf_event__handler_t process,
                                       struct machine *machine,
                                       bool mmap_data)
{
        char filename[PATH_MAX];
        FILE *fp;
        unsigned long long t;
        bool truncation = false;
        unsigned long long timeout = proc_map_timeout * 1000000ULL;
        int rc = 0;
        const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
        int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

        if (machine__is_default_guest(machine))
                return 0;

        snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
                 machine->root_dir, pid, pid);

        fp = fopen(filename, "r");
        if (fp == NULL) {
                /*
                 * We raced with a task exiting - just return:
                 */
                pr_debug("couldn't open %s\n", filename);
                return -1;
        }

        event->header.type = PERF_RECORD_MMAP2;
        t = rdclock();

        while (1) {
                char bf[BUFSIZ];
                char prot[5];
                char execname[PATH_MAX];
                char anonstr[] = "//anon";
                unsigned int ino;
                size_t size;
                ssize_t n;

                if (fgets(bf, sizeof(bf), fp) == NULL)
                        break;

                if ((rdclock() - t) > timeout) {
                        pr_warning("Reading %s timed out. "
                                   "You may want to increase "
                                   "the time limit by --proc-map-timeout\n",
                                   filename);
                        truncation = true;
                        goto out;
                }

                /* ensure null termination since stack will be reused. */
                strcpy(execname, "");

                /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
                n = sscanf(bf, "%"PRI_lx64"-%"PRI_lx64" %s %"PRI_lx64" %x:%x %u %[^\n]\n",
                       &event->mmap2.start, &event->mmap2.len, prot,
                       &event->mmap2.pgoff, &event->mmap2.maj,
                       &event->mmap2.min,
                       &ino, execname);

                /*
                 * Anon maps don't have the execname.
                 */
                if (n < 7)
                        continue;

                event->mmap2.ino = (u64)ino;
                event->mmap2.ino_generation = 0;

                /*
                 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
                 */
                if (machine__is_host(machine))
                        event->header.misc = PERF_RECORD_MISC_USER;
                else
                        event->header.misc = PERF_RECORD_MISC_GUEST_USER;

                /* map protection and flags bits */
                event->mmap2.prot = 0;
                event->mmap2.flags = 0;
                if (prot[0] == 'r')
                        event->mmap2.prot |= PROT_READ;
                if (prot[1] == 'w')
                        event->mmap2.prot |= PROT_WRITE;
                if (prot[2] == 'x')
                        event->mmap2.prot |= PROT_EXEC;

                if (prot[3] == 's')
                        event->mmap2.flags |= MAP_SHARED;
                else
                        event->mmap2.flags |= MAP_PRIVATE;

                if (prot[2] != 'x') {
                        if (!mmap_data || prot[0] != 'r')
                                continue;

                        event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
                }

out:
                if (truncation)
                        event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

                if (!strcmp(execname, ""))
                        strcpy(execname, anonstr);

                if (hugetlbfs_mnt_len &&
                    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
                        strcpy(execname, anonstr);
                        event->mmap2.flags |= MAP_HUGETLB;
                }

                size = strlen(execname) + 1;
                memcpy(event->mmap2.filename, execname, size);
                size = PERF_ALIGN(size, sizeof(u64));
                event->mmap2.len -= event->mmap2.start;
                event->mmap2.header.size = (sizeof(event->mmap2) -
                                        (sizeof(event->mmap2.filename) - size));
                memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
                event->mmap2.header.size += machine->id_hdr_size;
                event->mmap2.pid = tgid;
                event->mmap2.tid = pid;

                if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
                        rc = -1;
                        break;
                }

                if (truncation)
                        break;
        }

        fclose(fp);
        return rc;
}

#ifdef HAVE_FILE_HANDLE
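/*
 * name_to_handle_at() on a cgroup directory returns a file handle whose
 * payload is the 64-bit cgroup id; use it to fill a PERF_RECORD_CGROUP
 * event for the path below the cgroup mount point.
 */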
static int perf_event__synthesize_cgroup(struct perf_tool *tool,
                                         union perf_event *event,
                                         char *path, size_t mount_len,
                                         perf_event__handler_t process,
                                         struct machine *machine)
{
        size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
        size_t path_len = strlen(path) - mount_len + 1;
        struct {
                struct file_handle fh;
                uint64_t cgroup_id;
        } handle;
        int mount_id;

        while (path_len % sizeof(u64))
                path[mount_len + path_len++] = '\0';

        memset(&event->cgroup, 0, event_size);

        event->cgroup.header.type = PERF_RECORD_CGROUP;
        event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;

        handle.fh.handle_bytes = sizeof(handle.cgroup_id);
        if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
                pr_debug("name_to_handle_at failed: %s\n", path);
                return -1;
        }

        event->cgroup.id = handle.cgroup_id;
        strncpy(event->cgroup.path, path + mount_len, path_len);
        memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);

        if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
                pr_debug("process synth event failed\n");
                return -1;
        }

        return 0;
}

static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
                                        union perf_event *event,
                                        char *path, size_t mount_len,
                                        perf_event__handler_t process,
                                        struct machine *machine)
{
        size_t pos = strlen(path);
        DIR *d;
        struct dirent *dent;
        int ret = 0;

        if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
                                          process, machine) < 0)
                return -1;

        d = opendir(path);
        if (d == NULL) {
                pr_debug("failed to open directory: %s\n", path);
                return -1;
        }

        while ((dent = readdir(d)) != NULL) {
                if (dent->d_type != DT_DIR)
                        continue;
                if (!strcmp(dent->d_name, ".") ||
                    !strcmp(dent->d_name, ".."))
                        continue;

                /* any sane path should be less than PATH_MAX */
                if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
                        continue;

                if (path[pos - 1] != '/')
                        strcat(path, "/");
                strcat(path, dent->d_name);

                ret = perf_event__walk_cgroup_tree(tool, event, path,
                                                   mount_len, process, machine);
                if (ret < 0)
                        break;

                path[pos] = '\0';
        }

        closedir(d);
        return ret;
}

int perf_event__synthesize_cgroups(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine)
{
        union perf_event event;
        char cgrp_root[PATH_MAX];
        size_t mount_len;  /* length of mount point in the path */

        if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
                pr_debug("cannot find cgroup mount point\n");
                return -1;
        }

        mount_len = strlen(cgrp_root);
        /* make sure the path starts with a slash (after mount point) */
        strcat(cgrp_root, "/");

        if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
                                         process, machine) < 0)
                return -1;

        return 0;
}
#else
int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
                                   perf_event__handler_t process __maybe_unused,
                                   struct machine *machine __maybe_unused)
{
        return -1;
}
#endif

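/*
 * Emit one PERF_RECORD_MMAP per loaded kernel module, walking the module
 * maps already present in the machine's kernel maps.
 */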
int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
                                   struct machine *machine)
{
        int rc = 0;
        struct map *pos;
        struct maps *maps = machine__kernel_maps(machine);
        union perf_event *event = zalloc((sizeof(event->mmap) +
                                          machine->id_hdr_size));
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for kernel modules\n");
                return -1;
        }

        event->header.type = PERF_RECORD_MMAP;

        /*
         * kernel uses 0 for user space maps, see kernel/perf_event.c
         * __perf_event_mmap
         */
        if (machine__is_host(machine))
                event->header.misc = PERF_RECORD_MISC_KERNEL;
        else
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

        maps__for_each_entry(maps, pos) {
                size_t size;

                if (!__map__is_kmodule(pos))
                        continue;

                size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
                event->mmap.header.type = PERF_RECORD_MMAP;
                event->mmap.header.size = (sizeof(event->mmap) -
                                        (sizeof(event->mmap.filename) - size));
                memset(event->mmap.filename + size, 0, machine->id_hdr_size);
                event->mmap.header.size += machine->id_hdr_size;
                event->mmap.start = pos->start;
                event->mmap.len   = pos->end - pos->start;
                event->mmap.pid   = machine->pid;

                memcpy(event->mmap.filename, pos->dso->long_name,
                       pos->dso->long_name_len + 1);
                if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
                        rc = -1;
                        break;
                }
        }

        free(event);
        return rc;
}

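/*
 * Synthesize COMM/FORK/NAMESPACES/MMAP2 events for one task. With 'full'
 * set, iterate /proc/<pid>/task to cover every thread of the process;
 * otherwise emit events for the passed-in pid only.
 */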
static int __event__synthesize_thread(union perf_event *comm_event,
                                      union perf_event *mmap_event,
                                      union perf_event *fork_event,
                                      union perf_event *namespaces_event,
                                      pid_t pid, int full, perf_event__handler_t process,
                                      struct perf_tool *tool, struct machine *machine, bool mmap_data)
{
        char filename[PATH_MAX];
        DIR *tasks;
        struct dirent *dirent;
        pid_t tgid, ppid;
        int rc = 0;

        /* special case: only send one comm event using passed in pid */
        if (!full) {
                tgid = perf_event__synthesize_comm(tool, comm_event, pid,
                                                   process, machine);

                if (tgid == -1)
                        return -1;

                if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
                                                      tgid, process, machine) < 0)
                        return -1;

                /*
                 * send mmap only for thread group leader
                 * see thread__init_maps()
                 */
                if (pid == tgid &&
                    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
                                                       process, machine, mmap_data))
                        return -1;

                return 0;
        }

        if (machine__is_default_guest(machine))
                return 0;

        snprintf(filename, sizeof(filename), "%s/proc/%d/task",
                 machine->root_dir, pid);

        tasks = opendir(filename);
        if (tasks == NULL) {
                pr_debug("couldn't open %s\n", filename);
                return 0;
        }

        while ((dirent = readdir(tasks)) != NULL) {
                char *end;
                pid_t _pid;

                _pid = strtol(dirent->d_name, &end, 10);
                if (*end)
                        continue;

                rc = -1;
                if (perf_event__prepare_comm(comm_event, _pid, machine,
                                             &tgid, &ppid) != 0)
                        break;

                if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
                                                ppid, process, machine) < 0)
                        break;

                if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
                                                      tgid, process, machine) < 0)
                        break;

                /*
                 * Send the prepared comm event
                 */
                if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
                        break;

                rc = 0;
                if (_pid == pid) {
                        /* process the parent's maps too */
                        rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
                                                process, machine, mmap_data);
                        if (rc)
                                break;
                }
        }

        closedir(tasks);
        return rc;
}

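/*
 * Synthesize events for every thread in 'threads', and additionally for
 * each thread group leader that the thread map happens not to contain.
 */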
int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct perf_thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine,
                                      bool mmap_data)
{
        union perf_event *comm_event, *mmap_event, *fork_event;
        union perf_event *namespaces_event;
        int err = -1, thread, j;

        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)
                goto out;

        mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
        if (mmap_event == NULL)
                goto out_free_comm;

        fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
        if (fork_event == NULL)
                goto out_free_mmap;

        namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
                                  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                                  machine->id_hdr_size);
        if (namespaces_event == NULL)
                goto out_free_fork;

        err = 0;
        for (thread = 0; thread < threads->nr; ++thread) {
                if (__event__synthesize_thread(comm_event, mmap_event,
                                               fork_event, namespaces_event,
                                               perf_thread_map__pid(threads, thread), 0,
                                               process, tool, machine,
                                               mmap_data)) {
                        err = -1;
                        break;
                }

                /*
                 * comm.pid is set to thread group id by
                 * perf_event__synthesize_comm
                 */
                if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
                        bool need_leader = true;

                        /* is thread group leader in thread_map? */
                        for (j = 0; j < threads->nr; ++j) {
                                if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
                                        need_leader = false;
                                        break;
                                }
                        }

                        /* if not, generate events for it */
                        if (need_leader &&
                            __event__synthesize_thread(comm_event, mmap_event,
                                                       fork_event, namespaces_event,
                                                       comm_event->comm.pid, 0,
                                                       process, tool, machine,
                                                       mmap_data)) {
                                err = -1;
                                break;
                        }
                }
        }
        free(namespaces_event);
out_free_fork:
        free(fork_event);
out_free_mmap:
        free(mmap_event);
out_free_comm:
        free(comm_event);
out:
        return err;
}

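/*
 * Worker body: synthesize full per-thread events for the /proc entries in
 * dirent[start..start + num - 1]. Failures for individual tasks are
 * ignored, since we may race with threads exiting.
 */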
static int __perf_event__synthesize_threads(struct perf_tool *tool,
                                            perf_event__handler_t process,
                                            struct machine *machine,
                                            bool mmap_data,
                                            struct dirent **dirent,
                                            int start,
                                            int num)
{
        union perf_event *comm_event, *mmap_event, *fork_event;
        union perf_event *namespaces_event;
        int err = -1;
        char *end;
        pid_t pid;
        int i;

        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)
                goto out;

        mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
        if (mmap_event == NULL)
                goto out_free_comm;

        fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
        if (fork_event == NULL)
                goto out_free_mmap;

        namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
                                  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                                  machine->id_hdr_size);
        if (namespaces_event == NULL)
                goto out_free_fork;

        for (i = start; i < start + num; i++) {
                if (!isdigit(dirent[i]->d_name[0]))
                        continue;

                pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
                /* only interested in proper numerical dirents */
                if (*end)
                        continue;
                /*
                 * We may race with exiting thread, so don't stop just because
                 * one thread couldn't be synthesized.
                 */
                __event__synthesize_thread(comm_event, mmap_event, fork_event,
                                           namespaces_event, pid, 1, process,
                                           tool, machine, mmap_data);
        }
        err = 0;

        free(namespaces_event);
out_free_fork:
        free(fork_event);
out_free_mmap:
        free(mmap_event);
out_free_comm:
        free(comm_event);
out:
        return err;
}

struct synthesize_threads_arg {
        struct perf_tool *tool;
        perf_event__handler_t process;
        struct machine *machine;
        bool mmap_data;
        struct dirent **dirent;
        int num;
        int start;
};

static void *synthesize_threads_worker(void *arg)
{
        struct synthesize_threads_arg *args = arg;

        __perf_event__synthesize_threads(args->tool, args->process,
                                         args->machine, args->mmap_data,
                                         args->dirent,
                                         args->start, args->num);
        return NULL;
}

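/*
 * Scan /proc and synthesize events for all existing tasks, optionally
 * spreading the work over nr_threads_synthesize pthreads (one per online
 * CPU when the caller passes UINT_MAX).
 */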
int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine,
                                   bool mmap_data,
                                   unsigned int nr_threads_synthesize)
{
        struct synthesize_threads_arg *args = NULL;
        pthread_t *synthesize_threads = NULL;
        char proc_path[PATH_MAX];
        struct dirent **dirent;
        int num_per_thread;
        int m, n, i, j;
        int thread_nr;
        int base = 0;
        int err = -1;

        if (machine__is_default_guest(machine))
                return 0;

        snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
        n = scandir(proc_path, &dirent, 0, alphasort);
        if (n < 0)
                return err;

        if (nr_threads_synthesize == UINT_MAX)
                thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
        else
                thread_nr = nr_threads_synthesize;

        if (thread_nr <= 1) {
                err = __perf_event__synthesize_threads(tool, process,
                                                       machine, mmap_data,
                                                       dirent, base, n);
                goto free_dirent;
        }
        if (thread_nr > n)
                thread_nr = n;

        synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
        if (synthesize_threads == NULL)
                goto free_dirent;

        args = calloc(thread_nr, sizeof(*args));
        if (args == NULL)
                goto free_threads;

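        /*
         * Distribute the n /proc entries over thread_nr workers: the first
         * n % thread_nr workers each take num_per_thread + 1 entries, the
         * rest take num_per_thread.
         */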
        num_per_thread = n / thread_nr;
        m = n % thread_nr;
        for (i = 0; i < thread_nr; i++) {
                args[i].tool = tool;
                args[i].process = process;
                args[i].machine = machine;
                args[i].mmap_data = mmap_data;
                args[i].dirent = dirent;
        }
        for (i = 0; i < m; i++) {
                args[i].num = num_per_thread + 1;
                args[i].start = i * args[i].num;
        }
        if (i != 0)
                base = args[i-1].start + args[i-1].num;
        for (j = i; j < thread_nr; j++) {
                args[j].num = num_per_thread;
                args[j].start = base + (j - i) * args[i].num;
        }

        for (i = 0; i < thread_nr; i++) {
                if (pthread_create(&synthesize_threads[i], NULL,
                                   synthesize_threads_worker, &args[i]))
                        goto out_join;
        }
        err = 0;
out_join:
        for (i = 0; i < thread_nr; i++)
                pthread_join(synthesize_threads[i], NULL);
        free(args);
free_threads:
        free(synthesize_threads);
free_dirent:
        for (i = 0; i < n; i++)
                zfree(&dirent[i]);
        free(dirent);

        return err;
}

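/*
 * Weak default that just succeeds; architectures may override it to
 * synthesize additional kernel maps (on x86 this covers the PTI entry
 * trampolines).
 */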
int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
                                              perf_event__handler_t process __maybe_unused,
                                              struct machine *machine __maybe_unused)
{
        return 0;
}

static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                                perf_event__handler_t process,
                                                struct machine *machine)
{
        size_t size;
        struct map *map = machine__kernel_map(machine);
        struct kmap *kmap;
        int err;
        union perf_event *event;

        if (map == NULL)
                return -1;

        kmap = map__kmap(map);
        if (!kmap->ref_reloc_sym)
                return -1;

        /*
         * We should get this from /sys/kernel/sections/.text, but until that
         * is available use this, and after it is available keep it as a
         * fallback for older kernels.
         */
        event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for the kernel\n");
                return -1;
        }

        if (machine__is_host(machine)) {
                /*
                 * kernel uses PERF_RECORD_MISC_USER for user space maps,
                 * see kernel/perf_event.c __perf_event_mmap
                 */
                event->header.misc = PERF_RECORD_MISC_KERNEL;
        } else {
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
        }

        size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
                        "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
        size = PERF_ALIGN(size, sizeof(u64));
        event->mmap.header.type = PERF_RECORD_MMAP;
        event->mmap.header.size = (sizeof(event->mmap) -
                        (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
        event->mmap.pgoff = kmap->ref_reloc_sym->addr;
        event->mmap.start = map->start;
        event->mmap.len   = map->end - event->mmap.start;
        event->mmap.pid   = machine->pid;

        err = perf_tool__process_synth_event(tool, event, machine, process);
        free(event);

        return err;
}

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
        int err;

        err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
        if (err < 0)
                return err;

        return perf_event__synthesize_extra_kmaps(tool, process, machine);
}

int perf_event__synthesize_thread_map2(struct perf_tool *tool,
                                      struct perf_thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine)
{
        union perf_event *event;
        int i, err, size;

        size  = sizeof(event->thread_map);
        size += threads->nr * sizeof(event->thread_map.entries[0]);

        event = zalloc(size);
        if (!event)
                return -ENOMEM;

        event->header.type = PERF_RECORD_THREAD_MAP;
        event->header.size = size;
        event->thread_map.nr = threads->nr;

        for (i = 0; i < threads->nr; i++) {
                struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
                char *comm = perf_thread_map__comm(threads, i);

                if (!comm)
                        comm = (char *) "";

                entry->pid = perf_thread_map__pid(threads, i);
                strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
        }

        err = process(tool, event, NULL, machine);

        free(event);
        return err;
}

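/*
 * A cpu map can be encoded either as a plain array of cpu numbers or as a
 * bitmask; the two helpers below fill in each representation.
 */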
static void synthesize_cpus(struct cpu_map_entries *cpus,
                            struct perf_cpu_map *map)
{
        int i;

        cpus->nr = map->nr;

        for (i = 0; i < map->nr; i++)
                cpus->cpu[i] = map->map[i];
}

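/* Set one bit per cpu in a bitmask sized to hold the highest cpu number. */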
static void synthesize_mask(struct perf_record_record_cpu_map *mask,
                            struct perf_cpu_map *map, int max)
{
        int i;

        mask->nr = BITS_TO_LONGS(max);
        mask->long_size = sizeof(long);

        for (i = 0; i < map->nr; i++)
                set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct perf_cpu_map *map)
{
        return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

static size_t mask_size(struct perf_cpu_map *map, int *max)
{
        int i;

        *max = 0;

        for (i = 0; i < map->nr; i++) {
                /* bit position of the cpu is + 1 */
                int bit = map->map[i] + 1;

                if (bit > *max)
                        *max = bit;
        }

        return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
}

void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
{
        size_t size_cpus, size_mask;
        bool is_dummy = perf_cpu_map__empty(map);

        /*
         * Both array and mask data have variable size based
         * on the number of cpus and their actual values.
         * The size of the 'struct perf_record_cpu_map_data' is:
         *
         *   array = size of 'struct cpu_map_entries' +
         *           number of cpus * sizeof(u64)
         *
         *   mask  = size of 'struct perf_record_record_cpu_map' +
         *           maximum cpu bit converted to size of longs
         *
         * and finally + the size of 'struct perf_record_cpu_map_data'.
         */
        size_cpus = cpus_size(map);
        size_mask = mask_size(map, max);

        if (is_dummy || (size_cpus < size_mask)) {
                *size += size_cpus;
                *type  = PERF_CPU_MAP__CPUS;
        } else {
                *size += size_mask;
                *type  = PERF_CPU_MAP__MASK;
        }

        *size += sizeof(struct perf_record_cpu_map_data);
        *size = PERF_ALIGN(*size, sizeof(u64));
        return zalloc(*size);
}

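/* Fill in the chosen encoding; 'type' was decided by cpu_map_data__alloc(). */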
void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
                              u16 type, int max)
{
        data->type = type;

        switch (type) {
        case PERF_CPU_MAP__CPUS:
                synthesize_cpus((struct cpu_map_entries *) data->data, map);
                break;
        case PERF_CPU_MAP__MASK:
                synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
                break;
        default:
                break;
        }
}

static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
{
        size_t size = sizeof(struct perf_record_cpu_map);
        struct perf_record_cpu_map *event;
        int max;
        u16 type;

        event = cpu_map_data__alloc(map, &size, &type, &max);
        if (!event)
                return NULL;

        event->header.type = PERF_RECORD_CPU_MAP;
        event->header.size = size;
        event->data.type   = type;

        cpu_map_data__synthesize(&event->data, map, type, max);
        return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
                                   struct perf_cpu_map *map,
                                   perf_event__handler_t process,
                                   struct machine *machine)
{
        struct perf_record_cpu_map *event;
        int err;

        event = cpu_map_event__new(map);
        if (!event)
                return -ENOMEM;

        err = process(tool, (union perf_event *) event, NULL, machine);

        free(event);
        return err;
}

int perf_event__synthesize_stat_config(struct perf_tool *tool,
                                       struct perf_stat_config *config,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
        struct perf_record_stat_config *event;
        int size, i = 0, err;

        size  = sizeof(*event);
        size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

        event = zalloc(size);
        if (!event)
                return -ENOMEM;

        event->header.type = PERF_RECORD_STAT_CONFIG;
        event->header.size = size;
        event->nr          = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)                                      \
        event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;   \
        event->data[i].val = __val;                             \
        i++;

        ADD(AGGR_MODE,  config->aggr_mode)
        ADD(INTERVAL,   config->interval)
        ADD(SCALE,      config->scale)

        WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
                  "stat config terms unbalanced\n");
#undef ADD

        err = process(tool, (union perf_event *) event, NULL, machine);

        free(event);
        return err;
}

int perf_event__synthesize_stat(struct perf_tool *tool,
                                u32 cpu, u32 thread, u64 id,
                                struct perf_counts_values *count,
                                perf_event__handler_t process,
                                struct machine *machine)
{
        struct perf_record_stat event;

        event.header.type = PERF_RECORD_STAT;
        event.header.size = sizeof(event);
        event.header.misc = 0;

        event.id        = id;
        event.cpu       = cpu;
        event.thread    = thread;
        event.val       = count->val;
        event.ena       = count->ena;
        event.run       = count->run;

        return process(tool, (union perf_event *) &event, NULL, machine);
}

int perf_event__synthesize_stat_round(struct perf_tool *tool,
                                      u64 evtime, u64 type,
                                      perf_event__handler_t process,
                                      struct machine *machine)
{
        struct perf_record_stat_round event;

        event.header.type = PERF_RECORD_STAT_ROUND;
        event.header.size = sizeof(event);
        event.header.misc = 0;

        event.time = evtime;
        event.type = type;

        return process(tool, (union perf_event *) &event, NULL, machine);
}

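/*
 * Compute the number of bytes a PERF_RECORD_SAMPLE with this sample_type
 * and read_format will occupy; must be kept in sync with the layout
 * written by perf_event__synthesize_sample() below.
 */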
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
{
        size_t sz, result = sizeof(struct perf_record_sample);

        if (type & PERF_SAMPLE_IDENTIFIER)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_IP)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_TID)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_TIME)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_ADDR)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_ID)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_STREAM_ID)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_CPU)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_PERIOD)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_READ) {
                result += sizeof(u64);
                if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                        result += sizeof(u64);
                if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                        result += sizeof(u64);
                /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
                if (read_format & PERF_FORMAT_GROUP) {
                        sz = sample->read.group.nr *
                             sizeof(struct sample_read_value);
                        result += sz;
                } else {
                        result += sizeof(u64);
                }
        }

        if (type & PERF_SAMPLE_CALLCHAIN) {
                sz = (sample->callchain->nr + 1) * sizeof(u64);
                result += sz;
        }

        if (type & PERF_SAMPLE_RAW) {
                result += sizeof(u32);
                result += sample->raw_size;
        }

        if (type & PERF_SAMPLE_BRANCH_STACK) {
                sz = sample->branch_stack->nr * sizeof(struct branch_entry);
                /* nr, hw_idx */
                sz += 2 * sizeof(u64);
                result += sz;
        }

        if (type & PERF_SAMPLE_REGS_USER) {
                if (sample->user_regs.abi) {
                        result += sizeof(u64);
                        sz = hweight64(sample->user_regs.mask) * sizeof(u64);
                        result += sz;
                } else {
                        result += sizeof(u64);
                }
        }

        if (type & PERF_SAMPLE_STACK_USER) {
                sz = sample->user_stack.size;
                result += sizeof(u64);
                if (sz) {
                        result += sz;
                        result += sizeof(u64);
                }
        }

        if (type & PERF_SAMPLE_WEIGHT)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_DATA_SRC)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_TRANSACTION)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_REGS_INTR) {
                if (sample->intr_regs.abi) {
                        result += sizeof(u64);
                        sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
                        result += sz;
                } else {
                        result += sizeof(u64);
                }
        }

        if (type & PERF_SAMPLE_PHYS_ADDR)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_CGROUP)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_AUX) {
                result += sizeof(u64);
                result += sample->aux_sample.size;
        }

        return result;
}

1366int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
1367                                  const struct perf_sample *sample)
1368{
1369        __u64 *array;
1370        size_t sz;
1371        /*
1372         * used for cross-endian analysis. See git commit 65014ab3
1373         * for why this goofiness is needed.
1374         */
1375        union u64_swap u;
1376
1377        array = event->sample.array;
1378
1379        if (type & PERF_SAMPLE_IDENTIFIER) {
1380                *array = sample->id;
1381                array++;
1382        }
1383
1384        if (type & PERF_SAMPLE_IP) {
1385                *array = sample->ip;
1386                array++;
1387        }
1388
1389        if (type & PERF_SAMPLE_TID) {
1390                u.val32[0] = sample->pid;
1391                u.val32[1] = sample->tid;
1392                *array = u.val64;
1393                array++;
1394        }
1395
1396        if (type & PERF_SAMPLE_TIME) {
1397                *array = sample->time;
1398                array++;
1399        }
1400
1401        if (type & PERF_SAMPLE_ADDR) {
1402                *array = sample->addr;
1403                array++;
1404        }
1405
1406        if (type & PERF_SAMPLE_ID) {
1407                *array = sample->id;
1408                array++;
1409        }
1410
1411        if (type & PERF_SAMPLE_STREAM_ID) {
1412                *array = sample->stream_id;
1413                array++;
1414        }
1415
1416        if (type & PERF_SAMPLE_CPU) {
1417                u.val32[0] = sample->cpu;
1418                u.val32[1] = 0;
1419                *array = u.val64;
1420                array++;
1421        }
1422
1423        if (type & PERF_SAMPLE_PERIOD) {
1424                *array = sample->period;
1425                array++;
1426        }
1427
1428        if (type & PERF_SAMPLE_READ) {
1429                if (read_format & PERF_FORMAT_GROUP)
1430                        *array = sample->read.group.nr;
1431                else
1432                        *array = sample->read.one.value;
1433                array++;
1434
1435                if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1436                        *array = sample->read.time_enabled;
1437                        array++;
1438                }
1439
1440                if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1441                        *array = sample->read.time_running;
1442                        array++;
1443                }
1444
1445                /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1446                if (read_format & PERF_FORMAT_GROUP) {
1447                        sz = sample->read.group.nr *
1448                             sizeof(struct sample_read_value);
1449                        memcpy(array, sample->read.group.values, sz);
1450                        array = (void *)array + sz;
1451                } else {
1452                        *array = sample->read.one.id;
1453                        array++;
1454                }
1455        }
1456
1457        if (type & PERF_SAMPLE_CALLCHAIN) {
1458                sz = (sample->callchain->nr + 1) * sizeof(u64);
1459                memcpy(array, sample->callchain, sz);
1460                array = (void *)array + sz;
1461        }
1462
1463        if (type & PERF_SAMPLE_RAW) {
1464                u.val32[0] = sample->raw_size;
1465                *array = u.val64;
1466                array = (void *)array + sizeof(u32);
1467
1468                memcpy(array, sample->raw_data, sample->raw_size);
1469                array = (void *)array + sample->raw_size;
1470        }
1471
1472        if (type & PERF_SAMPLE_BRANCH_STACK) {
1473                sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1474                /* nr, hw_idx */
1475                sz += 2 * sizeof(u64);
1476                memcpy(array, sample->branch_stack, sz);
1477                array = (void *)array + sz;
1478        }
1479
1480        if (type & PERF_SAMPLE_REGS_USER) {
1481                if (sample->user_regs.abi) {
1482                        *array++ = sample->user_regs.abi;
1483                        sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1484                        memcpy(array, sample->user_regs.regs, sz);
1485                        array = (void *)array + sz;
1486                } else {
1487                        *array++ = 0;
1488                }
1489        }
1490
1491        if (type & PERF_SAMPLE_STACK_USER) {
1492                sz = sample->user_stack.size;
1493                *array++ = sz;
1494                if (sz) {
1495                        memcpy(array, sample->user_stack.data, sz);
1496                        array = (void *)array + sz;
1497                        *array++ = sz;
1498                }
1499        }
1500
1501        if (type & PERF_SAMPLE_WEIGHT) {
1502                *array = sample->weight;
1503                array++;
1504        }
1505
1506        if (type & PERF_SAMPLE_DATA_SRC) {
1507                *array = sample->data_src;
1508                array++;
1509        }
1510
1511        if (type & PERF_SAMPLE_TRANSACTION) {
1512                *array = sample->transaction;
1513                array++;
1514        }
1515
1516        if (type & PERF_SAMPLE_REGS_INTR) {
1517                if (sample->intr_regs.abi) {
1518                        *array++ = sample->intr_regs.abi;
1519                        sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1520                        memcpy(array, sample->intr_regs.regs, sz);
1521                        array = (void *)array + sz;
1522                } else {
1523                        *array++ = 0;
1524                }
1525        }
1526
1527        if (type & PERF_SAMPLE_PHYS_ADDR) {
1528                *array = sample->phys_addr;
1529                array++;
1530        }
1531
1532        if (type & PERF_SAMPLE_CGROUP) {
1533                *array = sample->cgroup;
1534                array++;
1535        }
1536
1537        if (type & PERF_SAMPLE_AUX) {
1538                sz = sample->aux_sample.size;
1539                *array++ = sz;
1540                memcpy(array, sample->aux_sample.data, sz);
1541                array = (void *)array + sz;
1542        }
1543
1544        return 0;
1545}
1546
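/*
 * Synthesize PERF_RECORD_ID_INDEX events mapping each sample id to its
 * evsel index, cpu and tid.  Since header.size is a u16, the index is
 * emitted in chunks of at most max_nr entries per event.
 */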
1547int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
1548                                    struct evlist *evlist, struct machine *machine)
1549{
1550        union perf_event *ev;
1551        struct evsel *evsel;
1552        size_t nr = 0, i = 0, sz, max_nr, n;
1553        int err;
1554
1555        pr_debug2("Synthesizing id index\n");
1556
1557        max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
1558                 sizeof(struct id_index_entry);
1559
1560        evlist__for_each_entry(evlist, evsel)
1561                nr += evsel->core.ids;
1562
1563        n = nr > max_nr ? max_nr : nr;
1564        sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
1565        ev = zalloc(sz);
1566        if (!ev)
1567                return -ENOMEM;
1568
1569        ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1570        ev->id_index.header.size = sz;
1571        ev->id_index.nr = n;
1572
1573        evlist__for_each_entry(evlist, evsel) {
1574                u32 j;
1575
1576                for (j = 0; j < evsel->core.ids; j++) {
1577                        struct id_index_entry *e;
1578                        struct perf_sample_id *sid;
1579
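                        /* The current chunk is full: emit it and start refilling. */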
1580                        if (i >= n) {
1581                                err = process(tool, ev, NULL, machine);
1582                                if (err)
1583                                        goto out_err;
1584                                nr -= n;
1585                                i = 0;
1586                        }
1587
1588                        e = &ev->id_index.entries[i++];
1589
1590                        e->id = evsel->core.id[j];
1591
1592                        sid = perf_evlist__id2sid(evlist, e->id);
1593                        if (!sid) {
1594                                free(ev);
1595                                return -ENOENT;
1596                        }
1597
1598                        e->idx = sid->idx;
1599                        e->cpu = sid->cpu;
1600                        e->tid = sid->tid;
1601                }
1602        }
1603
1604        sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
1605        ev->id_index.header.size = sz;
1606        ev->id_index.nr = nr;
1607
1608        err = process(tool, ev, NULL, machine);
1609out_err:
1610        free(ev);
1611
1612        return err;
1613}
1614
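/*
 * Pick the synthesis strategy from the target: an explicit thread map is
 * walked directly, a cpu-wide target scans all of /proc, and a command
 * that is yet to be exec'ed has no existing threads to synthesize.
 */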
1615int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
1616                                  struct target *target, struct perf_thread_map *threads,
1617                                  perf_event__handler_t process, bool data_mmap,
1618                                  unsigned int nr_threads_synthesize)
1619{
1620        if (target__has_task(target))
1621                return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
1622        else if (target__has_cpu(target))
1623                return perf_event__synthesize_threads(tool, process,
1624                                                      machine, data_mmap,
1625                                                      nr_threads_synthesize);
1626        /* command specified, no pre-existing threads to synthesize */
1627        return 0;
1628}
1629
1630int machine__synthesize_threads(struct machine *machine, struct target *target,
1631                                struct perf_thread_map *threads, bool data_mmap,
1632                                unsigned int nr_threads_synthesize)
1633{
1634        return __machine__synthesize_threads(machine, NULL, target, threads,
1635                                             perf_event__process, data_mmap,
1636                                             nr_threads_synthesize);
1637}
1638
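/*
 * Allocate a PERF_RECORD_EVENT_UPDATE event with room for @size bytes of
 * payload, rounded up to a u64 boundary as all perf records must be.
 */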
1639static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
1640{
1641        struct perf_record_event_update *ev;
1642
1643        size += sizeof(*ev);
1644        size  = PERF_ALIGN(size, sizeof(u64));
1645
1646        ev = zalloc(size);
1647        if (ev) {
1648                ev->header.type = PERF_RECORD_EVENT_UPDATE;
1649                ev->header.size = (u16)size;
1650                ev->type        = type;
1651                ev->id          = id;
1652        }
1653        return ev;
1654}
1655
1656int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
1657                                             perf_event__handler_t process)
1658{
1659        size_t size = strlen(evsel->unit);
1660        struct perf_record_event_update *ev;
1661        int err;
1662
1663        ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
1664        if (ev == NULL)
1665                return -ENOMEM;
1666
1667        strlcpy(ev->data, evsel->unit, size + 1);
1668        err = process(tool, (union perf_event *)ev, NULL, NULL);
1669        free(ev);
1670        return err;
1671}
1672
1673int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
1674                                              perf_event__handler_t process)
1675{
1676        struct perf_record_event_update *ev;
1677        struct perf_record_event_update_scale *ev_data;
1678        int err;
1679
1680        ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
1681        if (ev == NULL)
1682                return -ENOMEM;
1683
1684        ev_data = (struct perf_record_event_update_scale *)ev->data;
1685        ev_data->scale = evsel->scale;
1686        err = process(tool, (union perf_event *)ev, NULL, NULL);
1687        free(ev);
1688        return err;
1689}
1690
1691int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
1692                                             perf_event__handler_t process)
1693{
1694        struct perf_record_event_update *ev;
1695        size_t len = strlen(evsel->name);
1696        int err;
1697
1698        ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
1699        if (ev == NULL)
1700                return -ENOMEM;
1701
1702        strlcpy(ev->data, evsel->name, len + 1);
1703        err = process(tool, (union perf_event *)ev, NULL, NULL);
1704        free(ev);
1705        return err;
1706}
1707
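/*
 * Synthesize a PERF_EVENT_UPDATE__CPUS event for an evsel that carries
 * its own cpu map (e.g. an uncore PMU event); evsels without own_cpus
 * need no such update.
 */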
1708int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
1709                                             perf_event__handler_t process)
1710{
1711        size_t size = sizeof(struct perf_record_event_update);
1712        struct perf_record_event_update *ev;
1713        int max, err;
1714        u16 type;
1715
1716        if (!evsel->core.own_cpus)
1717                return 0;
1718
1719        ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
1720        if (!ev)
1721                return -ENOMEM;
1722
1723        ev->header.type = PERF_RECORD_EVENT_UPDATE;
1724        ev->header.size = (u16)size;
1725        ev->type        = PERF_EVENT_UPDATE__CPUS;
1726        ev->id          = evsel->core.id[0];
1727
1728        cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
1729                                 evsel->core.own_cpus, type, max);
1730
1731        err = process(tool, (union perf_event *)ev, NULL, NULL);
1732        free(ev);
1733        return err;
1734}
1735
1736int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
1737                                 perf_event__handler_t process)
1738{
1739        struct evsel *evsel;
1740        int err = 0;
1741
1742        evlist__for_each_entry(evlist, evsel) {
1743                err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
1744                                                  evsel->core.id, process);
1745                if (err) {
1746                        pr_debug("failed to create perf header attribute\n");
1747                        return err;
1748                }
1749        }
1750
1751        return err;
1752}
1753
1754static bool has_unit(struct evsel *evsel)
1755{
1756        return evsel->unit && *evsel->unit;
1757}
1758
1759static bool has_scale(struct evsel *evsel)
1760{
1761        return evsel->scale != 1;
1762}
1763
1764int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
1765                                      perf_event__handler_t process, bool is_pipe)
1766{
1767        struct evsel *evsel;
1768        int err;
1769
1770        /*
1771         * Synthesize the event details not carried within the
1772         * attr event: unit, scale, name.
1773         */
1774        evlist__for_each_entry(evsel_list, evsel) {
1775                if (!evsel->supported)
1776                        continue;
1777
1778                /*
1779                 * Synthesize unit and scale only if they are defined.
1780                 */
1781                if (has_unit(evsel)) {
1782                        err = perf_event__synthesize_event_update_unit(tool, evsel, process);
1783                        if (err < 0) {
1784                                pr_err("Couldn't synthesize evsel unit.\n");
1785                                return err;
1786                        }
1787                }
1788
1789                if (has_scale(evsel)) {
1790                        err = perf_event__synthesize_event_update_scale(tool, evsel, process);
1791                        if (err < 0) {
1792                                pr_err("Couldn't synthesize evsel scale.\n");
1793                                return err;
1794                        }
1795                }
1796
1797                if (evsel->core.own_cpus) {
1798                        err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
1799                        if (err < 0) {
1800                                pr_err("Couldn't synthesize evsel cpus.\n");
1801                                return err;
1802                        }
1803                }
1804
1805                /*
1806                 * Name is needed only for pipe output,
1807                 * perf.data carries event names.
1808                 */
1809                if (is_pipe) {
1810                        err = perf_event__synthesize_event_update_name(tool, evsel, process);
1811                        if (err < 0) {
1812                                pr_err("Couldn't synthesize evsel name.\n");
1813                                return err;
1814                        }
1815                }
1816        }
1817        return 0;
1818}
1819
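/*
 * Record layout: the header, the attr itself padded to a u64 boundary,
 * then the array of sample ids.  If the total size overflows the u16
 * header.size, bail out with -E2BIG rather than emit a truncated record.
 */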
1820int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
1821                                u32 ids, u64 *id, perf_event__handler_t process)
1822{
1823        union perf_event *ev;
1824        size_t size;
1825        int err;
1826
1827        size = sizeof(struct perf_event_attr);
1828        size = PERF_ALIGN(size, sizeof(u64));
1829        size += sizeof(struct perf_event_header);
1830        size += ids * sizeof(u64);
1831
1832        ev = zalloc(size);
1833
1834        if (ev == NULL)
1835                return -ENOMEM;
1836
1837        ev->attr.attr = *attr;
1838        memcpy(ev->attr.id, id, ids * sizeof(u64));
1839
1840        ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
1841        ev->attr.header.size = (u16)size;
1842
1843        if (ev->attr.header.size == size)
1844                err = process(tool, ev, NULL, NULL);
1845        else
1846                err = -E2BIG;
1847
1848        free(ev);
1849
1850        return err;
1851}
1852
1853int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
1854                                        perf_event__handler_t process)
1855{
1856        union perf_event ev;
1857        struct tracing_data *tdata;
1858        ssize_t size = 0, aligned_size = 0, padding;
1859        struct feat_fd ff;
1860
1861        /*
1862         * We are going to store the size of the data followed
1863         * by the data contents. Since fd is a pipe, we cannot
1864         * seek back to store the size of the data once we know
1865         * it. Instead we:
1866         *
1867         * - write the tracing data to a temp file
1868         * - get/write the data size to the pipe
1869         * - write the tracing data from the temp file
1870         *   to the pipe
1871         */
1872        tdata = tracing_data_get(&evlist->core.entries, fd, true);
1873        if (!tdata)
1874                return -1;
1875
1876        memset(&ev, 0, sizeof(ev));
1877
1878        ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
1879        size = tdata->size;
1880        aligned_size = PERF_ALIGN(size, sizeof(u64));
1881        padding = aligned_size - size;
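        /* e.g. size == 100 -> aligned_size == 104, padding == 4 */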
1882        ev.tracing_data.header.size = sizeof(ev.tracing_data);
1883        ev.tracing_data.size = aligned_size;
1884
1885        process(tool, &ev, NULL, NULL);
1886
1887        /*
1888         * The put function will copy all the tracing data
1889         * stored in the temp file to the pipe.
1890         */
1891        tracing_data_put(tdata);
1892
1893        ff = (struct feat_fd){ .fd = fd };
1894        if (write_padded(&ff, NULL, 0, padding))
1895                return -1;
1896
1897        return aligned_size;
1898}
1899
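/*
 * Synthesize a build id event only for DSOs that samples actually hit;
 * the filename is zero-padded out to a NAME_ALIGN boundary.
 */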
1900int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
1901                                    perf_event__handler_t process, struct machine *machine)
1902{
1903        union perf_event ev;
1904        size_t len;
1905
1906        if (!pos->hit)
1907                return 0;
1908
1909        memset(&ev, 0, sizeof(ev));
1910
1911        len = pos->long_name_len + 1;
1912        len = PERF_ALIGN(len, NAME_ALIGN);
1913        memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
1914        ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
1915        ev.build_id.header.misc = misc;
1916        ev.build_id.pid = machine->pid;
1917        ev.build_id.header.size = sizeof(ev.build_id) + len;
1918        memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
1919
1920        return process(tool, &ev, NULL, machine);
1921}
1922
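/*
 * Emit the preamble records needed to make sense of stat data, in order:
 * optionally the attrs, then any extra attr updates, the thread and cpu
 * maps and finally the stat config.
 */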
1923int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
1924                                       struct evlist *evlist, perf_event__handler_t process, bool attrs)
1925{
1926        int err;
1927
1928        if (attrs) {
1929                err = perf_event__synthesize_attrs(tool, evlist, process);
1930                if (err < 0) {
1931                        pr_err("Couldn't synthesize attrs.\n");
1932                        return err;
1933                }
1934        }
1935
1936        err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
        if (err < 0) {
                pr_err("Couldn't synthesize extra attrs.\n");
                return err;
        }

1937        err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
1938        if (err < 0) {
1939                pr_err("Couldn't synthesize thread map.\n");
1940                return err;
1941        }
1942
1943        err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
1944        if (err < 0) {
1945                pr_err("Couldn't synthesize cpu map.\n");
1946                return err;
1947        }
1948
1949        err = perf_event__synthesize_stat_config(tool, config, process, NULL);
1950        if (err < 0) {
1951                pr_err("Couldn't synthesize config.\n");
1952                return err;
1953        }
1954
1955        return 0;
1956}
1957
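/*
 * Weak default that synthesizes nothing: arches that can convert
 * hardware timestamps to wall clock time (e.g. x86 TSC) override this
 * to emit a PERF_RECORD_TIME_CONV event.
 */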
1958int __weak perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
1959                                       struct perf_tool *tool __maybe_unused,
1960                                       perf_event__handler_t process __maybe_unused,
1961                                       struct machine *machine __maybe_unused)
1962{
1963        return 0;
1964}
1965
1966extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
1967
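/*
 * When writing to a pipe there is no file header to put features in, so
 * each feature section is wrapped in a PERF_RECORD_HEADER_FEATURE record
 * instead, finished with a HEADER_LAST_FEATURE mark so the reader knows
 * the feature stream is complete.
 */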
1968int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
1969                                    struct evlist *evlist, perf_event__handler_t process)
1970{
1971        struct perf_header *header = &session->header;
1972        struct perf_record_header_feature *fe;
1973        struct feat_fd ff;
1974        size_t sz, sz_hdr;
1975        int feat, ret;
1976
1977        sz_hdr = sizeof(fe->header);
1978        sz = sizeof(union perf_event);
1979        /* get a nice alignment */
1980        sz = PERF_ALIGN(sz, page_size);
1981
1982        memset(&ff, 0, sizeof(ff));
1983
1984        ff.buf = malloc(sz);
1985        if (!ff.buf)
1986                return -ENOMEM;
1987
1988        ff.size = sz - sz_hdr;
1989        ff.ph = &session->header;
1990
1991        for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
1992                if (!feat_ops[feat].synthesize) {
1993                        pr_debug("No synthesize callback for header feature %d\n", feat);
1994                        continue;
1995                }
1996
1997                ff.offset = sizeof(*fe);
1998
1999                ret = feat_ops[feat].write(&ff, evlist);
2000                if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
2001                        pr_debug("Error writing feature\n");
2002                        continue;
2003                }
2004                /* ff.buf may have changed due to realloc in do_write() */
2005                fe = ff.buf;
2006                memset(fe, 0, sizeof(*fe));
2007
2008                fe->feat_id = feat;
2009                fe->header.type = PERF_RECORD_HEADER_FEATURE;
2010                fe->header.size = ff.offset;
2011
2012                ret = process(tool, ff.buf, NULL, NULL);
2013                if (ret) {
2014                        free(ff.buf);
2015                        return ret;
2016                }
2017        }
2018
2019        /* Send HEADER_LAST_FEATURE mark. */
2020        fe = ff.buf;
2021        fe->feat_id     = HEADER_LAST_FEATURE;
2022        fe->header.type = PERF_RECORD_HEADER_FEATURE;
2023        fe->header.size = sizeof(*fe);
2024
2025        ret = process(tool, ff.buf, NULL, NULL);
2026
2027        free(ff.buf);
2028        return ret;
2029}
2030