linux/tools/perf/util/synthetic-events.c
// SPDX-License-Identifier: GPL-2.0-only

#include "util/cgroup.h"
#include "util/data.h"
#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/map_symbol.h"
#include "util/branch.h"
#include "util/memswap.h"
#include "util/namespaces.h"
#include "util/session.h"
#include "util/stat.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <asm/bug.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <internal/lib.h> // page_size
#include <internal/threadmap.h>
#include <perf/threadmap.h>
#include <symbol/kallsyms.h>
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <api/io.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500

unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;

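/*
 * Wrap a synthesized event in a dummy perf_sample (pid/tid/time etc. all -1,
 * period 1) so it can be fed through the same ->process callback as real
 * events read from the ring buffer.
 */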
int perf_tool__process_synth_event(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct machine *machine,
                                   perf_event__handler_t process)
{
        struct perf_sample synth_sample = {
                .pid       = -1,
                .tid       = -1,
                .time      = -1,
                .stream_id = -1,
                .cpu       = -1,
                .period    = 1,
                .cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
        };

        return process(tool, event, &synth_sample, machine);
}

/*
 * Assumes that the first 4095 bytes of /proc/pid/status contain
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len,
                                    pid_t *tgid, pid_t *ppid, bool *kernel)
{
        char bf[4096];
        int fd;
        size_t size = 0;
        ssize_t n;
        char *name, *tgids, *ppids, *vmpeak, *threads;

        *tgid = -1;
        *ppid = -1;

        if (pid)
                snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid);
        else
                snprintf(bf, sizeof(bf), "/proc/%d/status", tid);

        fd = open(bf, O_RDONLY);
        if (fd < 0) {
                pr_debug("couldn't open %s\n", bf);
                return -1;
        }

        n = read(fd, bf, sizeof(bf) - 1);
        close(fd);
        if (n <= 0) {
                pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
                           tid);
                return -1;
        }
        bf[n] = '\0';

        name = strstr(bf, "Name:");
        tgids = strstr(name ?: bf, "Tgid:");
        ppids = strstr(tgids ?: bf, "PPid:");
        vmpeak = strstr(ppids ?: bf, "VmPeak:");

        if (vmpeak)
                threads = NULL;
        else
                threads = strstr(ppids ?: bf, "Threads:");

        if (name) {
                char *nl;

                name = skip_spaces(name + 5);  /* strlen("Name:") */
                nl = strchr(name, '\n');
                if (nl)
                        *nl = '\0';

                size = strlen(name);
                if (size >= len)
                        size = len - 1;
                memcpy(comm, name, size);
                comm[size] = '\0';
        } else {
                pr_debug("Name: string not found for pid %d\n", tid);
        }

        if (tgids) {
                tgids += 5;  /* strlen("Tgid:") */
                *tgid = atoi(tgids);
        } else {
                pr_debug("Tgid: string not found for pid %d\n", tid);
        }

        if (ppids) {
                ppids += 5;  /* strlen("PPid:") */
                *ppid = atoi(ppids);
        } else {
                pr_debug("PPid: string not found for pid %d\n", tid);
        }

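        /*
         * Heuristic for kernel threads: their /proc/<pid>/status has no
         * VmPeak line (no user address space) but does report Threads.
         */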
        if (!vmpeak && threads)
                *kernel = true;
        else
                *kernel = false;

        return 0;
}

static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid,
                                    struct machine *machine,
                                    pid_t *tgid, pid_t *ppid, bool *kernel)
{
        size_t size;

        *ppid = -1;

        memset(&event->comm, 0, sizeof(event->comm));

        if (machine__is_host(machine)) {
                if (perf_event__get_comm_ids(pid, tid, event->comm.comm,
                                             sizeof(event->comm.comm),
                                             tgid, ppid, kernel) != 0) {
                        return -1;
                }
        } else {
                *tgid = machine->pid;
        }

        if (*tgid < 0)
                return -1;

        event->comm.pid = *tgid;
        event->comm.header.type = PERF_RECORD_COMM;

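        /*
         * Trim the comm record to the u64-aligned length of the string and
         * leave room after it for the sample id fields (id_hdr_size).
         */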
        size = strlen(event->comm.comm) + 1;
        size = PERF_ALIGN(size, sizeof(u64));
        memset(event->comm.comm + size, 0, machine->id_hdr_size);
        event->comm.header.size = (sizeof(event->comm) -
                                (sizeof(event->comm.comm) - size) +
                                machine->id_hdr_size);
        event->comm.tid = tid;

        return 0;
}

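/*
 * Synthesize a PERF_RECORD_COMM for @pid and return its thread group id,
 * or -1 on failure.
 */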
pid_t perf_event__synthesize_comm(struct perf_tool *tool,
                                  union perf_event *event, pid_t pid,
                                  perf_event__handler_t process,
                                  struct machine *machine)
{
        pid_t tgid, ppid;
        bool kernel_thread;

        if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid,
                                     &kernel_thread) != 0)
                return -1;

        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
                return -1;

        return tgid;
}

static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
                                         struct perf_ns_link_info *ns_link_info)
{
        struct stat64 st;
        char proc_ns[128];

        sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
        if (stat64(proc_ns, &st) == 0) {
                ns_link_info->dev = st.st_dev;
                ns_link_info->ino = st.st_ino;
        }
}

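/*
 * Synthesize one PERF_RECORD_NAMESPACES event carrying dev/inode pairs for
 * all NR_NAMESPACES namespace links of @pid, in perf_ns__name() order.
 */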
int perf_event__synthesize_namespaces(struct perf_tool *tool,
                                      union perf_event *event,
                                      pid_t pid, pid_t tgid,
                                      perf_event__handler_t process,
                                      struct machine *machine)
{
        u32 idx;
        struct perf_ns_link_info *ns_link_info;

        if (!tool || !tool->namespace_events)
                return 0;

        memset(&event->namespaces, 0, (sizeof(event->namespaces) +
               (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
               machine->id_hdr_size));

        event->namespaces.pid = tgid;
        event->namespaces.tid = pid;

        event->namespaces.nr_namespaces = NR_NAMESPACES;

        ns_link_info = event->namespaces.link_info;

        for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
                perf_event__get_ns_link_info(pid, perf_ns__name(idx),
                                             &ns_link_info[idx]);

        event->namespaces.header.type = PERF_RECORD_NAMESPACES;

        event->namespaces.header.size = (sizeof(event->namespaces) +
                        (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                        machine->id_hdr_size);

        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
                return -1;

        return 0;
}

static int perf_event__synthesize_fork(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid, pid_t ppid,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
        memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

        /*
         * For the main thread set the parent to ppid from the status file.
         * For other threads set the parent pid to the main thread, i.e.
         * assume the main thread spawns all threads in a process.
         */
        if (tgid == pid) {
                event->fork.ppid = ppid;
                event->fork.ptid = ppid;
        } else {
                event->fork.ppid = tgid;
                event->fork.ptid = tgid;
        }
        event->fork.pid  = tgid;
        event->fork.tid  = pid;
        event->fork.header.type = PERF_RECORD_FORK;
        event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;

        event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
                return -1;

        return 0;
}

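/*
 * Parse a single /proc/<pid>/maps line, e.g.:
 *
 *   00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat
 *
 * Returns false on EOF or a malformed field; pathname may legitimately be
 * empty (anonymous mappings).
 */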
static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
                                u32 *prot, u32 *flags, __u64 *offset,
                                u32 *maj, u32 *min,
                                __u64 *inode,
                                ssize_t pathname_size, char *pathname)
{
        __u64 temp;
        int ch;
        char *start_pathname = pathname;

        if (io__get_hex(io, start) != '-')
                return false;
        if (io__get_hex(io, end) != ' ')
                return false;

        /* map protection and flags bits */
        *prot = 0;
        ch = io__get_char(io);
        if (ch == 'r')
                *prot |= PROT_READ;
        else if (ch != '-')
                return false;
        ch = io__get_char(io);
        if (ch == 'w')
                *prot |= PROT_WRITE;
        else if (ch != '-')
                return false;
        ch = io__get_char(io);
        if (ch == 'x')
                *prot |= PROT_EXEC;
        else if (ch != '-')
                return false;
        ch = io__get_char(io);
        if (ch == 's')
                *flags = MAP_SHARED;
        else if (ch == 'p')
                *flags = MAP_PRIVATE;
        else
                return false;
        if (io__get_char(io) != ' ')
                return false;

        if (io__get_hex(io, offset) != ' ')
                return false;

        if (io__get_hex(io, &temp) != ':')
                return false;
        *maj = temp;
        if (io__get_hex(io, &temp) != ' ')
                return false;
        *min = temp;

        ch = io__get_dec(io, inode);
        if (ch != ' ') {
                *pathname = '\0';
                return ch == '\n';
        }
        do {
                ch = io__get_char(io);
        } while (ch == ' ');
        while (true) {
                if (ch < 0)
                        return false;
                if (ch == '\0' || ch == '\n' ||
                    (pathname + 1 - start_pathname) >= pathname_size) {
                        *pathname = '\0';
                        return true;
                }
                *pathname++ = ch;
                ch = io__get_char(io);
        }
}

static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
                                             bool is_kernel)
{
        struct build_id bid;
        int rc;

        if (is_kernel)
                rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
        else
                rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;

        if (rc == 0) {
                memcpy(event->build_id, bid.data, sizeof(bid.data));
                event->build_id_size = (u8) bid.size;
                event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
                event->__reserved_1 = 0;
                event->__reserved_2 = 0;
        } else {
                if (event->filename[0] == '/') {
                        pr_debug2("Failed to read build ID for %s\n",
                                  event->filename);
                }
        }
}

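/*
 * Synthesize PERF_RECORD_MMAP2 events for every mapping in
 * /proc/<pid>/task/<pid>/maps. Non-executable mappings are skipped unless
 * @mmap_data is set, and parsing is abandoned (with the truncation bit set
 * on the last event) once proc_map_timeout milliseconds have elapsed.
 */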
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid,
                                       perf_event__handler_t process,
                                       struct machine *machine,
                                       bool mmap_data)
{
        unsigned long long t;
        char bf[BUFSIZ];
        struct io io;
        bool truncation = false;
        unsigned long long timeout = proc_map_timeout * 1000000ULL;
        int rc = 0;
        const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
        int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

        if (machine__is_default_guest(machine))
                return 0;

        snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
                machine->root_dir, pid, pid);

        io.fd = open(bf, O_RDONLY, 0);
        if (io.fd < 0) {
                /*
                 * We raced with a task exiting - just return:
                 */
                pr_debug("couldn't open %s\n", bf);
                return -1;
        }
        io__init(&io, io.fd, bf, sizeof(bf));

        event->header.type = PERF_RECORD_MMAP2;
        t = rdclock();

        while (!io.eof) {
                static const char anonstr[] = "//anon";
                size_t size, aligned_size;

                /* ensure null termination since stack will be reused. */
                event->mmap2.filename[0] = '\0';

                /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
                if (!read_proc_maps_line(&io,
                                        &event->mmap2.start,
                                        &event->mmap2.len,
                                        &event->mmap2.prot,
                                        &event->mmap2.flags,
                                        &event->mmap2.pgoff,
                                        &event->mmap2.maj,
                                        &event->mmap2.min,
                                        &event->mmap2.ino,
                                        sizeof(event->mmap2.filename),
                                        event->mmap2.filename))
                        continue;

                if ((rdclock() - t) > timeout) {
                        pr_warning("Reading %s/proc/%d/task/%d/maps timed out. "
                                   "You may want to increase "
                                   "the time limit by --proc-map-timeout\n",
                                   machine->root_dir, pid, pid);
                        truncation = true;
                        goto out;
                }

                event->mmap2.ino_generation = 0;

                /*
                 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
                 */
                if (machine__is_host(machine))
                        event->header.misc = PERF_RECORD_MISC_USER;
                else
                        event->header.misc = PERF_RECORD_MISC_GUEST_USER;

                if ((event->mmap2.prot & PROT_EXEC) == 0) {
                        if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
                                continue;

                        event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
                }

out:
                if (truncation)
                        event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

                if (!strcmp(event->mmap2.filename, ""))
                        strcpy(event->mmap2.filename, anonstr);

                if (hugetlbfs_mnt_len &&
                    !strncmp(event->mmap2.filename, hugetlbfs_mnt,
                             hugetlbfs_mnt_len)) {
                        strcpy(event->mmap2.filename, anonstr);
                        event->mmap2.flags |= MAP_HUGETLB;
                }

                size = strlen(event->mmap2.filename) + 1;
                aligned_size = PERF_ALIGN(size, sizeof(u64));
                event->mmap2.len -= event->mmap2.start;
                event->mmap2.header.size = (sizeof(event->mmap2) -
                                        (sizeof(event->mmap2.filename) - aligned_size));
                memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
                        (aligned_size - size));
                event->mmap2.header.size += machine->id_hdr_size;
                event->mmap2.pid = tgid;
                event->mmap2.tid = pid;

                if (symbol_conf.buildid_mmap2)
                        perf_record_mmap2__read_build_id(&event->mmap2, false);

                if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
                        rc = -1;
                        break;
                }

                if (truncation)
                        break;
        }

        close(io.fd);
        return rc;
}

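/*
 * Cgroup synthesis uses name_to_handle_at() to obtain the cgroup id, so it
 * is only built when file handles are available.
 */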
#ifdef HAVE_FILE_HANDLE
static int perf_event__synthesize_cgroup(struct perf_tool *tool,
                                         union perf_event *event,
                                         char *path, size_t mount_len,
                                         perf_event__handler_t process,
                                         struct machine *machine)
{
        size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
        size_t path_len = strlen(path) - mount_len + 1;
        struct {
                struct file_handle fh;
                uint64_t cgroup_id;
        } handle;
        int mount_id;

        while (path_len % sizeof(u64))
                path[mount_len + path_len++] = '\0';

        memset(&event->cgroup, 0, event_size);

        event->cgroup.header.type = PERF_RECORD_CGROUP;
        event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;

        handle.fh.handle_bytes = sizeof(handle.cgroup_id);
        if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
                pr_debug("stat failed: %s\n", path);
                return -1;
        }

        event->cgroup.id = handle.cgroup_id;
        strncpy(event->cgroup.path, path + mount_len, path_len);
        memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);

        if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
                pr_debug("process synth event failed\n");
                return -1;
        }

        return 0;
}

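/*
 * Depth-first walk of the cgroup hierarchy rooted at @path, synthesizing
 * one event per directory. @path is a PATH_MAX buffer that is extended in
 * place and restored after each recursion.
 */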
static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
                                        union perf_event *event,
                                        char *path, size_t mount_len,
                                        perf_event__handler_t process,
                                        struct machine *machine)
{
        size_t pos = strlen(path);
        DIR *d;
        struct dirent *dent;
        int ret = 0;

        if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
                                          process, machine) < 0)
                return -1;

        d = opendir(path);
        if (d == NULL) {
                pr_debug("failed to open directory: %s\n", path);
                return -1;
        }

        while ((dent = readdir(d)) != NULL) {
                if (dent->d_type != DT_DIR)
                        continue;
                if (!strcmp(dent->d_name, ".") ||
                    !strcmp(dent->d_name, ".."))
                        continue;

                /* any sane path should be less than PATH_MAX */
                if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
                        continue;

                if (path[pos - 1] != '/')
                        strcat(path, "/");
                strcat(path, dent->d_name);

                ret = perf_event__walk_cgroup_tree(tool, event, path,
                                                   mount_len, process, machine);
                if (ret < 0)
                        break;

                path[pos] = '\0';
        }

        closedir(d);
        return ret;
}

int perf_event__synthesize_cgroups(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine)
{
        union perf_event event;
        char cgrp_root[PATH_MAX];
        size_t mount_len;  /* length of mount point in the path */

        if (!tool || !tool->cgroup_events)
                return 0;

        if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
                pr_debug("cannot find cgroup mount point\n");
                return -1;
        }

        mount_len = strlen(cgrp_root);
        /* make sure the path starts with a slash (after mount point) */
        strcat(cgrp_root, "/");

        if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
                                         process, machine) < 0)
                return -1;

        return 0;
}
#else
int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
                                   perf_event__handler_t process __maybe_unused,
                                   struct machine *machine __maybe_unused)
{
        return -1;
}
#endif

int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
                                   struct machine *machine)
{
        int rc = 0;
        struct map *pos;
        struct maps *maps = machine__kernel_maps(machine);
        union perf_event *event;
        size_t size = symbol_conf.buildid_mmap2 ?
                        sizeof(event->mmap2) : sizeof(event->mmap);

        event = zalloc(size + machine->id_hdr_size);
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for kernel modules\n");
                return -1;
        }

        /*
         * kernel uses 0 for user space maps, see kernel/perf_event.c
         * __perf_event_mmap
         */
        if (machine__is_host(machine))
                event->header.misc = PERF_RECORD_MISC_KERNEL;
        else
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

        maps__for_each_entry(maps, pos) {
                if (!__map__is_kmodule(pos))
                        continue;

                if (symbol_conf.buildid_mmap2) {
                        size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
                        event->mmap2.header.type = PERF_RECORD_MMAP2;
                        event->mmap2.header.size = (sizeof(event->mmap2) -
                                                (sizeof(event->mmap2.filename) - size));
                        memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
                        event->mmap2.header.size += machine->id_hdr_size;
                        event->mmap2.start = pos->start;
                        event->mmap2.len   = pos->end - pos->start;
                        event->mmap2.pid   = machine->pid;

                        memcpy(event->mmap2.filename, pos->dso->long_name,
                               pos->dso->long_name_len + 1);

                        perf_record_mmap2__read_build_id(&event->mmap2, false);
                } else {
                        size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
                        event->mmap.header.type = PERF_RECORD_MMAP;
                        event->mmap.header.size = (sizeof(event->mmap) -
                                                (sizeof(event->mmap.filename) - size));
                        memset(event->mmap.filename + size, 0, machine->id_hdr_size);
                        event->mmap.header.size += machine->id_hdr_size;
                        event->mmap.start = pos->start;
                        event->mmap.len   = pos->end - pos->start;
                        event->mmap.pid   = machine->pid;

                        memcpy(event->mmap.filename, pos->dso->long_name,
                               pos->dso->long_name_len + 1);
                }

                if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
                        rc = -1;
                        break;
                }
        }

        free(event);
        return rc;
}

static int filter_task(const struct dirent *dirent)
{
        return isdigit(dirent->d_name[0]);
}

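/*
 * Synthesize COMM/FORK/NAMESPACES/MMAP2 events for one task. With @full
 * set, iterate over every entry of /proc/<pid>/task; otherwise emit only
 * the comm (plus namespaces, and mmaps for a group leader) of @pid.
 */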
static int __event__synthesize_thread(union perf_event *comm_event,
                                      union perf_event *mmap_event,
                                      union perf_event *fork_event,
                                      union perf_event *namespaces_event,
                                      pid_t pid, int full, perf_event__handler_t process,
                                      struct perf_tool *tool, struct machine *machine, bool mmap_data)
{
        char filename[PATH_MAX];
        struct dirent **dirent;
        pid_t tgid, ppid;
        int rc = 0;
        int i, n;

        /* special case: only send one comm event using passed in pid */
        if (!full) {
                tgid = perf_event__synthesize_comm(tool, comm_event, pid,
                                                   process, machine);

                if (tgid == -1)
                        return -1;

                if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
                                                      tgid, process, machine) < 0)
                        return -1;

                /*
                 * send mmap only for thread group leader
                 * see thread__init_maps()
                 */
                if (pid == tgid &&
                    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
                                                       process, machine, mmap_data))
                        return -1;

                return 0;
        }

        if (machine__is_default_guest(machine))
                return 0;

        snprintf(filename, sizeof(filename), "%s/proc/%d/task",
                 machine->root_dir, pid);

        n = scandir(filename, &dirent, filter_task, alphasort);
        if (n < 0)
                return n;

        for (i = 0; i < n; i++) {
                char *end;
                pid_t _pid;
                bool kernel_thread = false;

                _pid = strtol(dirent[i]->d_name, &end, 10);
                if (*end)
                        continue;

                rc = -1;
                if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
                                             &tgid, &ppid, &kernel_thread) != 0)
                        break;

                if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
                                                ppid, process, machine) < 0)
                        break;

                if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
                                                      tgid, process, machine) < 0)
                        break;

                /*
                 * Send the prepared comm event
                 */
                if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
                        break;

                rc = 0;
                if (_pid == pid && !kernel_thread) {
                        /* process the parent's maps too */
                        rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
                                                process, machine, mmap_data);
                        if (rc)
                                break;
                }
        }

        for (i = 0; i < n; i++)
                zfree(&dirent[i]);
        free(dirent);

        return rc;
}

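/*
 * Synthesize events for every thread in @threads. If a thread's group
 * leader is not itself in the map, synthesize events for the leader too,
 * so that its maps are available.
 */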
int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct perf_thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine,
                                      bool mmap_data)
{
        union perf_event *comm_event, *mmap_event, *fork_event;
        union perf_event *namespaces_event;
        int err = -1, thread, j;

        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)
                goto out;

        mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
        if (mmap_event == NULL)
                goto out_free_comm;

        fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
        if (fork_event == NULL)
                goto out_free_mmap;

        namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
                                  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                                  machine->id_hdr_size);
        if (namespaces_event == NULL)
                goto out_free_fork;

        err = 0;
        for (thread = 0; thread < threads->nr; ++thread) {
                if (__event__synthesize_thread(comm_event, mmap_event,
                                               fork_event, namespaces_event,
                                               perf_thread_map__pid(threads, thread), 0,
                                               process, tool, machine,
                                               mmap_data)) {
                        err = -1;
                        break;
                }

                /*
                 * comm.pid is set to thread group id by
                 * perf_event__synthesize_comm
                 */
                if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
                        bool need_leader = true;

                        /* is thread group leader in thread_map? */
                        for (j = 0; j < threads->nr; ++j) {
                                if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
                                        need_leader = false;
                                        break;
                                }
                        }

                        /* if not, generate events for it */
                        if (need_leader &&
                            __event__synthesize_thread(comm_event, mmap_event,
                                                       fork_event, namespaces_event,
                                                       comm_event->comm.pid, 0,
                                                       process, tool, machine,
                                                       mmap_data)) {
                                err = -1;
                                break;
                        }
                }
        }
        free(namespaces_event);
out_free_fork:
        free(fork_event);
out_free_mmap:
        free(mmap_event);
out_free_comm:
        free(comm_event);
out:
        return err;
}

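/*
 * Worker body: synthesize events for the /proc dirents in
 * [start, start + num). Failures for individual threads are ignored,
 * since tasks can exit while we are scanning.
 */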
static int __perf_event__synthesize_threads(struct perf_tool *tool,
                                            perf_event__handler_t process,
                                            struct machine *machine,
                                            bool mmap_data,
                                            struct dirent **dirent,
                                            int start,
                                            int num)
{
        union perf_event *comm_event, *mmap_event, *fork_event;
        union perf_event *namespaces_event;
        int err = -1;
        char *end;
        pid_t pid;
        int i;

        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)
                goto out;

        mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
        if (mmap_event == NULL)
                goto out_free_comm;

        fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
        if (fork_event == NULL)
                goto out_free_mmap;

        namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
                                  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                                  machine->id_hdr_size);
        if (namespaces_event == NULL)
                goto out_free_fork;

        for (i = start; i < start + num; i++) {
                if (!isdigit(dirent[i]->d_name[0]))
                        continue;

                pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
                /* only interested in proper numerical dirents */
                if (*end)
                        continue;
                /*
                 * We may race with exiting thread, so don't stop just because
                 * one thread couldn't be synthesized.
                 */
                __event__synthesize_thread(comm_event, mmap_event, fork_event,
                                           namespaces_event, pid, 1, process,
                                           tool, machine, mmap_data);
        }
        err = 0;

        free(namespaces_event);
out_free_fork:
        free(fork_event);
out_free_mmap:
        free(mmap_event);
out_free_comm:
        free(comm_event);
out:
        return err;
}

struct synthesize_threads_arg {
        struct perf_tool *tool;
        perf_event__handler_t process;
        struct machine *machine;
        bool mmap_data;
        struct dirent **dirent;
        int num;
        int start;
};

static void *synthesize_threads_worker(void *arg)
{
        struct synthesize_threads_arg *args = arg;

        __perf_event__synthesize_threads(args->tool, args->process,
                                         args->machine, args->mmap_data,
                                         args->dirent,
                                         args->start, args->num);
        return NULL;
}

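/*
 * Synthesize events for every task directory in /proc, optionally fanning
 * the work out over nr_threads_synthesize pthreads; the first n % thread_nr
 * workers each take one extra dirent so the whole array is covered.
 *
 * A minimal usage sketch (callers normally reach this via
 * machine__synthesize_threads(); process_synthesized_event stands in for
 * whatever perf_event__handler_t the caller uses):
 *
 *   err = perf_event__synthesize_threads(tool, process_synthesized_event,
 *                                        machine, false, UINT_MAX);
 */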
int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine,
                                   bool mmap_data,
                                   unsigned int nr_threads_synthesize)
{
        struct synthesize_threads_arg *args = NULL;
        pthread_t *synthesize_threads = NULL;
        char proc_path[PATH_MAX];
        struct dirent **dirent;
        int num_per_thread;
        int m, n, i, j;
        int thread_nr;
        int base = 0;
        int err = -1;

        if (machine__is_default_guest(machine))
                return 0;

        snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
        n = scandir(proc_path, &dirent, filter_task, alphasort);
        if (n < 0)
                return err;

        if (nr_threads_synthesize == UINT_MAX)
                thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
        else
                thread_nr = nr_threads_synthesize;

        if (thread_nr <= 1) {
                err = __perf_event__synthesize_threads(tool, process,
                                                       machine, mmap_data,
                                                       dirent, base, n);
                goto free_dirent;
        }
        if (thread_nr > n)
                thread_nr = n;

        synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
        if (synthesize_threads == NULL)
                goto free_dirent;

        args = calloc(thread_nr, sizeof(*args));
        if (args == NULL)
                goto free_threads;

        num_per_thread = n / thread_nr;
        m = n % thread_nr;
        for (i = 0; i < thread_nr; i++) {
                args[i].tool = tool;
                args[i].process = process;
                args[i].machine = machine;
                args[i].mmap_data = mmap_data;
                args[i].dirent = dirent;
        }
        for (i = 0; i < m; i++) {
                args[i].num = num_per_thread + 1;
                args[i].start = i * args[i].num;
        }
        if (i != 0)
                base = args[i-1].start + args[i-1].num;
        for (j = i; j < thread_nr; j++) {
                args[j].num = num_per_thread;
                args[j].start = base + (j - i) * args[i].num;
        }

        for (i = 0; i < thread_nr; i++) {
                if (pthread_create(&synthesize_threads[i], NULL,
                                   synthesize_threads_worker, &args[i]))
                        goto out_join;
        }
        err = 0;
out_join:
        for (i = 0; i < thread_nr; i++)
                pthread_join(synthesize_threads[i], NULL);
        free(args);
free_threads:
        free(synthesize_threads);
free_dirent:
        for (i = 0; i < n; i++)
                zfree(&dirent[i]);
        free(dirent);

        return err;
}

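/*
 * Weak default, overridden by architectures (e.g. x86, for the PTI entry
 * trampolines) that need extra kernel maps synthesized.
 */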
int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
                                              perf_event__handler_t process __maybe_unused,
                                              struct machine *machine __maybe_unused)
{
        return 0;
}

static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                                perf_event__handler_t process,
                                                struct machine *machine)
{
        union perf_event *event;
        size_t size = symbol_conf.buildid_mmap2 ?
                        sizeof(event->mmap2) : sizeof(event->mmap);
        struct map *map = machine__kernel_map(machine);
        struct kmap *kmap;
        int err;

        if (map == NULL)
                return -1;

        kmap = map__kmap(map);
        if (!kmap->ref_reloc_sym)
                return -1;

        /*
         * We should get this from /sys/kernel/sections/.text, but until that
         * is available use this, and after it is available keep it as a
         * fallback for older kernels.
         */
        event = zalloc(size + machine->id_hdr_size);
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for the kernel map\n");
                return -1;
        }

        if (machine__is_host(machine)) {
                /*
                 * kernel uses PERF_RECORD_MISC_USER for user space maps,
                 * see kernel/perf_event.c __perf_event_mmap
                 */
                event->header.misc = PERF_RECORD_MISC_KERNEL;
        } else {
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
        }

        if (symbol_conf.buildid_mmap2) {
                size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename),
                                "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
                size = PERF_ALIGN(size, sizeof(u64));
                event->mmap2.header.type = PERF_RECORD_MMAP2;
                event->mmap2.header.size = (sizeof(event->mmap2) -
                                (sizeof(event->mmap2.filename) - size) + machine->id_hdr_size);
                event->mmap2.pgoff = kmap->ref_reloc_sym->addr;
                event->mmap2.start = map->start;
                event->mmap2.len   = map->end - event->mmap2.start;
                event->mmap2.pid   = machine->pid;

                perf_record_mmap2__read_build_id(&event->mmap2, true);
        } else {
                size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
                                "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
                size = PERF_ALIGN(size, sizeof(u64));
                event->mmap.header.type = PERF_RECORD_MMAP;
                event->mmap.header.size = (sizeof(event->mmap) -
                                (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
                event->mmap.pgoff = kmap->ref_reloc_sym->addr;
                event->mmap.start = map->start;
                event->mmap.len   = map->end - event->mmap.start;
                event->mmap.pid   = machine->pid;
        }

        err = perf_tool__process_synth_event(tool, event, machine, process);
        free(event);

        return err;
}

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
        int err;

        err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
        if (err < 0)
                return err;

        return perf_event__synthesize_extra_kmaps(tool, process, machine);
}

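/*
 * Synthesize a PERF_RECORD_THREAD_MAP carrying a pid and comm per entry of
 * @threads; an entry's comm may be the empty string if it is unknown.
 */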
int perf_event__synthesize_thread_map2(struct perf_tool *tool,
                                       struct perf_thread_map *threads,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
        union perf_event *event;
        int i, err, size;

        size  = sizeof(event->thread_map);
        size += threads->nr * sizeof(event->thread_map.entries[0]);

        event = zalloc(size);
        if (!event)
                return -ENOMEM;

        event->header.type = PERF_RECORD_THREAD_MAP;
        event->header.size = size;
        event->thread_map.nr = threads->nr;

        for (i = 0; i < threads->nr; i++) {
                struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
                char *comm = perf_thread_map__comm(threads, i);

                if (!comm)
                        comm = (char *) "";

                entry->pid = perf_thread_map__pid(threads, i);
                strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
        }

        err = process(tool, event, NULL, machine);

        free(event);
        return err;
}

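/*
 * A cpu map can be encoded either as a plain list of u16 cpu numbers
 * (PERF_CPU_MAP__CPUS) or as a bitmask of longs (PERF_CPU_MAP__MASK);
 * cpu_map_data__alloc() below picks whichever encoding is smaller.
 */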
static void synthesize_cpus(struct cpu_map_entries *cpus,
                            struct perf_cpu_map *map)
{
        int i;

        cpus->nr = map->nr;

        for (i = 0; i < map->nr; i++)
                cpus->cpu[i] = map->map[i];
}

static void synthesize_mask(struct perf_record_record_cpu_map *mask,
                            struct perf_cpu_map *map, int max)
{
        int i;

        mask->nr = BITS_TO_LONGS(max);
        mask->long_size = sizeof(long);

        for (i = 0; i < map->nr; i++)
                set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct perf_cpu_map *map)
{
        return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

static size_t mask_size(struct perf_cpu_map *map, int *max)
{
        int i;

        *max = 0;

        for (i = 0; i < map->nr; i++) {
                /* bit position of the cpu is + 1 */
                int bit = map->map[i] + 1;

                if (bit > *max)
                        *max = bit;
        }

        return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
}

void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
{
        size_t size_cpus, size_mask;
        bool is_dummy = perf_cpu_map__empty(map);

        /*
         * Both array and mask data have variable size based
         * on the number of cpus and their actual values.
         * The size of the 'struct perf_record_cpu_map_data' is:
         *
         *   array = size of 'struct cpu_map_entries' +
         *           number of cpus * sizeof(u16)
         *
         *   mask  = size of 'struct perf_record_record_cpu_map' +
         *           maximum cpu bit converted to size of longs
         *
         * and finally + the size of 'struct perf_record_cpu_map_data'.
         */
        size_cpus = cpus_size(map);
        size_mask = mask_size(map, max);

        if (is_dummy || (size_cpus < size_mask)) {
                *size += size_cpus;
                *type  = PERF_CPU_MAP__CPUS;
        } else {
                *size += size_mask;
                *type  = PERF_CPU_MAP__MASK;
        }

        *size += sizeof(struct perf_record_cpu_map_data);
        *size = PERF_ALIGN(*size, sizeof(u64));
        return zalloc(*size);
}

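/*
 * Fill @data with @map in the encoding selected by cpu_map_data__alloc().
 */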
void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
                              u16 type, int max)
{
        data->type = type;

        switch (type) {
        case PERF_CPU_MAP__CPUS:
                synthesize_cpus((struct cpu_map_entries *) data->data, map);
                break;
        case PERF_CPU_MAP__MASK:
                synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
                break;
        default:
                break;
        }
}

static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
{
        size_t size = sizeof(struct perf_record_cpu_map);
        struct perf_record_cpu_map *event;
        int max;
        u16 type;

        event = cpu_map_data__alloc(map, &size, &type, &max);
        if (!event)
                return NULL;

        event->header.type = PERF_RECORD_CPU_MAP;
        event->header.size = size;
        event->data.type   = type;

        cpu_map_data__synthesize(&event->data, map, type, max);
        return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
                                   struct perf_cpu_map *map,
                                   perf_event__handler_t process,
                                   struct machine *machine)
{
        struct perf_record_cpu_map *event;
        int err;

        event = cpu_map_event__new(map);
        if (!event)
                return -ENOMEM;

        err = process(tool, (union perf_event *) event, NULL, machine);

        free(event);
        return err;
}

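/*
 * Synthesize a PERF_RECORD_STAT_CONFIG event encoding the stat config as
 * an array of (tag, value) pairs, one per PERF_STAT_CONFIG_TERM__* term.
 */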
int perf_event__synthesize_stat_config(struct perf_tool *tool,
                                       struct perf_stat_config *config,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
        struct perf_record_stat_config *event;
        int size, i = 0, err;

        size  = sizeof(*event);
        size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

        event = zalloc(size);
        if (!event)
                return -ENOMEM;

        event->header.type = PERF_RECORD_STAT_CONFIG;
        event->header.size = size;
        event->nr          = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)                                      \
        event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;   \
        event->data[i].val = __val;                             \
        i++;

        ADD(AGGR_MODE,  config->aggr_mode)
        ADD(INTERVAL,   config->interval)
        ADD(SCALE,      config->scale)

        WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
                  "stat config terms unbalanced\n");
#undef ADD

        err = process(tool, (union perf_event *) event, NULL, machine);

        free(event);
        return err;
}

int perf_event__synthesize_stat(struct perf_tool *tool,
                                u32 cpu, u32 thread, u64 id,
                                struct perf_counts_values *count,
                                perf_event__handler_t process,
                                struct machine *machine)
{
        struct perf_record_stat event;

        event.header.type = PERF_RECORD_STAT;
        event.header.size = sizeof(event);
        event.header.misc = 0;

        event.id        = id;
        event.cpu       = cpu;
        event.thread    = thread;
        event.val       = count->val;
        event.ena       = count->ena;
        event.run       = count->run;

        return process(tool, (union perf_event *) &event, NULL, machine);
}

int perf_event__synthesize_stat_round(struct perf_tool *tool,
                                      u64 evtime, u64 type,
                                      perf_event__handler_t process,
                                      struct machine *machine)
{
        struct perf_record_stat_round event;

        event.header.type = PERF_RECORD_STAT_ROUND;
        event.header.size = sizeof(event);
        event.header.misc = 0;

        event.time = evtime;
        event.type = type;

        return process(tool, (union perf_event *) &event, NULL, machine);
}

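/*
 * Compute the on-file size of a PERF_RECORD_SAMPLE with the given sample
 * @type and @read_format, mirroring the layout the kernel writes in
 * perf_output_sample().
 */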
1389size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
1390{
1391        size_t sz, result = sizeof(struct perf_record_sample);
1392
1393        if (type & PERF_SAMPLE_IDENTIFIER)
1394                result += sizeof(u64);
1395
1396        if (type & PERF_SAMPLE_IP)
1397                result += sizeof(u64);
1398
1399        if (type & PERF_SAMPLE_TID)
1400                result += sizeof(u64);
1401
1402        if (type & PERF_SAMPLE_TIME)
1403                result += sizeof(u64);
1404
1405        if (type & PERF_SAMPLE_ADDR)
1406                result += sizeof(u64);
1407
1408        if (type & PERF_SAMPLE_ID)
1409                result += sizeof(u64);
1410
1411        if (type & PERF_SAMPLE_STREAM_ID)
1412                result += sizeof(u64);
1413
1414        if (type & PERF_SAMPLE_CPU)
1415                result += sizeof(u64);
1416
1417        if (type & PERF_SAMPLE_PERIOD)
1418                result += sizeof(u64);
1419
1420        if (type & PERF_SAMPLE_READ) {
1421                result += sizeof(u64);
1422                if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1423                        result += sizeof(u64);
1424                if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1425                        result += sizeof(u64);
1426                /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1427                if (read_format & PERF_FORMAT_GROUP) {
1428                        sz = sample->read.group.nr *
1429                             sizeof(struct sample_read_value);
1430                        result += sz;
1431                } else {
1432                        result += sizeof(u64);
1433                }
1434        }
1435
1436        if (type & PERF_SAMPLE_CALLCHAIN) {
1437                sz = (sample->callchain->nr + 1) * sizeof(u64);
1438                result += sz;
1439        }
1440
1441        if (type & PERF_SAMPLE_RAW) {
1442                result += sizeof(u32);
1443                result += sample->raw_size;
1444        }
1445
1446        if (type & PERF_SAMPLE_BRANCH_STACK) {
1447                sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1448                /* nr, hw_idx */
1449                sz += 2 * sizeof(u64);
1450                result += sz;
1451        }
1452
1453        if (type & PERF_SAMPLE_REGS_USER) {
1454                if (sample->user_regs.abi) {
1455                        result += sizeof(u64);
1456                        sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1457                        result += sz;
1458                } else {
1459                        result += sizeof(u64);
1460                }
1461        }
1462
1463        if (type & PERF_SAMPLE_STACK_USER) {
1464                sz = sample->user_stack.size;
1465                result += sizeof(u64);
1466                if (sz) {
1467                        result += sz;
1468                        result += sizeof(u64);
1469                }
1470        }
1471
1472        if (type & PERF_SAMPLE_WEIGHT_TYPE)
1473                result += sizeof(u64);
1474
1475        if (type & PERF_SAMPLE_DATA_SRC)
1476                result += sizeof(u64);
1477
1478        if (type & PERF_SAMPLE_TRANSACTION)
1479                result += sizeof(u64);
1480
1481        if (type & PERF_SAMPLE_REGS_INTR) {
1482                if (sample->intr_regs.abi) {
1483                        result += sizeof(u64);
1484                        sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1485                        result += sz;
1486                } else {
1487                        result += sizeof(u64);
1488                }
1489        }
1490
1491        if (type & PERF_SAMPLE_PHYS_ADDR)
1492                result += sizeof(u64);
1493
1494        if (type & PERF_SAMPLE_CGROUP)
1495                result += sizeof(u64);
1496
1497        if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
1498                result += sizeof(u64);
1499
1500        if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
1501                result += sizeof(u64);
1502
1503        if (type & PERF_SAMPLE_AUX) {
1504                result += sizeof(u64);
1505                result += sample->aux_sample.size;
1506        }
1507
1508        return result;
1509}
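
/*
 * Worked example (editorial): for a common config such as
 * type = PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD and read_format = 0, this
 * returns sizeof(struct perf_record_sample) + 5 * sizeof(u64): the
 * event header plus one u64 slot per selected field, exactly the bytes
 * that perf_event__synthesize_sample() below writes for the same
 * type/read_format, so the two functions must be kept in lockstep.
 */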
1510
1511void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
1512                                               __u64 *array, u64 type __maybe_unused)
1513{
1514        *array = data->weight;
1515}
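
/*
 * Editorial note: architectures may override the __weak default above.
 * At the time of writing, the x86 variant packs the instruction latency
 * into the upper half when PERF_SAMPLE_WEIGHT_STRUCT is set, roughly:
 *
 *	*array = data->weight;
 *	if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
 *		*array &= 0xffffffff;
 *		*array |= ((u64)data->ins_lat << 32);
 *	}
 */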
1516
1517int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
1518                                  const struct perf_sample *sample)
1519{
1520        __u64 *array;
1521        size_t sz;
1522        /*
1523         * used for cross-endian analysis. See git commit 65014ab3
1524         * for why this goofiness is needed.
1525         */
1526        union u64_swap u;
1527
1528        array = event->sample.array;
1529
1530        if (type & PERF_SAMPLE_IDENTIFIER) {
1531                *array = sample->id;
1532                array++;
1533        }
1534
1535        if (type & PERF_SAMPLE_IP) {
1536                *array = sample->ip;
1537                array++;
1538        }
1539
1540        if (type & PERF_SAMPLE_TID) {
1541                u.val32[0] = sample->pid;
1542                u.val32[1] = sample->tid;
1543                *array = u.val64;
1544                array++;
1545        }
1546
1547        if (type & PERF_SAMPLE_TIME) {
1548                *array = sample->time;
1549                array++;
1550        }
1551
1552        if (type & PERF_SAMPLE_ADDR) {
1553                *array = sample->addr;
1554                array++;
1555        }
1556
1557        if (type & PERF_SAMPLE_ID) {
1558                *array = sample->id;
1559                array++;
1560        }
1561
1562        if (type & PERF_SAMPLE_STREAM_ID) {
1563                *array = sample->stream_id;
1564                array++;
1565        }
1566
1567        if (type & PERF_SAMPLE_CPU) {
1568                u.val32[0] = sample->cpu;
1569                u.val32[1] = 0;
1570                *array = u.val64;
1571                array++;
1572        }
1573
1574        if (type & PERF_SAMPLE_PERIOD) {
1575                *array = sample->period;
1576                array++;
1577        }
1578
1579        if (type & PERF_SAMPLE_READ) {
1580                if (read_format & PERF_FORMAT_GROUP)
1581                        *array = sample->read.group.nr;
1582                else
1583                        *array = sample->read.one.value;
1584                array++;
1585
1586                if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1587                        *array = sample->read.time_enabled;
1588                        array++;
1589                }
1590
1591                if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1592                        *array = sample->read.time_running;
1593                        array++;
1594                }
1595
1596                /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1597                if (read_format & PERF_FORMAT_GROUP) {
1598                        sz = sample->read.group.nr *
1599                             sizeof(struct sample_read_value);
1600                        memcpy(array, sample->read.group.values, sz);
1601                        array = (void *)array + sz;
1602                } else {
1603                        *array = sample->read.one.id;
1604                        array++;
1605                }
1606        }
1607
1608        if (type & PERF_SAMPLE_CALLCHAIN) {
1609                sz = (sample->callchain->nr + 1) * sizeof(u64);
1610                memcpy(array, sample->callchain, sz);
1611                array = (void *)array + sz;
1612        }
1613
1614        if (type & PERF_SAMPLE_RAW) {
1615                u.val32[0] = sample->raw_size;
1616                *array = u.val64;
1617                array = (void *)array + sizeof(u32);
1618
1619                memcpy(array, sample->raw_data, sample->raw_size);
1620                array = (void *)array + sample->raw_size;
1621        }
1622
1623        if (type & PERF_SAMPLE_BRANCH_STACK) {
1624                sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1625                /* nr, hw_idx */
1626                sz += 2 * sizeof(u64);
1627                memcpy(array, sample->branch_stack, sz);
1628                array = (void *)array + sz;
1629        }
1630
1631        if (type & PERF_SAMPLE_REGS_USER) {
1632                if (sample->user_regs.abi) {
1633                        *array++ = sample->user_regs.abi;
1634                        sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1635                        memcpy(array, sample->user_regs.regs, sz);
1636                        array = (void *)array + sz;
1637                } else {
1638                        *array++ = 0;
1639                }
1640        }
1641
1642        if (type & PERF_SAMPLE_STACK_USER) {
1643                sz = sample->user_stack.size;
1644                *array++ = sz;
1645                if (sz) {
1646                        memcpy(array, sample->user_stack.data, sz);
1647                        array = (void *)array + sz;
1648                        *array++ = sz;
1649                }
1650        }
1651
1652        if (type & PERF_SAMPLE_WEIGHT_TYPE) {
1653                arch_perf_synthesize_sample_weight(sample, array, type);
1654                array++;
1655        }
1656
1657        if (type & PERF_SAMPLE_DATA_SRC) {
1658                *array = sample->data_src;
1659                array++;
1660        }
1661
1662        if (type & PERF_SAMPLE_TRANSACTION) {
1663                *array = sample->transaction;
1664                array++;
1665        }
1666
1667        if (type & PERF_SAMPLE_REGS_INTR) {
1668                if (sample->intr_regs.abi) {
1669                        *array++ = sample->intr_regs.abi;
1670                        sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1671                        memcpy(array, sample->intr_regs.regs, sz);
1672                        array = (void *)array + sz;
1673                } else {
1674                        *array++ = 0;
1675                }
1676        }
1677
1678        if (type & PERF_SAMPLE_PHYS_ADDR) {
1679                *array = sample->phys_addr;
1680                array++;
1681        }
1682
1683        if (type & PERF_SAMPLE_CGROUP) {
1684                *array = sample->cgroup;
1685                array++;
1686        }
1687
1688        if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
1689                *array = sample->data_page_size;
1690                array++;
1691        }
1692
1693        if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
1694                *array = sample->code_page_size;
1695                array++;
1696        }
1697
1698        if (type & PERF_SAMPLE_AUX) {
1699                sz = sample->aux_sample.size;
1700                *array++ = sz;
1701                memcpy(array, sample->aux_sample.data, sz);
1702                array = (void *)array + sz;
1703        }
1704
1705        return 0;
1706}
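
/*
 * Illustrative pairing (editorial sketch): callers that inject decoded
 * samples, e.g. the intel-pt code, size the record first and then fill
 * it in, relying on the two functions agreeing byte for byte:
 *
 *	size_t sz = perf_event__sample_event_size(sample, type, read_format);
 *
 *	event->header.size = sz;
 *	return perf_event__synthesize_sample(event, type, read_format, sample);
 */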
1707
1708int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
1709                                    struct evlist *evlist, struct machine *machine)
1710{
1711        union perf_event *ev;
1712        struct evsel *evsel;
1713        size_t nr = 0, i = 0, sz, max_nr, n;
1714        int err;
1715
1716        pr_debug2("Synthesizing id index\n");
1717
1718        max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
1719                 sizeof(struct id_index_entry);
1720
1721        evlist__for_each_entry(evlist, evsel)
1722                nr += evsel->core.ids;
1723
1724        n = nr > max_nr ? max_nr : nr;
1725        sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
1726        ev = zalloc(sz);
1727        if (!ev)
1728                return -ENOMEM;
1729
1730        ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1731        ev->id_index.header.size = sz;
1732        ev->id_index.nr = n;
1733
1734        evlist__for_each_entry(evlist, evsel) {
1735                u32 j;
1736
1737                for (j = 0; j < evsel->core.ids; j++) {
1738                        struct id_index_entry *e;
1739                        struct perf_sample_id *sid;
1740
1741                        if (i >= n) {
1742                                err = process(tool, ev, NULL, machine);
1743                                if (err)
1744                                        goto out_err;
1745                                nr -= n;
1746                                i = 0;
1747                        }
1748
1749                        e = &ev->id_index.entries[i++];
1750
1751                        e->id = evsel->core.id[j];
1752
1753                        sid = evlist__id2sid(evlist, e->id);
1754                        if (!sid) {
1755                                free(ev);
1756                                return -ENOENT;
1757                        }
1758
1759                        e->idx = sid->idx;
1760                        e->cpu = sid->cpu;
1761                        e->tid = sid->tid;
1762                }
1763        }
1764
1765        sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
1766        ev->id_index.header.size = sz;
1767        ev->id_index.nr = nr;
1768
1769        err = process(tool, ev, NULL, machine);
1770out_err:
1771        free(ev);
1772
1773        return err;
1774}
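
/*
 * Sizing note (editorial): header.size is a u16, so one
 * PERF_RECORD_ID_INDEX record can hold at most max_nr entries. When an
 * evlist carries more ids than that, the loop above flushes a full
 * event, resets 'i' and keeps filling the same buffer; the final,
 * possibly partial, event is then emitted with the residual 'nr'.
 */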
1775
1776int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
1777                                  struct target *target, struct perf_thread_map *threads,
1778                                  perf_event__handler_t process, bool data_mmap,
1779                                  unsigned int nr_threads_synthesize)
1780{
1781        if (target__has_task(target))
1782                return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
1783        else if (target__has_cpu(target))
1784                return perf_event__synthesize_threads(tool, process,
1785                                                      machine, data_mmap,
1786                                                      nr_threads_synthesize);
1787        /* command specified */
1788        return 0;
1789}
1790
1791int machine__synthesize_threads(struct machine *machine, struct target *target,
1792                                struct perf_thread_map *threads, bool data_mmap,
1793                                unsigned int nr_threads_synthesize)
1794{
1795        return __machine__synthesize_threads(machine, NULL, target, threads,
1796                                             perf_event__process, data_mmap,
1797                                             nr_threads_synthesize);
1798}
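
/*
 * Illustrative usage (editorial sketch): tools such as 'perf top' seed
 * the machine state with pre-existing threads before consuming samples,
 * along the lines of:
 *
 *	machine__synthesize_threads(&session->machines.host, &opts->target,
 *				    evlist->core.threads, false,
 *				    opts->nr_threads_synthesize);
 */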
1799
1800static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
1801{
1802        struct perf_record_event_update *ev;
1803
1804        size += sizeof(*ev);
1805        size  = PERF_ALIGN(size, sizeof(u64));
1806
1807        ev = zalloc(size);
1808        if (ev) {
1809                ev->header.type = PERF_RECORD_EVENT_UPDATE;
1810                ev->header.size = (u16)size;
1811                ev->type        = type;
1812                ev->id          = id;
1813        }
1814        return ev;
1815}
1816
1817int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
1818                                             perf_event__handler_t process)
1819{
1820        size_t size = strlen(evsel->unit);
1821        struct perf_record_event_update *ev;
1822        int err;
1823
1824        ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
1825        if (ev == NULL)
1826                return -ENOMEM;
1827
1828        strlcpy(ev->data, evsel->unit, size + 1);
1829        err = process(tool, (union perf_event *)ev, NULL, NULL);
1830        free(ev);
1831        return err;
1832}
1833
1834int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
1835                                              perf_event__handler_t process)
1836{
1837        struct perf_record_event_update *ev;
1838        struct perf_record_event_update_scale *ev_data;
1839        int err;
1840
1841        ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
1842        if (ev == NULL)
1843                return -ENOMEM;
1844
1845        ev_data = (struct perf_record_event_update_scale *)ev->data;
1846        ev_data->scale = evsel->scale;
1847        err = process(tool, (union perf_event *)ev, NULL, NULL);
1848        free(ev);
1849        return err;
1850}
1851
1852int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
1853                                             perf_event__handler_t process)
1854{
1855        struct perf_record_event_update *ev;
1856        size_t len = strlen(evsel->name);
1857        int err;
1858
1859        ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
1860        if (ev == NULL)
1861                return -ENOMEM;
1862
1863        strlcpy(ev->data, evsel->name, len + 1);
1864        err = process(tool, (union perf_event *)ev, NULL, NULL);
1865        free(ev);
1866        return err;
1867}
1868
1869int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
1870                                             perf_event__handler_t process)
1871{
1872        size_t size = sizeof(struct perf_record_event_update);
1873        struct perf_record_event_update *ev;
1874        int max, err;
1875        u16 type;
1876
1877        if (!evsel->core.own_cpus)
1878                return 0;
1879
1880        ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
1881        if (!ev)
1882                return -ENOMEM;
1883
1884        ev->header.type = PERF_RECORD_EVENT_UPDATE;
1885        ev->header.size = (u16)size;
1886        ev->type        = PERF_EVENT_UPDATE__CPUS;
1887        ev->id          = evsel->core.id[0];
1888
1889        cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
1890                                 evsel->core.own_cpus, type, max);
1891
1892        err = process(tool, (union perf_event *)ev, NULL, NULL);
1893        free(ev);
1894        return err;
1895}
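
/*
 * Editorial note: together with the unit, scale and name variants above,
 * this covers the PERF_EVENT_UPDATE__* payloads. All reuse the same
 * PERF_RECORD_EVENT_UPDATE envelope; the CPUs variant is the odd one out
 * because its payload size is only known after cpu_map_data__alloc(), so
 * it cannot go through event_update_event__new().
 */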
1896
1897int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
1898                                 perf_event__handler_t process)
1899{
1900        struct evsel *evsel;
1901        int err = 0;
1902
1903        evlist__for_each_entry(evlist, evsel) {
1904                err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
1905                                                  evsel->core.id, process);
1906                if (err) {
1907                        pr_debug("failed to create perf header attribute\n");
1908                        return err;
1909                }
1910        }
1911
1912        return err;
1913}
1914
1915static bool has_unit(struct evsel *evsel)
1916{
1917        return evsel->unit && *evsel->unit;
1918}
1919
1920static bool has_scale(struct evsel *evsel)
1921{
1922        return evsel->scale != 1;
1923}
1924
1925int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
1926                                      perf_event__handler_t process, bool is_pipe)
1927{
1928        struct evsel *evsel;
1929        int err;
1930
1931        /*
1932         * Synthesize the other event details not carried within the
1933         * attr event - unit, scale, name.
1934         */
1935        evlist__for_each_entry(evsel_list, evsel) {
1936                if (!evsel->supported)
1937                        continue;
1938
1939                /*
1940                 * Synthesize unit and scale only if they are defined.
1941                 */
1942                if (has_unit(evsel)) {
1943                        err = perf_event__synthesize_event_update_unit(tool, evsel, process);
1944                        if (err < 0) {
1945                                pr_err("Couldn't synthesize evsel unit.\n");
1946                                return err;
1947                        }
1948                }
1949
1950                if (has_scale(evsel)) {
1951                        err = perf_event__synthesize_event_update_scale(tool, evsel, process);
1952                        if (err < 0) {
1953                                pr_err("Couldn't synthesize evsel scale.\n");
1954                                return err;
1955                        }
1956                }
1957
1958                if (evsel->core.own_cpus) {
1959                        err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
1960                        if (err < 0) {
1961                                pr_err("Couldn't synthesize evsel cpus.\n");
1962                                return err;
1963                        }
1964                }
1965
1966                /*
1967                 * Name is needed only for pipe output,
1968                 * perf.data carries event names.
1969                 */
1970                if (is_pipe) {
1971                        err = perf_event__synthesize_event_update_name(tool, evsel, process);
1972                        if (err < 0) {
1973                                pr_err("Couldn't synthesize evsel name.\n");
1974                                return err;
1975                        }
1976                }
1977        }
1978        return 0;
1979}
1980
1981int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
1982                                u32 ids, u64 *id, perf_event__handler_t process)
1983{
1984        union perf_event *ev;
1985        size_t size;
1986        int err;
1987
1988        size = sizeof(struct perf_event_attr);
1989        size = PERF_ALIGN(size, sizeof(u64));
1990        size += sizeof(struct perf_event_header);
1991        size += ids * sizeof(u64);
1992
1993        ev = zalloc(size);
1994
1995        if (ev == NULL)
1996                return -ENOMEM;
1997
1998        ev->attr.attr = *attr;
1999        memcpy(ev->attr.id, id, ids * sizeof(u64));
2000
2001        ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2002        ev->attr.header.size = (u16)size;
2003
2004        if (ev->attr.header.size == size)
2005                err = process(tool, ev, NULL, NULL);
2006        else
2007                err = -E2BIG;
2008
2009        free(ev);
2010
2011        return err;
2012}
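
/*
 * On-the-wire layout produced above (editorial summary):
 *
 *	struct perf_event_header	header;	  PERF_RECORD_HEADER_ATTR
 *	struct perf_event_attr		attr;	  padded to a u64 boundary
 *	u64				id[ids];  sample ids for this evsel
 *
 * header.size is a u16, which is why the truncation check above returns
 * -E2BIG once the id array pushes the record past 64KB.
 */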
2013
2014int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
2015                                        perf_event__handler_t process)
2016{
2017        union perf_event ev;
2018        struct tracing_data *tdata;
2019        ssize_t size = 0, aligned_size = 0, padding;
2020        struct feat_fd ff;
2021
2022        /*
2023         * We are going to store the size of the data followed
2024         * by the data contents. Since fd is a pipe,
2025         * we cannot seek back to store the size of the data once
2026         * we know it. Instead we:
2027         *
2028         * - write the tracing data to the temp file
2029         * - get/write the data size to the pipe
2030         * - write the tracing data from the temp file
2031         *   to the pipe
2032         */
2033        tdata = tracing_data_get(&evlist->core.entries, fd, true);
2034        if (!tdata)
2035                return -1;
2036
2037        memset(&ev, 0, sizeof(ev));
2038
2039        ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
2040        size = tdata->size;
2041        aligned_size = PERF_ALIGN(size, sizeof(u64));
2042        padding = aligned_size - size;
2043        ev.tracing_data.header.size = sizeof(ev.tracing_data);
2044        ev.tracing_data.size = aligned_size;
2045
2046        process(tool, &ev, NULL, NULL);
2047
2048        /*
2049         * The put function will copy all the tracing data
2050         * stored in temp file to the pipe.
2051         */
2052        tracing_data_put(tdata);
2053
2054        ff = (struct feat_fd){ .fd = fd };
2055        if (write_padded(&ff, NULL, 0, padding))
2056                return -1;
2057
2058        return aligned_size;
2059}
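
/*
 * Resulting pipe layout (editorial summary): a fixed-size
 * PERF_RECORD_HEADER_TRACING_DATA record announcing 'aligned_size',
 * immediately followed by the raw tracing data that tracing_data_put()
 * streams out of the temp file, plus 'padding' zero bytes so that the
 * next record starts u64-aligned.
 */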
2060
2061int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
2062                                    perf_event__handler_t process, struct machine *machine)
2063{
2064        union perf_event ev;
2065        size_t len;
2066
2067        if (!pos->hit)
2068                return 0;
2069
2070        memset(&ev, 0, sizeof(ev));
2071
2072        len = pos->long_name_len + 1;
2073        len = PERF_ALIGN(len, NAME_ALIGN);
2074        memcpy(&ev.build_id.build_id, pos->bid.data, sizeof(pos->bid.data));
2075        ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
2076        ev.build_id.header.misc = misc;
2077        ev.build_id.pid = machine->pid;
2078        ev.build_id.header.size = sizeof(ev.build_id) + len;
2079        memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
2080
2081        return process(tool, &ev, NULL, machine);
2082}
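
/*
 * Layout note (editorial): 'len' reserves long_name_len + 1 bytes rounded
 * up to NAME_ALIGN, and the event was zeroed above, so the copied
 * filename is implicitly NUL-terminated and the record size stays
 * properly aligned.
 */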
2083
2084int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
2085                                       struct evlist *evlist, perf_event__handler_t process, bool attrs)
2086{
2087        int err;
2088
2089        if (attrs) {
2090                err = perf_event__synthesize_attrs(tool, evlist, process);
2091                if (err < 0) {
2092                        pr_err("Couldn't synthesize attrs.\n");
2093                        return err;
2094                }
2095        }
2096
2097        err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
            if (err < 0) {
                    pr_err("Couldn't synthesize extra attrs.\n");
                    return err;
            }

2098        err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
2099        if (err < 0) {
2100                pr_err("Couldn't synthesize thread map.\n");
2101                return err;
2102        }
2103
2104        err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
2105        if (err < 0) {
2106                pr_err("Couldn't synthesize cpu map.\n");
2107                return err;
2108        }
2109
2110        err = perf_event__synthesize_stat_config(tool, config, process, NULL);
2111        if (err < 0) {
2112                pr_err("Couldn't synthesize config.\n");
2113                return err;
2114        }
2115
2116        return 0;
2117}
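
/*
 * Illustrative usage (editorial sketch): 'perf stat record' drives this
 * once at startup so that a later 'perf stat report' can reconstruct the
 * counting session, e.g.:
 *
 *	err = perf_event__synthesize_stat_events(&stat_config, NULL,
 *						 evsel_list, process,
 *						 data->is_pipe);
 */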
2118
2119extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
2120
2121int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
2122                                    struct evlist *evlist, perf_event__handler_t process)
2123{
2124        struct perf_header *header = &session->header;
2125        struct perf_record_header_feature *fe;
2126        struct feat_fd ff;
2127        size_t sz, sz_hdr;
2128        int feat, ret;
2129
2130        sz_hdr = sizeof(fe->header);
2131        sz = sizeof(union perf_event);
2132        /* get a nice alignment */
2133        sz = PERF_ALIGN(sz, page_size);
2134
2135        memset(&ff, 0, sizeof(ff));
2136
2137        ff.buf = malloc(sz);
2138        if (!ff.buf)
2139                return -ENOMEM;
2140
2141        ff.size = sz - sz_hdr;
2142        ff.ph = &session->header;
2143
2144        for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2145                if (!feat_ops[feat].synthesize) {
2146                        pr_debug("No record header feature for header: %d\n", feat);
2147                        continue;
2148                }
2149
2150                ff.offset = sizeof(*fe);
2151
2152                ret = feat_ops[feat].write(&ff, evlist);
2153                if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
2154                        pr_debug("Error writing feature\n");
2155                        continue;
2156                }
2157                /* ff.buf may have changed due to realloc in do_write() */
2158                fe = ff.buf;
2159                memset(fe, 0, sizeof(*fe));
2160
2161                fe->feat_id = feat;
2162                fe->header.type = PERF_RECORD_HEADER_FEATURE;
2163                fe->header.size = ff.offset;
2164
2165                ret = process(tool, ff.buf, NULL, NULL);
2166                if (ret) {
2167                        free(ff.buf);
2168                        return ret;
2169                }
2170        }
2171
2172        /* Send HEADER_LAST_FEATURE mark. */
2173        fe = ff.buf;
2174        fe->feat_id     = HEADER_LAST_FEATURE;
2175        fe->header.type = PERF_RECORD_HEADER_FEATURE;
2176        fe->header.size = sizeof(*fe);
2177
2178        ret = process(tool, ff.buf, NULL, NULL);
2179
2180        free(ff.buf);
2181        return ret;
2182}
2183
2184int perf_event__synthesize_for_pipe(struct perf_tool *tool,
2185                                    struct perf_session *session,
2186                                    struct perf_data *data,
2187                                    perf_event__handler_t process)
2188{
2189        int err;
2190        int ret = 0;
2191        struct evlist *evlist = session->evlist;
2192
2193        /*
2194         * We need to synthesize events first, because some
2195         * features work on top of them (on the report side).
2196         */
2197        err = perf_event__synthesize_attrs(tool, evlist, process);
2198        if (err < 0) {
2199                pr_err("Couldn't synthesize attrs.\n");
2200                return err;
2201        }
2202        ret += err;
2203
2204        err = perf_event__synthesize_features(tool, session, evlist, process);
2205        if (err < 0) {
2206                pr_err("Couldn't synthesize features.\n");
2207                return err;
2208        }
2209        ret += err;
2210
2211        if (have_tracepoints(&evlist->core.entries)) {
2212                int fd = perf_data__fd(data);
2213
2214                /*
2215                 * FIXME: err <= 0 here actually means that
2216                 * there were no tracepoints, so it's not really
2217                 * an error, just that we don't need to
2218                 * synthesize anything.  We really should
2219                 * return this more properly and also propagate
2220                 * errors that currently end up calling die().
2221                 */
2222                err = perf_event__synthesize_tracing_data(tool, fd, evlist,
2223                                                          process);
2224                if (err <= 0) {
2225                        pr_err("Couldn't record tracing data.\n");
2226                        return err;
2227                }
2228                ret += err;
2229        }
2230
2231        return ret;
2232}
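
/*
 * Illustrative usage (editorial sketch): with output going to a pipe,
 * e.g. 'perf record -o -', there is no file header to rewrite later, so
 * callers push attrs, features and tracing data in-band right after
 * opening the session:
 *
 *	if (data->is_pipe) {
 *		err = perf_event__synthesize_for_pipe(tool, session, data,
 *						      process);
 *		if (err < 0)
 *			return err;
 *	}
 */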
2233