linux/tools/perf/util/synthetic-events.c
// SPDX-License-Identifier: GPL-2.0-only

#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/map_symbol.h"
#include "util/branch.h"
#include "util/memswap.h"
#include "util/namespaces.h"
#include "util/session.h"
#include "util/stat.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include "util/cgroup.h"
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <asm/bug.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <internal/lib.h> // page_size
#include <internal/threadmap.h>
#include <perf/threadmap.h>
#include <symbol/kallsyms.h>
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <api/io.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

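/*
 * Default time budget for parsing one /proc/<pid>/task/<tid>/maps file,
 * in milliseconds; it is scaled to nanoseconds against rdclock() in
 * perf_event__synthesize_mmap_events() and tunable with --proc-map-timeout.
 */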
#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500

unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;

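/*
 * Hand a synthesized event to 'process' with a mostly-unknown dummy
 * sample: all IDs are -1, the period is 1 and the cpumode is lifted
 * from the event header.
 */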
int perf_tool__process_synth_event(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct machine *machine,
                                   perf_event__handler_t process)
{
        struct perf_sample synth_sample = {
                .pid       = -1,
                .tid       = -1,
                .time      = -1,
                .stream_id = -1,
                .cpu       = -1,
                .period    = 1,
                .cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
        };

        return process(tool, event, &synth_sample, machine);
}

/*
 * Assumes that the first 4095 bytes of /proc/pid/status contains
 * the comm, tgid and ppid.
 */
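/*
 * Example excerpt (hypothetical values) of the fields consumed from
 * /proc/<pid>/status:
 *
 *   Name:   cat
 *   Tgid:   1234
 *   PPid:   1000
 *   VmPeak:     8708 kB
 *
 * Kernel threads have no VmPeak: line but do have a Threads: line,
 * which is how *kernel is decided below.
 */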
static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len,
                                    pid_t *tgid, pid_t *ppid, bool *kernel)
{
        char bf[4096];
        int fd;
        size_t size = 0;
        ssize_t n;
        char *name, *tgids, *ppids, *vmpeak, *threads;

        *tgid = -1;
        *ppid = -1;

        if (pid)
                snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid);
        else
                snprintf(bf, sizeof(bf), "/proc/%d/status", tid);

        fd = open(bf, O_RDONLY);
        if (fd < 0) {
                pr_debug("couldn't open %s\n", bf);
                return -1;
        }

        n = read(fd, bf, sizeof(bf) - 1);
        close(fd);
        if (n <= 0) {
                pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
                           tid);
                return -1;
        }
        bf[n] = '\0';

        name = strstr(bf, "Name:");
        tgids = strstr(name ?: bf, "Tgid:");
        ppids = strstr(tgids ?: bf, "PPid:");
        vmpeak = strstr(ppids ?: bf, "VmPeak:");

        if (vmpeak)
                threads = NULL;
        else
                threads = strstr(ppids ?: bf, "Threads:");

        if (name) {
                char *nl;

                name = skip_spaces(name + 5);  /* strlen("Name:") */
                nl = strchr(name, '\n');
                if (nl)
                        *nl = '\0';

                size = strlen(name);
                if (size >= len)
                        size = len - 1;
                memcpy(comm, name, size);
                comm[size] = '\0';
        } else {
                pr_debug("Name: string not found for pid %d\n", tid);
        }

        if (tgids) {
                tgids += 5;  /* strlen("Tgid:") */
                *tgid = atoi(tgids);
        } else {
                pr_debug("Tgid: string not found for pid %d\n", tid);
        }

        if (ppids) {
                ppids += 5;  /* strlen("PPid:") */
                *ppid = atoi(ppids);
        } else {
                pr_debug("PPid: string not found for pid %d\n", tid);
        }

        if (!vmpeak && threads)
                *kernel = true;
        else
                *kernel = false;

        return 0;
}

static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid,
                                    struct machine *machine,
                                    pid_t *tgid, pid_t *ppid, bool *kernel)
{
        size_t size;

        *ppid = -1;

        memset(&event->comm, 0, sizeof(event->comm));

        if (machine__is_host(machine)) {
                if (perf_event__get_comm_ids(pid, tid, event->comm.comm,
                                             sizeof(event->comm.comm),
                                             tgid, ppid, kernel) != 0) {
                        return -1;
                }
        } else {
                *tgid = machine->pid;
        }

        if (*tgid < 0)
                return -1;

        event->comm.pid = *tgid;
        event->comm.header.type = PERF_RECORD_COMM;

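        /*
         * Pad the comm string to a u64 boundary and shrink the record so
         * that only the padded string plus the sample-id trailer is
         * emitted; e.g. (hypothetically) a 4-char "perf" plus its NUL is
         * 5 bytes and rounds up to 8.
         */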
        size = strlen(event->comm.comm) + 1;
        size = PERF_ALIGN(size, sizeof(u64));
        memset(event->comm.comm + size, 0, machine->id_hdr_size);
        event->comm.header.size = (sizeof(event->comm) -
                                (sizeof(event->comm.comm) - size) +
                                machine->id_hdr_size);
        event->comm.tid = tid;

        return 0;
}

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
                                         union perf_event *event, pid_t pid,
                                         perf_event__handler_t process,
                                         struct machine *machine)
{
        pid_t tgid, ppid;
        bool kernel_thread;

        if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid,
                                     &kernel_thread) != 0)
                return -1;

        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
                return -1;

        return tgid;
}

static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
                                         struct perf_ns_link_info *ns_link_info)
{
        struct stat64 st;
        char proc_ns[128];

        sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
        if (stat64(proc_ns, &st) == 0) {
                ns_link_info->dev = st.st_dev;
                ns_link_info->ino = st.st_ino;
        }
}

int perf_event__synthesize_namespaces(struct perf_tool *tool,
                                      union perf_event *event,
                                      pid_t pid, pid_t tgid,
                                      perf_event__handler_t process,
                                      struct machine *machine)
{
        u32 idx;
        struct perf_ns_link_info *ns_link_info;

        if (!tool || !tool->namespace_events)
                return 0;

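        /*
         * The record carries one perf_ns_link_info (device + inode of the
         * /proc/<pid>/ns/* link target) per namespace type, followed by
         * the sample-id trailer.
         */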
        memset(&event->namespaces, 0, (sizeof(event->namespaces) +
               (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
               machine->id_hdr_size));

        event->namespaces.pid = tgid;
        event->namespaces.tid = pid;

        event->namespaces.nr_namespaces = NR_NAMESPACES;

        ns_link_info = event->namespaces.link_info;

        for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
                perf_event__get_ns_link_info(pid, perf_ns__name(idx),
                                             &ns_link_info[idx]);

        event->namespaces.header.type = PERF_RECORD_NAMESPACES;

        event->namespaces.header.size = (sizeof(event->namespaces) +
                        (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                        machine->id_hdr_size);

        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
                return -1;

        return 0;
}

static int perf_event__synthesize_fork(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid, pid_t ppid,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
        memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

        /*
         * For the main thread, set the parent to the ppid from the status
         * file. For other threads, set the parent pid to the main thread,
         * i.e. assume the main thread spawns all threads in a process.
         */
        if (tgid == pid) {
                event->fork.ppid = ppid;
                event->fork.ptid = ppid;
        } else {
                event->fork.ppid = tgid;
                event->fork.ptid = tgid;
        }
        event->fork.pid  = tgid;
        event->fork.tid  = pid;
        event->fork.header.type = PERF_RECORD_FORK;
        event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;

        event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
                return -1;

        return 0;
}

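/*
 * Parse one /proc/<pid>/maps line, e.g. (a made-up example):
 *
 *   00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat
 *
 * into start/end addresses, PROT_* and MAP_* bits, file offset,
 * device major:minor, inode number and pathname.
 */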
static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
                                u32 *prot, u32 *flags, __u64 *offset,
                                u32 *maj, u32 *min,
                                __u64 *inode,
                                ssize_t pathname_size, char *pathname)
{
        __u64 temp;
        int ch;
        char *start_pathname = pathname;

        if (io__get_hex(io, start) != '-')
                return false;
        if (io__get_hex(io, end) != ' ')
                return false;

        /* map protection and flags bits */
        *prot = 0;
        ch = io__get_char(io);
        if (ch == 'r')
                *prot |= PROT_READ;
        else if (ch != '-')
                return false;
        ch = io__get_char(io);
        if (ch == 'w')
                *prot |= PROT_WRITE;
        else if (ch != '-')
                return false;
        ch = io__get_char(io);
        if (ch == 'x')
                *prot |= PROT_EXEC;
        else if (ch != '-')
                return false;
        ch = io__get_char(io);
        if (ch == 's')
                *flags = MAP_SHARED;
        else if (ch == 'p')
                *flags = MAP_PRIVATE;
        else
                return false;
        if (io__get_char(io) != ' ')
                return false;

        if (io__get_hex(io, offset) != ' ')
                return false;

        if (io__get_hex(io, &temp) != ':')
                return false;
        *maj = temp;
        if (io__get_hex(io, &temp) != ' ')
                return false;
        *min = temp;

        ch = io__get_dec(io, inode);
        if (ch != ' ') {
                *pathname = '\0';
                return ch == '\n';
        }
        do {
                ch = io__get_char(io);
        } while (ch == ' ');
        while (true) {
                if (ch < 0)
                        return false;
                if (ch == '\0' || ch == '\n' ||
                    (pathname + 1 - start_pathname) >= pathname_size) {
                        *pathname = '\0';
                        return true;
                }
                *pathname++ = ch;
                ch = io__get_char(io);
        }
}

static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
                                             bool is_kernel)
{
        struct build_id bid;
        int rc;

        if (is_kernel)
                rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
        else
                rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;

        if (rc == 0) {
                memcpy(event->build_id, bid.data, sizeof(bid.data));
                event->build_id_size = (u8) bid.size;
                event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
                event->__reserved_1 = 0;
                event->__reserved_2 = 0;
        } else {
                if (event->filename[0] == '/') {
                        pr_debug2("Failed to read build ID for %s\n",
                                  event->filename);
                }
        }
}

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       union perf_event *event,
                                       pid_t pid, pid_t tgid,
                                       perf_event__handler_t process,
                                       struct machine *machine,
                                       bool mmap_data)
{
        unsigned long long t;
        char bf[BUFSIZ];
        struct io io;
        bool truncation = false;
        unsigned long long timeout = proc_map_timeout * 1000000ULL;
        int rc = 0;
        const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
        int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

        if (machine__is_default_guest(machine))
                return 0;

        snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
                machine->root_dir, pid, pid);

        io.fd = open(bf, O_RDONLY, 0);
        if (io.fd < 0) {
                /*
                 * We raced with a task exiting - just return:
                 */
                pr_debug("couldn't open %s\n", bf);
                return -1;
        }
        io__init(&io, io.fd, bf, sizeof(bf));

        event->header.type = PERF_RECORD_MMAP2;
        t = rdclock();

        while (!io.eof) {
                static const char anonstr[] = "//anon";
                size_t size, aligned_size;

                /* ensure null termination since stack will be reused. */
                event->mmap2.filename[0] = '\0';

                /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
                if (!read_proc_maps_line(&io,
                                        &event->mmap2.start,
                                        &event->mmap2.len,
                                        &event->mmap2.prot,
                                        &event->mmap2.flags,
                                        &event->mmap2.pgoff,
                                        &event->mmap2.maj,
                                        &event->mmap2.min,
                                        &event->mmap2.ino,
                                        sizeof(event->mmap2.filename),
                                        event->mmap2.filename))
                        continue;

                if ((rdclock() - t) > timeout) {
                        pr_warning("Reading %s/proc/%d/task/%d/maps timed out. "
                                   "You may want to increase "
                                   "the time limit with --proc-map-timeout\n",
                                   machine->root_dir, pid, pid);
                        truncation = true;
                        goto out;
                }

                event->mmap2.ino_generation = 0;

                /*
                 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
                 */
                if (machine__is_host(machine))
                        event->header.misc = PERF_RECORD_MISC_USER;
                else
                        event->header.misc = PERF_RECORD_MISC_GUEST_USER;

                if ((event->mmap2.prot & PROT_EXEC) == 0) {
                        if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
                                continue;

                        event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
                }

out:
                if (truncation)
                        event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

                if (!strcmp(event->mmap2.filename, ""))
                        strcpy(event->mmap2.filename, anonstr);

                if (hugetlbfs_mnt_len &&
                    !strncmp(event->mmap2.filename, hugetlbfs_mnt,
                             hugetlbfs_mnt_len)) {
                        strcpy(event->mmap2.filename, anonstr);
                        event->mmap2.flags |= MAP_HUGETLB;
                }

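                /*
                 * Convert the parsed end address into a length and trim
                 * the record so that only the u64-aligned filename plus
                 * the sample-id trailer is emitted.
                 */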
                size = strlen(event->mmap2.filename) + 1;
                aligned_size = PERF_ALIGN(size, sizeof(u64));
                event->mmap2.len -= event->mmap2.start;
                event->mmap2.header.size = (sizeof(event->mmap2) -
                                        (sizeof(event->mmap2.filename) - aligned_size));
                memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
                        (aligned_size - size));
                event->mmap2.header.size += machine->id_hdr_size;
                event->mmap2.pid = tgid;
                event->mmap2.tid = pid;

                if (symbol_conf.buildid_mmap2)
                        perf_record_mmap2__read_build_id(&event->mmap2, false);

                if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
                        rc = -1;
                        break;
                }

                if (truncation)
                        break;
        }

        close(io.fd);
        return rc;
}

#ifdef HAVE_FILE_HANDLE
static int perf_event__synthesize_cgroup(struct perf_tool *tool,
                                         union perf_event *event,
                                         char *path, size_t mount_len,
                                         perf_event__handler_t process,
                                         struct machine *machine)
{
        size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
        size_t path_len = strlen(path) - mount_len + 1;
        struct {
                struct file_handle fh;
                uint64_t cgroup_id;
        } handle;
        int mount_id;

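        /* NUL-pad the mount-relative cgroup path up to a u64 boundary. */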
        while (path_len % sizeof(u64))
                path[mount_len + path_len++] = '\0';

        memset(&event->cgroup, 0, event_size);

        event->cgroup.header.type = PERF_RECORD_CGROUP;
        event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;

        handle.fh.handle_bytes = sizeof(handle.cgroup_id);
        if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
                pr_debug("name_to_handle_at failed: %s\n", path);
                return -1;
        }

        event->cgroup.id = handle.cgroup_id;
        strncpy(event->cgroup.path, path + mount_len, path_len);
        memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);

        if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
                pr_debug("process synth event failed\n");
                return -1;
        }

        return 0;
}

static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
                                        union perf_event *event,
                                        char *path, size_t mount_len,
                                        perf_event__handler_t process,
                                        struct machine *machine)
{
        size_t pos = strlen(path);
        DIR *d;
        struct dirent *dent;
        int ret = 0;

        if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
                                          process, machine) < 0)
                return -1;

        d = opendir(path);
        if (d == NULL) {
                pr_debug("failed to open directory: %s\n", path);
                return -1;
        }

        while ((dent = readdir(d)) != NULL) {
                if (dent->d_type != DT_DIR)
                        continue;
                if (!strcmp(dent->d_name, ".") ||
                    !strcmp(dent->d_name, ".."))
                        continue;

                /* any sane path should be less than PATH_MAX */
                if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
                        continue;

                if (path[pos - 1] != '/')
                        strcat(path, "/");
                strcat(path, dent->d_name);

                ret = perf_event__walk_cgroup_tree(tool, event, path,
                                                   mount_len, process, machine);
                if (ret < 0)
                        break;

                path[pos] = '\0';
        }

        closedir(d);
        return ret;
}

int perf_event__synthesize_cgroups(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine)
{
        union perf_event event;
        char cgrp_root[PATH_MAX];
        size_t mount_len;  /* length of mount point in the path */

        if (!tool || !tool->cgroup_events)
                return 0;

        if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
                pr_debug("cannot find cgroup mount point\n");
                return -1;
        }

        mount_len = strlen(cgrp_root);
        /* make sure the path starts with a slash (after mount point) */
        strcat(cgrp_root, "/");

        if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
                                         process, machine) < 0)
                return -1;

        return 0;
}
#else
int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
                                   perf_event__handler_t process __maybe_unused,
                                   struct machine *machine __maybe_unused)
{
        return -1;
}
#endif

int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
                                   struct machine *machine)
{
        int rc = 0;
        struct map *pos;
        struct maps *maps = machine__kernel_maps(machine);
        union perf_event *event;
        size_t size = symbol_conf.buildid_mmap2 ?
                        sizeof(event->mmap2) : sizeof(event->mmap);

        event = zalloc(size + machine->id_hdr_size);
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for kernel modules\n");
                return -1;
        }

        /*
         * kernel uses 0 for user space maps, see kernel/perf_event.c
         * __perf_event_mmap
         */
        if (machine__is_host(machine))
                event->header.misc = PERF_RECORD_MISC_KERNEL;
        else
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

        maps__for_each_entry(maps, pos) {
                if (!__map__is_kmodule(pos))
                        continue;

                if (symbol_conf.buildid_mmap2) {
                        size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
                        event->mmap2.header.type = PERF_RECORD_MMAP2;
                        event->mmap2.header.size = (sizeof(event->mmap2) -
                                                (sizeof(event->mmap2.filename) - size));
                        memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
                        event->mmap2.header.size += machine->id_hdr_size;
                        event->mmap2.start = pos->start;
                        event->mmap2.len   = pos->end - pos->start;
                        event->mmap2.pid   = machine->pid;

                        memcpy(event->mmap2.filename, pos->dso->long_name,
                               pos->dso->long_name_len + 1);

                        perf_record_mmap2__read_build_id(&event->mmap2, false);
                } else {
                        size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
                        event->mmap.header.type = PERF_RECORD_MMAP;
                        event->mmap.header.size = (sizeof(event->mmap) -
                                                (sizeof(event->mmap.filename) - size));
                        memset(event->mmap.filename + size, 0, machine->id_hdr_size);
                        event->mmap.header.size += machine->id_hdr_size;
                        event->mmap.start = pos->start;
                        event->mmap.len   = pos->end - pos->start;
                        event->mmap.pid   = machine->pid;

                        memcpy(event->mmap.filename, pos->dso->long_name,
                               pos->dso->long_name_len + 1);
                }

                if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
                        rc = -1;
                        break;
                }
        }

        free(event);
        return rc;
}

static int filter_task(const struct dirent *dirent)
{
        return isdigit(dirent->d_name[0]);
}

static int __event__synthesize_thread(union perf_event *comm_event,
                                      union perf_event *mmap_event,
                                      union perf_event *fork_event,
                                      union perf_event *namespaces_event,
                                      pid_t pid, int full, perf_event__handler_t process,
                                      struct perf_tool *tool, struct machine *machine, bool mmap_data)
{
        char filename[PATH_MAX];
        struct dirent **dirent;
        pid_t tgid, ppid;
        int rc = 0;
        int i, n;

        /* special case: only send one comm event using passed in pid */
        if (!full) {
                tgid = perf_event__synthesize_comm(tool, comm_event, pid,
                                                   process, machine);

                if (tgid == -1)
                        return -1;

                if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
                                                      tgid, process, machine) < 0)
                        return -1;

                /*
                 * send mmap only for thread group leader
                 * see thread__init_maps()
                 */
                if (pid == tgid &&
                    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
                                                       process, machine, mmap_data))
                        return -1;

                return 0;
        }

        if (machine__is_default_guest(machine))
                return 0;

        snprintf(filename, sizeof(filename), "%s/proc/%d/task",
                 machine->root_dir, pid);

        n = scandir(filename, &dirent, filter_task, alphasort);
        if (n < 0)
                return n;

        for (i = 0; i < n; i++) {
                char *end;
                pid_t _pid;
                bool kernel_thread = false;

                _pid = strtol(dirent[i]->d_name, &end, 10);
                if (*end)
                        continue;

                rc = -1;
                if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
                                             &tgid, &ppid, &kernel_thread) != 0)
                        break;

                if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
                                                ppid, process, machine) < 0)
                        break;

                if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
                                                      tgid, process, machine) < 0)
                        break;

                /*
                 * Send the prepared comm event
                 */
                if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
                        break;

                rc = 0;
                if (_pid == pid && !kernel_thread) {
                        /* process the parent's maps too */
                        rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
                                                process, machine, mmap_data);
                        if (rc)
                                break;
                }
        }

        for (i = 0; i < n; i++)
                zfree(&dirent[i]);
        free(dirent);

        return rc;
}

int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct perf_thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine,
                                      bool mmap_data)
{
        union perf_event *comm_event, *mmap_event, *fork_event;
        union perf_event *namespaces_event;
        int err = -1, thread, j;

        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)
                goto out;

        mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
        if (mmap_event == NULL)
                goto out_free_comm;

        fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
        if (fork_event == NULL)
                goto out_free_mmap;

        namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
                                  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                                  machine->id_hdr_size);
        if (namespaces_event == NULL)
                goto out_free_fork;

        err = 0;
        for (thread = 0; thread < threads->nr; ++thread) {
                if (__event__synthesize_thread(comm_event, mmap_event,
                                               fork_event, namespaces_event,
                                               perf_thread_map__pid(threads, thread), 0,
                                               process, tool, machine,
                                               mmap_data)) {
                        err = -1;
                        break;
                }

                /*
                 * comm.pid is set to thread group id by
                 * perf_event__synthesize_comm
                 */
                if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
                        bool need_leader = true;

                        /* is thread group leader in thread_map? */
                        for (j = 0; j < threads->nr; ++j) {
                                if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
                                        need_leader = false;
                                        break;
                                }
                        }

                        /* if not, generate events for it */
                        if (need_leader &&
                            __event__synthesize_thread(comm_event, mmap_event,
                                                       fork_event, namespaces_event,
                                                       comm_event->comm.pid, 0,
                                                       process, tool, machine,
                                                       mmap_data)) {
                                err = -1;
                                break;
                        }
                }
        }
        free(namespaces_event);
out_free_fork:
        free(fork_event);
out_free_mmap:
        free(mmap_event);
out_free_comm:
        free(comm_event);
out:
        return err;
}

static int __perf_event__synthesize_threads(struct perf_tool *tool,
                                            perf_event__handler_t process,
                                            struct machine *machine,
                                            bool mmap_data,
                                            struct dirent **dirent,
                                            int start,
                                            int num)
{
        union perf_event *comm_event, *mmap_event, *fork_event;
        union perf_event *namespaces_event;
        int err = -1;
        char *end;
        pid_t pid;
        int i;

        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
        if (comm_event == NULL)
                goto out;

        mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
        if (mmap_event == NULL)
                goto out_free_comm;

        fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
        if (fork_event == NULL)
                goto out_free_mmap;

        namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
                                  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
                                  machine->id_hdr_size);
        if (namespaces_event == NULL)
                goto out_free_fork;

        for (i = start; i < start + num; i++) {
                if (!isdigit(dirent[i]->d_name[0]))
                        continue;

                pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
                /* only interested in proper numerical dirents */
                if (*end)
                        continue;
                /*
                 * We may race with exiting thread, so don't stop just because
                 * one thread couldn't be synthesized.
                 */
                __event__synthesize_thread(comm_event, mmap_event, fork_event,
                                           namespaces_event, pid, 1, process,
                                           tool, machine, mmap_data);
        }
        err = 0;

        free(namespaces_event);
out_free_fork:
        free(fork_event);
out_free_mmap:
        free(mmap_event);
out_free_comm:
        free(comm_event);
out:
        return err;
}

struct synthesize_threads_arg {
        struct perf_tool *tool;
        perf_event__handler_t process;
        struct machine *machine;
        bool mmap_data;
        struct dirent **dirent;
        int num;
        int start;
};

static void *synthesize_threads_worker(void *arg)
{
        struct synthesize_threads_arg *args = arg;

        __perf_event__synthesize_threads(args->tool, args->process,
                                         args->machine, args->mmap_data,
                                         args->dirent,
                                         args->start, args->num);
        return NULL;
}

int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
                                   struct machine *machine,
                                   bool mmap_data,
                                   unsigned int nr_threads_synthesize)
{
        struct synthesize_threads_arg *args = NULL;
        pthread_t *synthesize_threads = NULL;
        char proc_path[PATH_MAX];
        struct dirent **dirent;
        int num_per_thread;
        int m, n, i, j;
        int thread_nr;
        int base = 0;
        int err = -1;

        if (machine__is_default_guest(machine))
                return 0;

        snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
        n = scandir(proc_path, &dirent, filter_task, alphasort);
        if (n < 0)
                return err;

        if (nr_threads_synthesize == UINT_MAX)
                thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
        else
                thread_nr = nr_threads_synthesize;

        if (thread_nr <= 1) {
                err = __perf_event__synthesize_threads(tool, process,
                                                       machine, mmap_data,
                                                       dirent, base, n);
                goto free_dirent;
        }
        if (thread_nr > n)
                thread_nr = n;

        synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
        if (synthesize_threads == NULL)
                goto free_dirent;

        args = calloc(thread_nr, sizeof(*args));
        if (args == NULL)
                goto free_threads;

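        /*
         * Split the n /proc entries across thread_nr workers: the first
         * m = n % thread_nr workers get num_per_thread + 1 entries each,
         * the rest get num_per_thread. E.g. (hypothetically) n = 10 and
         * thread_nr = 4 yields ranges of sizes 3, 3, 2 and 2.
         */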
        num_per_thread = n / thread_nr;
        m = n % thread_nr;
        for (i = 0; i < thread_nr; i++) {
                args[i].tool = tool;
                args[i].process = process;
                args[i].machine = machine;
                args[i].mmap_data = mmap_data;
                args[i].dirent = dirent;
        }
        for (i = 0; i < m; i++) {
                args[i].num = num_per_thread + 1;
                args[i].start = i * args[i].num;
        }
        if (i != 0)
                base = args[i-1].start + args[i-1].num;
        for (j = i; j < thread_nr; j++) {
                args[j].num = num_per_thread;
                args[j].start = base + (j - i) * args[i].num;
        }

        for (i = 0; i < thread_nr; i++) {
                if (pthread_create(&synthesize_threads[i], NULL,
                                   synthesize_threads_worker, &args[i]))
                        goto out_join;
        }
        err = 0;
out_join:
        for (i = 0; i < thread_nr; i++)
                pthread_join(synthesize_threads[i], NULL);
        free(args);
free_threads:
        free(synthesize_threads);
free_dirent:
        for (i = 0; i < n; i++)
                zfree(&dirent[i]);
        free(dirent);

        return err;
}

int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
                                              perf_event__handler_t process __maybe_unused,
                                              struct machine *machine __maybe_unused)
{
        return 0;
}

static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                                perf_event__handler_t process,
                                                struct machine *machine)
{
        union perf_event *event;
        size_t size = symbol_conf.buildid_mmap2 ?
                        sizeof(event->mmap2) : sizeof(event->mmap);
        struct map *map = machine__kernel_map(machine);
        struct kmap *kmap;
        int err;

        if (map == NULL)
                return -1;

        kmap = map__kmap(map);
        if (!kmap->ref_reloc_sym)
                return -1;

        /*
         * We should get this from /sys/kernel/sections/.text, but until
         * that is available use this, and after it is use it as a fallback
         * for older kernels.
         */
        event = zalloc(size + machine->id_hdr_size);
        if (event == NULL) {
                pr_debug("Not enough memory synthesizing mmap event "
                         "for the kernel map\n");
                return -1;
        }

        if (machine__is_host(machine)) {
                /*
                 * kernel uses PERF_RECORD_MISC_USER for user space maps,
                 * see kernel/perf_event.c __perf_event_mmap
                 */
                event->header.misc = PERF_RECORD_MISC_KERNEL;
        } else {
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
        }

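        /*
         * The synthesized filename is the machine's mmap name followed by
         * the relocation reference symbol, e.g. (a typical host value)
         * "[kernel.kallsyms]_text".
         */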
        if (symbol_conf.buildid_mmap2) {
                size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename),
                                "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
                size = PERF_ALIGN(size, sizeof(u64));
                event->mmap2.header.type = PERF_RECORD_MMAP2;
                event->mmap2.header.size = (sizeof(event->mmap2) -
                                (sizeof(event->mmap2.filename) - size) + machine->id_hdr_size);
                event->mmap2.pgoff = kmap->ref_reloc_sym->addr;
                event->mmap2.start = map->start;
                event->mmap2.len   = map->end - event->mmap2.start;
                event->mmap2.pid   = machine->pid;

                perf_record_mmap2__read_build_id(&event->mmap2, true);
        } else {
                size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
                                "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
                size = PERF_ALIGN(size, sizeof(u64));
                event->mmap.header.type = PERF_RECORD_MMAP;
                event->mmap.header.size = (sizeof(event->mmap) -
                                (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
                event->mmap.pgoff = kmap->ref_reloc_sym->addr;
                event->mmap.start = map->start;
                event->mmap.len   = map->end - event->mmap.start;
                event->mmap.pid   = machine->pid;
        }

        err = perf_tool__process_synth_event(tool, event, machine, process);
        free(event);

        return err;
}

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
        int err;

        err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
        if (err < 0)
                return err;

        return perf_event__synthesize_extra_kmaps(tool, process, machine);
}

int perf_event__synthesize_thread_map2(struct perf_tool *tool,
                                       struct perf_thread_map *threads,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
        union perf_event *event;
        int i, err, size;

        size  = sizeof(event->thread_map);
        size += threads->nr * sizeof(event->thread_map.entries[0]);

        event = zalloc(size);
        if (!event)
                return -ENOMEM;

        event->header.type = PERF_RECORD_THREAD_MAP;
        event->header.size = size;
        event->thread_map.nr = threads->nr;

        for (i = 0; i < threads->nr; i++) {
                struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
                char *comm = perf_thread_map__comm(threads, i);

                if (!comm)
                        comm = (char *) "";

                entry->pid = perf_thread_map__pid(threads, i);
                strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
        }

        err = process(tool, event, NULL, machine);

        free(event);
        return err;
}

static void synthesize_cpus(struct cpu_map_entries *cpus,
                            struct perf_cpu_map *map)
{
        int i;

        cpus->nr = map->nr;

        for (i = 0; i < map->nr; i++)
                cpus->cpu[i] = map->map[i];
}

static void synthesize_mask(struct perf_record_record_cpu_map *mask,
                            struct perf_cpu_map *map, int max)
{
        int i;

        mask->nr = BITS_TO_LONGS(max);
        mask->long_size = sizeof(long);

        for (i = 0; i < map->nr; i++)
                set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct perf_cpu_map *map)
{
        return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

static size_t mask_size(struct perf_cpu_map *map, int *max)
{
        int i;

        *max = 0;

        for (i = 0; i < map->nr; i++) {
                /* bit position of the cpu is + 1, i.e. cpu N needs N + 1 bits */
                int bit = map->map[i] + 1;

                if (bit > *max)
                        *max = bit;
        }

        return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
}

void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
{
        size_t size_cpus, size_mask;
        bool is_dummy = perf_cpu_map__empty(map);

        /*
         * Both array and mask data have variable size based
         * on the number of cpus and their actual values.
         * The size of the 'struct perf_record_cpu_map_data' is:
         *
         *   array = size of 'struct cpu_map_entries' +
         *           number of cpus * sizeof(u16)
         *
         *   mask  = size of 'struct perf_record_record_cpu_map' +
         *           maximum cpu bit converted to size of longs
         *
         * and finally + the size of 'struct perf_record_cpu_map_data'.
         */
        size_cpus = cpus_size(map);
        size_mask = mask_size(map, max);

        if (is_dummy || (size_cpus < size_mask)) {
                *size += size_cpus;
                *type  = PERF_CPU_MAP__CPUS;
        } else {
                *size += size_mask;
                *type  = PERF_CPU_MAP__MASK;
        }

        *size += sizeof(struct perf_record_cpu_map_data);
        *size = PERF_ALIGN(*size, sizeof(u64));
        return zalloc(*size);
}

void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
                              u16 type, int max)
{
        data->type = type;

        switch (type) {
        case PERF_CPU_MAP__CPUS:
                synthesize_cpus((struct cpu_map_entries *) data->data, map);
                break;
        case PERF_CPU_MAP__MASK:
                synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
                break;
        default:
                break;
        }
}

static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
{
        size_t size = sizeof(struct perf_record_cpu_map);
        struct perf_record_cpu_map *event;
        int max;
        u16 type;

        event = cpu_map_data__alloc(map, &size, &type, &max);
        if (!event)
                return NULL;

        event->header.type = PERF_RECORD_CPU_MAP;
        event->header.size = size;
        event->data.type   = type;

        cpu_map_data__synthesize(&event->data, map, type, max);
        return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
                                   struct perf_cpu_map *map,
                                   perf_event__handler_t process,
                                   struct machine *machine)
{
        struct perf_record_cpu_map *event;
        int err;

        event = cpu_map_event__new(map);
        if (!event)
                return -ENOMEM;

        err = process(tool, (union perf_event *) event, NULL, machine);

        free(event);
        return err;
}

int perf_event__synthesize_stat_config(struct perf_tool *tool,
                                       struct perf_stat_config *config,
                                       perf_event__handler_t process,
                                       struct machine *machine)
{
        struct perf_record_stat_config *event;
        int size, i = 0, err;

        size  = sizeof(*event);
        size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

        event = zalloc(size);
        if (!event)
                return -ENOMEM;

        event->header.type = PERF_RECORD_STAT_CONFIG;
        event->header.size = size;
        event->nr          = PERF_STAT_CONFIG_TERM__MAX;

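        /*
         * Emit one tag/value pair per config term; the WARN_ONCE below
         * fires if the number of ADD()s ever drifts from
         * PERF_STAT_CONFIG_TERM__MAX.
         */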
#define ADD(__term, __val)                                      \
        event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;   \
        event->data[i].val = __val;                             \
        i++;

        ADD(AGGR_MODE,  config->aggr_mode)
        ADD(INTERVAL,   config->interval)
        ADD(SCALE,      config->scale)

        WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
                  "stat config terms unbalanced\n");
#undef ADD

        err = process(tool, (union perf_event *) event, NULL, machine);

        free(event);
        return err;
}

int perf_event__synthesize_stat(struct perf_tool *tool,
                                u32 cpu, u32 thread, u64 id,
                                struct perf_counts_values *count,
                                perf_event__handler_t process,
                                struct machine *machine)
{
        struct perf_record_stat event;

        event.header.type = PERF_RECORD_STAT;
        event.header.size = sizeof(event);
        event.header.misc = 0;

        event.id        = id;
        event.cpu       = cpu;
        event.thread    = thread;
        event.val       = count->val;
        event.ena       = count->ena;
        event.run       = count->run;

        return process(tool, (union perf_event *) &event, NULL, machine);
}

int perf_event__synthesize_stat_round(struct perf_tool *tool,
                                      u64 evtime, u64 type,
                                      perf_event__handler_t process,
                                      struct machine *machine)
{
        struct perf_record_stat_round event;

        event.header.type = PERF_RECORD_STAT_ROUND;
        event.header.size = sizeof(event);
        event.header.misc = 0;

        event.time = evtime;
        event.type = type;

        return process(tool, (union perf_event *) &event, NULL, machine);
}

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
{
        size_t sz, result = sizeof(struct perf_record_sample);

        if (type & PERF_SAMPLE_IDENTIFIER)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_IP)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_TID)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_TIME)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_ADDR)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_ID)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_STREAM_ID)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_CPU)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_PERIOD)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_READ) {
                result += sizeof(u64);
                if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                        result += sizeof(u64);
                if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                        result += sizeof(u64);
                /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
                if (read_format & PERF_FORMAT_GROUP) {
                        sz = sample->read.group.nr *
                             sizeof(struct sample_read_value);
                        result += sz;
                } else {
                        result += sizeof(u64);
                }
        }

        if (type & PERF_SAMPLE_CALLCHAIN) {
                sz = (sample->callchain->nr + 1) * sizeof(u64);
                result += sz;
        }

        if (type & PERF_SAMPLE_RAW) {
                result += sizeof(u32);
                result += sample->raw_size;
        }

        if (type & PERF_SAMPLE_BRANCH_STACK) {
                sz = sample->branch_stack->nr * sizeof(struct branch_entry);
                /* nr, hw_idx */
                sz += 2 * sizeof(u64);
                result += sz;
        }

        if (type & PERF_SAMPLE_REGS_USER) {
                if (sample->user_regs.abi) {
                        result += sizeof(u64);
                        sz = hweight64(sample->user_regs.mask) * sizeof(u64);
                        result += sz;
                } else {
                        result += sizeof(u64);
                }
        }

        if (type & PERF_SAMPLE_STACK_USER) {
                sz = sample->user_stack.size;
                result += sizeof(u64);
                if (sz) {
                        result += sz;
                        result += sizeof(u64);
                }
        }

        if (type & PERF_SAMPLE_WEIGHT_TYPE)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_DATA_SRC)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_TRANSACTION)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_REGS_INTR) {
                if (sample->intr_regs.abi) {
                        result += sizeof(u64);
                        sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
                        result += sz;
                } else {
                        result += sizeof(u64);
                }
        }

        if (type & PERF_SAMPLE_PHYS_ADDR)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_CGROUP)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
                result += sizeof(u64);

        if (type & PERF_SAMPLE_AUX) {
                result += sizeof(u64);
                result += sample->aux_sample.size;
        }

        return result;
}
1509
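/*
 * Weak default for PERF_SAMPLE_WEIGHT_TYPE: store the plain 64-bit weight.
 * Architectures supporting PERF_SAMPLE_WEIGHT_STRUCT override this to pack
 * the additional fields.
 */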
1510void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
1511                                               __u64 *array, u64 type __maybe_unused)
1512{
1513        *array = data->weight;
1514}
1515
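/*
 * Serialize @sample back into the raw PERF_RECORD_SAMPLE layout in @event,
 * field by field according to @type and @read_format. The caller must have
 * sized @event using perf_event__sample_event_size().
 */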
1516int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
1517                                  const struct perf_sample *sample)
1518{
1519        __u64 *array;
1520        size_t sz;
1521        /*
1522         * used for cross-endian analysis. See git commit 65014ab3
1523         * for why this goofiness is needed.
1524         */
1525        union u64_swap u;
1526
1527        array = event->sample.array;
1528
1529        if (type & PERF_SAMPLE_IDENTIFIER) {
1530                *array = sample->id;
1531                array++;
1532        }
1533
1534        if (type & PERF_SAMPLE_IP) {
1535                *array = sample->ip;
1536                array++;
1537        }
1538
1539        if (type & PERF_SAMPLE_TID) {
1540                u.val32[0] = sample->pid;
1541                u.val32[1] = sample->tid;
1542                *array = u.val64;
1543                array++;
1544        }
1545
1546        if (type & PERF_SAMPLE_TIME) {
1547                *array = sample->time;
1548                array++;
1549        }
1550
1551        if (type & PERF_SAMPLE_ADDR) {
1552                *array = sample->addr;
1553                array++;
1554        }
1555
1556        if (type & PERF_SAMPLE_ID) {
1557                *array = sample->id;
1558                array++;
1559        }
1560
1561        if (type & PERF_SAMPLE_STREAM_ID) {
1562                *array = sample->stream_id;
1563                array++;
1564        }
1565
1566        if (type & PERF_SAMPLE_CPU) {
1567                u.val32[0] = sample->cpu;
1568                u.val32[1] = 0;
1569                *array = u.val64;
1570                array++;
1571        }
1572
1573        if (type & PERF_SAMPLE_PERIOD) {
1574                *array = sample->period;
1575                array++;
1576        }
1577
1578        if (type & PERF_SAMPLE_READ) {
1579                if (read_format & PERF_FORMAT_GROUP)
1580                        *array = sample->read.group.nr;
1581                else
1582                        *array = sample->read.one.value;
1583                array++;
1584
1585                if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1586                        *array = sample->read.time_enabled;
1587                        array++;
1588                }
1589
1590                if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1591                        *array = sample->read.time_running;
1592                        array++;
1593                }
1594
1595                /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1596                if (read_format & PERF_FORMAT_GROUP) {
1597                        sz = sample->read.group.nr *
1598                             sizeof(struct sample_read_value);
1599                        memcpy(array, sample->read.group.values, sz);
1600                        array = (void *)array + sz;
1601                } else {
1602                        *array = sample->read.one.id;
1603                        array++;
1604                }
1605        }
1606
1607        if (type & PERF_SAMPLE_CALLCHAIN) {
1608                sz = (sample->callchain->nr + 1) * sizeof(u64);
1609                memcpy(array, sample->callchain, sz);
1610                array = (void *)array + sz;
1611        }
1612
1613        if (type & PERF_SAMPLE_RAW) {
1614                u.val32[0] = sample->raw_size;
1615                *array = u.val64;
1616                array = (void *)array + sizeof(u32);
1617
1618                memcpy(array, sample->raw_data, sample->raw_size);
1619                array = (void *)array + sample->raw_size;
1620        }
1621
1622        if (type & PERF_SAMPLE_BRANCH_STACK) {
1623                sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1624                /* nr, hw_idx */
1625                sz += 2 * sizeof(u64);
1626                memcpy(array, sample->branch_stack, sz);
1627                array = (void *)array + sz;
1628        }
1629
1630        if (type & PERF_SAMPLE_REGS_USER) {
1631                if (sample->user_regs.abi) {
1632                        *array++ = sample->user_regs.abi;
1633                        sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1634                        memcpy(array, sample->user_regs.regs, sz);
1635                        array = (void *)array + sz;
1636                } else {
1637                        *array++ = 0;
1638                }
1639        }
1640
1641        if (type & PERF_SAMPLE_STACK_USER) {
1642                sz = sample->user_stack.size;
1643                *array++ = sz;
1644                if (sz) {
1645                        memcpy(array, sample->user_stack.data, sz);
1646                        array = (void *)array + sz;
1647                        *array++ = sz;
1648                }
1649        }
1650
1651        if (type & PERF_SAMPLE_WEIGHT_TYPE) {
1652                arch_perf_synthesize_sample_weight(sample, array, type);
1653                array++;
1654        }
1655
1656        if (type & PERF_SAMPLE_DATA_SRC) {
1657                *array = sample->data_src;
1658                array++;
1659        }
1660
1661        if (type & PERF_SAMPLE_TRANSACTION) {
1662                *array = sample->transaction;
1663                array++;
1664        }
1665
1666        if (type & PERF_SAMPLE_REGS_INTR) {
1667                if (sample->intr_regs.abi) {
1668                        *array++ = sample->intr_regs.abi;
1669                        sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1670                        memcpy(array, sample->intr_regs.regs, sz);
1671                        array = (void *)array + sz;
1672                } else {
1673                        *array++ = 0;
1674                }
1675        }
1676
1677        if (type & PERF_SAMPLE_PHYS_ADDR) {
1678                *array = sample->phys_addr;
1679                array++;
1680        }
1681
1682        if (type & PERF_SAMPLE_CGROUP) {
1683                *array = sample->cgroup;
1684                array++;
1685        }
1686
1687        if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
1688                *array = sample->data_page_size;
1689                array++;
1690        }
1691
1692        if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
1693                *array = sample->code_page_size;
1694                array++;
1695        }
1696
1697        if (type & PERF_SAMPLE_AUX) {
1698                sz = sample->aux_sample.size;
1699                *array++ = sz;
1700                memcpy(array, sample->aux_sample.data, sz);
1701                array = (void *)array + sz;
1702        }
1703
1704        return 0;
1705}
1706
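/*
 * Emit PERF_RECORD_ID_INDEX event(s) mapping each sample ID in @evlist to
 * its index, cpu and tid. The entries are split across multiple events
 * whenever they would overflow the u16 header size field.
 */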
1707int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
1708                                    struct evlist *evlist, struct machine *machine)
1709{
1710        union perf_event *ev;
1711        struct evsel *evsel;
1712        size_t nr = 0, i = 0, sz, max_nr, n;
1713        int err;
1714
1715        pr_debug2("Synthesizing id index\n");
1716
1717        max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
1718                 sizeof(struct id_index_entry);
1719
1720        evlist__for_each_entry(evlist, evsel)
1721                nr += evsel->core.ids;
1722
1723        n = nr > max_nr ? max_nr : nr;
1724        sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
1725        ev = zalloc(sz);
1726        if (!ev)
1727                return -ENOMEM;
1728
1729        ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1730        ev->id_index.header.size = sz;
1731        ev->id_index.nr = n;
1732
1733        evlist__for_each_entry(evlist, evsel) {
1734                u32 j;
1735
1736                for (j = 0; j < evsel->core.ids; j++) {
1737                        struct id_index_entry *e;
1738                        struct perf_sample_id *sid;
1739
1740                        if (i >= n) {
1741                                err = process(tool, ev, NULL, machine);
1742                                if (err)
1743                                        goto out_err;
1744                                nr -= n;
1745                                i = 0;
1746                        }
1747
1748                        e = &ev->id_index.entries[i++];
1749
1750                        e->id = evsel->core.id[j];
1751
1752                        sid = evlist__id2sid(evlist, e->id);
1753                        if (!sid) {
1754                                free(ev);
1755                                return -ENOENT;
1756                        }
1757
1758                        e->idx = sid->idx;
1759                        e->cpu = sid->cpu;
1760                        e->tid = sid->tid;
1761                }
1762        }
1763
1764        sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
1765        ev->id_index.header.size = sz;
1766        ev->id_index.nr = nr;
1767
1768        err = process(tool, ev, NULL, machine);
1769out_err:
1770        free(ev);
1771
1772        return err;
1773}
1774
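/*
 * Synthesize task events for an existing workload: from a thread map when
 * specific tasks are targeted, from /proc for cpu/system-wide targets, or
 * nothing when perf starts the command itself.
 */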
1775int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
1776                                  struct target *target, struct perf_thread_map *threads,
1777                                  perf_event__handler_t process, bool data_mmap,
1778                                  unsigned int nr_threads_synthesize)
1779{
1780        if (target__has_task(target))
1781                return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
1782        else if (target__has_cpu(target))
1783                return perf_event__synthesize_threads(tool, process,
1784                                                      machine, data_mmap,
1785                                                      nr_threads_synthesize);
1786        /* command specified */
1787        return 0;
1788}
1789
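/* Convenience wrapper: no tool, default perf_event__process callback. */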
1790int machine__synthesize_threads(struct machine *machine, struct target *target,
1791                                struct perf_thread_map *threads, bool data_mmap,
1792                                unsigned int nr_threads_synthesize)
1793{
1794        return __machine__synthesize_threads(machine, NULL, target, threads,
1795                                             perf_event__process, data_mmap,
1796                                             nr_threads_synthesize);
1797}
1798
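/*
 * Allocate a zeroed PERF_RECORD_EVENT_UPDATE with room for @size payload
 * bytes (u64-aligned, header included) and the given update @type and
 * event @id.
 */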
1799static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
1800{
1801        struct perf_record_event_update *ev;
1802
1803        size += sizeof(*ev);
1804        size  = PERF_ALIGN(size, sizeof(u64));
1805
1806        ev = zalloc(size);
1807        if (ev) {
1808                ev->header.type = PERF_RECORD_EVENT_UPDATE;
1809                ev->header.size = (u16)size;
1810                ev->type        = type;
1811                ev->id          = id;
1812        }
1813        return ev;
1814}
1815
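/* Synthesize an event update carrying the evsel's unit string. */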
1816int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
1817                                             perf_event__handler_t process)
1818{
1819        size_t size = strlen(evsel->unit);
1820        struct perf_record_event_update *ev;
1821        int err;
1822
1823        ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
1824        if (ev == NULL)
1825                return -ENOMEM;
1826
1827        strlcpy(ev->data, evsel->unit, size + 1);
1828        err = process(tool, (union perf_event *)ev, NULL, NULL);
1829        free(ev);
1830        return err;
1831}
1832
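/* Synthesize an event update carrying the evsel's scaling factor. */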
1833int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
1834                                              perf_event__handler_t process)
1835{
1836        struct perf_record_event_update *ev;
1837        struct perf_record_event_update_scale *ev_data;
1838        int err;
1839
1840        ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
1841        if (ev == NULL)
1842                return -ENOMEM;
1843
1844        ev_data = (struct perf_record_event_update_scale *)ev->data;
1845        ev_data->scale = evsel->scale;
1846        err = process(tool, (union perf_event *)ev, NULL, NULL);
1847        free(ev);
1848        return err;
1849}
1850
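/* Synthesize an event update carrying the evsel's name string. */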
1851int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
1852                                             perf_event__handler_t process)
1853{
1854        struct perf_record_event_update *ev;
1855        size_t len = strlen(evsel->name);
1856        int err;
1857
1858        ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
1859        if (ev == NULL)
1860                return -ENOMEM;
1861
1862        strlcpy(ev->data, evsel->name, len + 1);
1863        err = process(tool, (union perf_event *)ev, NULL, NULL);
1864        free(ev);
1865        return err;
1866}
1867
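/*
 * Synthesize an event update describing the cpu map the evsel is bound to,
 * skipped when it has no cpu map of its own.
 */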
1868int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
1869                                             perf_event__handler_t process)
1870{
1871        size_t size = sizeof(struct perf_record_event_update);
1872        struct perf_record_event_update *ev;
1873        int max, err;
1874        u16 type;
1875
1876        if (!evsel->core.own_cpus)
1877                return 0;
1878
1879        ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
1880        if (!ev)
1881                return -ENOMEM;
1882
1883        ev->header.type = PERF_RECORD_EVENT_UPDATE;
1884        ev->header.size = (u16)size;
1885        ev->type        = PERF_EVENT_UPDATE__CPUS;
1886        ev->id          = evsel->core.id[0];
1887
1888        cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
1889                                 evsel->core.own_cpus, type, max);
1890
1891        err = process(tool, (union perf_event *)ev, NULL, NULL);
1892        free(ev);
1893        return err;
1894}
1895
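/* Synthesize one PERF_RECORD_HEADER_ATTR event per evsel in @evlist. */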
1896int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
1897                                 perf_event__handler_t process)
1898{
1899        struct evsel *evsel;
1900        int err = 0;
1901
1902        evlist__for_each_entry(evlist, evsel) {
1903                err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
1904                                                  evsel->core.id, process);
1905                if (err) {
1906                        pr_debug("failed to create perf header attribute\n");
1907                        return err;
1908                }
1909        }
1910
1911        return err;
1912}
1913
1914static bool has_unit(struct evsel *evsel)
1915{
1916        return evsel->unit && *evsel->unit;
1917}
1918
1919static bool has_scale(struct evsel *evsel)
1920{
1921        return evsel->scale != 1;
1922}
1923
1924int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
1925                                      perf_event__handler_t process, bool is_pipe)
1926{
1927        struct evsel *evsel;
1928        int err;
1929
1930        /*
1931         * Synthesize other event metadata not carried within the
1932         * attr event: unit, scale, name.
1933         */
1934        evlist__for_each_entry(evsel_list, evsel) {
1935                if (!evsel->supported)
1936                        continue;
1937
1938                /*
1939                 * Synthesize unit and scale only if they are defined.
1940                 */
1941                if (has_unit(evsel)) {
1942                        err = perf_event__synthesize_event_update_unit(tool, evsel, process);
1943                        if (err < 0) {
1944                                pr_err("Couldn't synthesize evsel unit.\n");
1945                                return err;
1946                        }
1947                }
1948
1949                if (has_scale(evsel)) {
1950                        err = perf_event__synthesize_event_update_scale(tool, evsel, process);
1951                        if (err < 0) {
1952                                pr_err("Couldn't synthesize evsel scale.\n");
1953                                return err;
1954                        }
1955                }
1956
1957                if (evsel->core.own_cpus) {
1958                        err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
1959                        if (err < 0) {
1960                                pr_err("Couldn't synthesize evsel cpus.\n");
1961                                return err;
1962                        }
1963                }
1964
1965                /*
1966                 * Name is needed only for pipe output;
1967                 * perf.data carries event names.
1968                 */
1969                if (is_pipe) {
1970                        err = perf_event__synthesize_event_update_name(tool, evsel, process);
1971                        if (err < 0) {
1972                                pr_err("Couldn't synthesize evsel name.\n");
1973                                return err;
1974                        }
1975                }
1976        }
1977        return 0;
1978}
1979
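/*
 * Build a PERF_RECORD_HEADER_ATTR event: the perf_event_attr followed by
 * the @ids sample IDs. Fails with -E2BIG when the total size does not fit
 * the u16 header size field.
 */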
1980int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
1981                                u32 ids, u64 *id, perf_event__handler_t process)
1982{
1983        union perf_event *ev;
1984        size_t size;
1985        int err;
1986
1987        size = sizeof(struct perf_event_attr);
1988        size = PERF_ALIGN(size, sizeof(u64));
1989        size += sizeof(struct perf_event_header);
1990        size += ids * sizeof(u64);
1991
1992        ev = zalloc(size);
1993
1994        if (ev == NULL)
1995                return -ENOMEM;
1996
1997        ev->attr.attr = *attr;
1998        memcpy(ev->attr.id, id, ids * sizeof(u64));
1999
2000        ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2001        ev->attr.header.size = (u16)size;
2002
2003        if (ev->attr.header.size == size)
2004                err = process(tool, ev, NULL, NULL);
2005        else
2006                err = -E2BIG;
2007
2008        free(ev);
2009
2010        return err;
2011}
2012
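/*
 * Stream the tracepoint metadata to a pipe as a
 * PERF_RECORD_HEADER_TRACING_DATA event followed by the (padded) data
 * itself. Returns the u64-aligned payload size on success.
 */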
2013int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
2014                                        perf_event__handler_t process)
2015{
2016        union perf_event ev;
2017        struct tracing_data *tdata;
2018        ssize_t size = 0, aligned_size = 0, padding;
2019        struct feat_fd ff;
2020
2021        /*
2022         * We are going to store the size of the data followed
2023         * by the data contents. Since the fd is a pipe,
2024         * we cannot seek back to store the size of the data once
2025         * we know it. Instead we:
2026         *
2027         * - write the tracing data to the temp file
2028         * - get/write the data size to pipe
2029         * - write the tracing data from the temp file
2030         *   to the pipe
2031         */
2032        tdata = tracing_data_get(&evlist->core.entries, fd, true);
2033        if (!tdata)
2034                return -1;
2035
2036        memset(&ev, 0, sizeof(ev));
2037
2038        ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
2039        size = tdata->size;
2040        aligned_size = PERF_ALIGN(size, sizeof(u64));
2041        padding = aligned_size - size;
2042        ev.tracing_data.header.size = sizeof(ev.tracing_data);
2043        ev.tracing_data.size = aligned_size;
2044
        /* Deliver the header event; clean up the temp file on failure. */
        if (process(tool, &ev, NULL, NULL) != 0) {
                tracing_data_put(tdata);
                return -1;
        }
2046
2047        /*
2048         * The put function will copy all the tracing data
2049         * stored in temp file to the pipe.
2050         */
2051        tracing_data_put(tdata);
2052
2053        ff = (struct feat_fd){ .fd = fd };
2054        if (write_padded(&ff, NULL, 0, padding))
2055                return -1;
2056
2057        return aligned_size;
2058}
2059
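/*
 * Synthesize a PERF_RECORD_HEADER_BUILD_ID event for @pos. Only DSOs that
 * were hit by samples are emitted; the filename is padded to NAME_ALIGN.
 */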
2060int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
2061                                    perf_event__handler_t process, struct machine *machine)
2062{
2063        union perf_event ev;
2064        size_t len;
2065
2066        if (!pos->hit)
2067                return 0;
2068
2069        memset(&ev, 0, sizeof(ev));
2070
2071        len = pos->long_name_len + 1;
2072        len = PERF_ALIGN(len, NAME_ALIGN);
2073        memcpy(&ev.build_id.build_id, pos->bid.data, sizeof(pos->bid.data));
2074        ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
2075        ev.build_id.header.misc = misc;
2076        ev.build_id.pid = machine->pid;
2077        ev.build_id.header.size = sizeof(ev.build_id) + len;
2078        memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
2079
2080        return process(tool, &ev, NULL, machine);
2081}
2082
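/*
 * Emit the preamble 'perf stat' records need in front of the data: attrs
 * (pipe mode only), extra attrs, thread map, cpu map and stat config.
 */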
2083int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
2084                                       struct evlist *evlist, perf_event__handler_t process, bool attrs)
2085{
2086        int err;
2087
2088        if (attrs) {
2089                err = perf_event__synthesize_attrs(tool, evlist, process);
2090                if (err < 0) {
2091                        pr_err("Couldn't synthesize attrs.\n");
2092                        return err;
2093                }
2094        }
2095
        err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
        if (err < 0) {
                pr_err("Couldn't synthesize extra attrs.\n");
                return err;
        }

        err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
2098        if (err < 0) {
2099                pr_err("Couldn't synthesize thread map.\n");
2100                return err;
2101        }
2102
2103        err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
2104        if (err < 0) {
2105                pr_err("Couldn't synthesize cpu map.\n");
2106                return err;
2107        }
2108
2109        err = perf_event__synthesize_stat_config(tool, config, process, NULL);
2110        if (err < 0) {
2111                pr_err("Couldn't synthesize config.\n");
2112                return err;
2113        }
2114
2115        return 0;
2116}
2117
2118extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
2119
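/*
 * Synthesize each feature set in the perf_header as a
 * PERF_RECORD_HEADER_FEATURE event via its feat_ops write callback, then
 * terminate with a HEADER_LAST_FEATURE marker.
 */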
2120int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
2121                                    struct evlist *evlist, perf_event__handler_t process)
2122{
2123        struct perf_header *header = &session->header;
2124        struct perf_record_header_feature *fe;
2125        struct feat_fd ff;
2126        size_t sz, sz_hdr;
2127        int feat, ret;
2128
2129        sz_hdr = sizeof(fe->header);
2130        sz = sizeof(union perf_event);
2131        /* get a nice alignment */
2132        sz = PERF_ALIGN(sz, page_size);
2133
2134        memset(&ff, 0, sizeof(ff));
2135
2136        ff.buf = malloc(sz);
2137        if (!ff.buf)
2138                return -ENOMEM;
2139
2140        ff.size = sz - sz_hdr;
2141        ff.ph = &session->header;
2142
2143        for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2144                if (!feat_ops[feat].synthesize) {
2145                        pr_debug("No record header feature for header %d\n", feat);
2146                        continue;
2147                }
2148
2149                ff.offset = sizeof(*fe);
2150
2151                ret = feat_ops[feat].write(&ff, evlist);
2152                if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
2153                        pr_debug("Error writing feature\n");
2154                        continue;
2155                }
2156                /* ff.buf may have changed due to realloc in do_write() */
2157                fe = ff.buf;
2158                memset(fe, 0, sizeof(*fe));
2159
2160                fe->feat_id = feat;
2161                fe->header.type = PERF_RECORD_HEADER_FEATURE;
2162                fe->header.size = ff.offset;
2163
2164                ret = process(tool, ff.buf, NULL, NULL);
2165                if (ret) {
2166                        free(ff.buf);
2167                        return ret;
2168                }
2169        }
2170
2171        /* Send HEADER_LAST_FEATURE mark. */
2172        fe = ff.buf;
2173        fe->feat_id     = HEADER_LAST_FEATURE;
2174        fe->header.type = PERF_RECORD_HEADER_FEATURE;
2175        fe->header.size = sizeof(*fe);
2176
2177        ret = process(tool, ff.buf, NULL, NULL);
2178
2179        free(ff.buf);
2180        return ret;
2181}
2182