linux/tools/perf/util/synthetic-events.c
   1// SPDX-License-Identifier: GPL-2.0-only 
   2
   3#include "util/debug.h"
   4#include "util/dso.h"
   5#include "util/event.h"
   6#include "util/evlist.h"
   7#include "util/machine.h"
   8#include "util/map.h"
   9#include "util/map_symbol.h"
  10#include "util/branch.h"
  11#include "util/memswap.h"
  12#include "util/namespaces.h"
  13#include "util/session.h"
  14#include "util/stat.h"
  15#include "util/symbol.h"
  16#include "util/synthetic-events.h"
  17#include "util/target.h"
  18#include "util/time-utils.h"
  19#include <linux/bitops.h>
  20#include <linux/kernel.h>
  21#include <linux/string.h>
  22#include <linux/zalloc.h>
  23#include <linux/perf_event.h>
  24#include <asm/bug.h>
  25#include <perf/evsel.h>
  26#include <internal/cpumap.h>
  27#include <perf/cpumap.h>
  28#include <internal/lib.h> // page_size
  29#include <internal/threadmap.h>
  30#include <perf/threadmap.h>
  31#include <symbol/kallsyms.h>
  32#include <dirent.h>
  33#include <errno.h>
  34#include <inttypes.h>
  35#include <stdio.h>
  36#include <string.h>
  37#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
  38#include <api/fs/fs.h>
  39#include <sys/types.h>
  40#include <sys/stat.h>
  41#include <fcntl.h>
  42#include <unistd.h>
  43
  44#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
  45
  46unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
  47
  48int perf_tool__process_synth_event(struct perf_tool *tool,
  49                                   union perf_event *event,
  50                                   struct machine *machine,
  51                                   perf_event__handler_t process)
  52{
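        /*
         * Synthesized (non-kernel) events have no real sample attached, so fake
         * a minimal one: unknown pid/tid/time/stream_id/cpu (-1), a period of 1,
         * and only the cpumode taken from the event header.
         */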
  53        struct perf_sample synth_sample = {
  54                .pid       = -1,
  55                .tid       = -1,
  56                .time      = -1,
  57                .stream_id = -1,
  58                .cpu       = -1,
  59                .period    = 1,
  60                .cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
  61        };
  62
  63        return process(tool, event, &synth_sample, machine);
   64}
  65
  66/*
   67 * Assumes that the first 4095 bytes of /proc/pid/status contain
  68 * the comm, tgid and ppid.
  69 */
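/*
 * For illustration only, the fields picked out below look like this in
 * /proc/<pid>/status (example values):
 *
 *   Name:   cat
 *   Tgid:   1234
 *   PPid:   1000
 */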
  70static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
  71                                    pid_t *tgid, pid_t *ppid)
  72{
  73        char filename[PATH_MAX];
  74        char bf[4096];
  75        int fd;
  76        size_t size = 0;
  77        ssize_t n;
  78        char *name, *tgids, *ppids;
  79
  80        *tgid = -1;
  81        *ppid = -1;
  82
  83        snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
  84
  85        fd = open(filename, O_RDONLY);
  86        if (fd < 0) {
  87                pr_debug("couldn't open %s\n", filename);
  88                return -1;
  89        }
  90
  91        n = read(fd, bf, sizeof(bf) - 1);
  92        close(fd);
  93        if (n <= 0) {
   94                pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
  95                           pid);
  96                return -1;
  97        }
  98        bf[n] = '\0';
  99
 100        name = strstr(bf, "Name:");
 101        tgids = strstr(bf, "Tgid:");
 102        ppids = strstr(bf, "PPid:");
 103
 104        if (name) {
 105                char *nl;
 106
 107                name = skip_spaces(name + 5);  /* strlen("Name:") */
 108                nl = strchr(name, '\n');
 109                if (nl)
 110                        *nl = '\0';
 111
 112                size = strlen(name);
 113                if (size >= len)
 114                        size = len - 1;
 115                memcpy(comm, name, size);
 116                comm[size] = '\0';
 117        } else {
 118                pr_debug("Name: string not found for pid %d\n", pid);
 119        }
 120
 121        if (tgids) {
 122                tgids += 5;  /* strlen("Tgid:") */
 123                *tgid = atoi(tgids);
 124        } else {
 125                pr_debug("Tgid: string not found for pid %d\n", pid);
 126        }
 127
 128        if (ppids) {
 129                ppids += 5;  /* strlen("PPid:") */
 130                *ppid = atoi(ppids);
 131        } else {
 132                pr_debug("PPid: string not found for pid %d\n", pid);
 133        }
 134
 135        return 0;
 136}
 137
 138static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
 139                                    struct machine *machine,
 140                                    pid_t *tgid, pid_t *ppid)
 141{
 142        size_t size;
 143
 144        *ppid = -1;
 145
 146        memset(&event->comm, 0, sizeof(event->comm));
 147
 148        if (machine__is_host(machine)) {
 149                if (perf_event__get_comm_ids(pid, event->comm.comm,
 150                                             sizeof(event->comm.comm),
 151                                             tgid, ppid) != 0) {
 152                        return -1;
 153                }
 154        } else {
 155                *tgid = machine->pid;
 156        }
 157
 158        if (*tgid < 0)
 159                return -1;
 160
 161        event->comm.pid = *tgid;
 162        event->comm.header.type = PERF_RECORD_COMM;
 163
 164        size = strlen(event->comm.comm) + 1;
 165        size = PERF_ALIGN(size, sizeof(u64));
 166        memset(event->comm.comm + size, 0, machine->id_hdr_size);
 167        event->comm.header.size = (sizeof(event->comm) -
 168                                (sizeof(event->comm.comm) - size) +
 169                                machine->id_hdr_size);
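        /*
         * Worked example (hypothetical comm "cat"): strlen + 1 = 4 is rounded
         * up to 8 by PERF_ALIGN, so the 16-byte comm.comm array is trimmed by
         * the unused 8 bytes before id_hdr_size is added back on.
         */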
 170        event->comm.tid = pid;
 171
 172        return 0;
 173}
 174
 175pid_t perf_event__synthesize_comm(struct perf_tool *tool,
 176                                         union perf_event *event, pid_t pid,
 177                                         perf_event__handler_t process,
 178                                         struct machine *machine)
 179{
 180        pid_t tgid, ppid;
 181
 182        if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
 183                return -1;
 184
 185        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
 186                return -1;
 187
 188        return tgid;
 189}
 190
 191static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
 192                                         struct perf_ns_link_info *ns_link_info)
 193{
 194        struct stat64 st;
 195        char proc_ns[128];
 196
 197        sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
 198        if (stat64(proc_ns, &st) == 0) {
 199                ns_link_info->dev = st.st_dev;
 200                ns_link_info->ino = st.st_ino;
 201        }
 202}
 203
 204int perf_event__synthesize_namespaces(struct perf_tool *tool,
 205                                      union perf_event *event,
 206                                      pid_t pid, pid_t tgid,
 207                                      perf_event__handler_t process,
 208                                      struct machine *machine)
 209{
 210        u32 idx;
 211        struct perf_ns_link_info *ns_link_info;
 212
 213        if (!tool || !tool->namespace_events)
 214                return 0;
 215
 216        memset(&event->namespaces, 0, (sizeof(event->namespaces) +
 217               (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
 218               machine->id_hdr_size));
 219
 220        event->namespaces.pid = tgid;
 221        event->namespaces.tid = pid;
 222
 223        event->namespaces.nr_namespaces = NR_NAMESPACES;
 224
 225        ns_link_info = event->namespaces.link_info;
 226
 227        for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
 228                perf_event__get_ns_link_info(pid, perf_ns__name(idx),
 229                                             &ns_link_info[idx]);
 230
 231        event->namespaces.header.type = PERF_RECORD_NAMESPACES;
 232
 233        event->namespaces.header.size = (sizeof(event->namespaces) +
 234                        (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
 235                        machine->id_hdr_size);
 236
 237        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
 238                return -1;
 239
 240        return 0;
 241}
 242
 243static int perf_event__synthesize_fork(struct perf_tool *tool,
 244                                       union perf_event *event,
 245                                       pid_t pid, pid_t tgid, pid_t ppid,
 246                                       perf_event__handler_t process,
 247                                       struct machine *machine)
 248{
 249        memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
 250
 251        /*
  252         * For the main thread, set the parent to the ppid from the status
  253         * file. For other threads, set the parent pid to the main thread,
  254         * i.e. assume the main thread spawns all threads in a process.
  255         */
 256        if (tgid == pid) {
 257                event->fork.ppid = ppid;
 258                event->fork.ptid = ppid;
 259        } else {
 260                event->fork.ppid = tgid;
 261                event->fork.ptid = tgid;
 262        }
 263        event->fork.pid  = tgid;
 264        event->fork.tid  = pid;
 265        event->fork.header.type = PERF_RECORD_FORK;
 266        event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
 267
 268        event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
 269
 270        if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
 271                return -1;
 272
 273        return 0;
 274}
 275
 276int perf_event__synthesize_mmap_events(struct perf_tool *tool,
 277                                       union perf_event *event,
 278                                       pid_t pid, pid_t tgid,
 279                                       perf_event__handler_t process,
 280                                       struct machine *machine,
 281                                       bool mmap_data)
 282{
 283        char filename[PATH_MAX];
 284        FILE *fp;
 285        unsigned long long t;
 286        bool truncation = false;
 287        unsigned long long timeout = proc_map_timeout * 1000000ULL;
 288        int rc = 0;
 289        const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
 290        int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
 291
 292        if (machine__is_default_guest(machine))
 293                return 0;
 294
 295        snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
 296                 machine->root_dir, pid, pid);
 297
 298        fp = fopen(filename, "r");
 299        if (fp == NULL) {
 300                /*
 301                 * We raced with a task exiting - just return:
 302                 */
 303                pr_debug("couldn't open %s\n", filename);
 304                return -1;
 305        }
 306
 307        event->header.type = PERF_RECORD_MMAP2;
 308        t = rdclock();
 309
 310        while (1) {
 311                char bf[BUFSIZ];
 312                char prot[5];
 313                char execname[PATH_MAX];
 314                char anonstr[] = "//anon";
 315                unsigned int ino;
 316                size_t size;
 317                ssize_t n;
 318
 319                if (fgets(bf, sizeof(bf), fp) == NULL)
 320                        break;
 321
 322                if ((rdclock() - t) > timeout) {
  323                        pr_warning("Reading %s timed out. "
 324                                   "You may want to increase "
  325                                   "the time limit with --proc-map-timeout\n",
 326                                   filename);
 327                        truncation = true;
 328                        goto out;
 329                }
 330
 331                /* ensure null termination since stack will be reused. */
 332                strcpy(execname, "");
 333
 334                /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
 335                n = sscanf(bf, "%"PRI_lx64"-%"PRI_lx64" %s %"PRI_lx64" %x:%x %u %[^\n]\n",
 336                       &event->mmap2.start, &event->mmap2.len, prot,
 337                       &event->mmap2.pgoff, &event->mmap2.maj,
 338                       &event->mmap2.min,
 339                       &ino, execname);
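                /*
                 * For the example line above: start = 0x400000, the end
                 * address 0x40c000 is parsed into mmap2.len for now,
                 * prot = "r-xp", pgoff = 0, maj:min = fd:01, ino = 41038
                 * and execname = "/bin/cat".
                 */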
 340
 341                /*
 342                 * Anon maps don't have the execname.
 343                 */
 344                if (n < 7)
 345                        continue;
 346
 347                event->mmap2.ino = (u64)ino;
 348
 349                /*
 350                 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
 351                 */
 352                if (machine__is_host(machine))
 353                        event->header.misc = PERF_RECORD_MISC_USER;
 354                else
 355                        event->header.misc = PERF_RECORD_MISC_GUEST_USER;
 356
 357                /* map protection and flags bits */
 358                event->mmap2.prot = 0;
 359                event->mmap2.flags = 0;
 360                if (prot[0] == 'r')
 361                        event->mmap2.prot |= PROT_READ;
 362                if (prot[1] == 'w')
 363                        event->mmap2.prot |= PROT_WRITE;
 364                if (prot[2] == 'x')
 365                        event->mmap2.prot |= PROT_EXEC;
 366
 367                if (prot[3] == 's')
 368                        event->mmap2.flags |= MAP_SHARED;
 369                else
 370                        event->mmap2.flags |= MAP_PRIVATE;
 371
 372                if (prot[2] != 'x') {
 373                        if (!mmap_data || prot[0] != 'r')
 374                                continue;
 375
 376                        event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
 377                }
 378
 379out:
 380                if (truncation)
 381                        event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
 382
 383                if (!strcmp(execname, ""))
 384                        strcpy(execname, anonstr);
 385
 386                if (hugetlbfs_mnt_len &&
 387                    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
 388                        strcpy(execname, anonstr);
 389                        event->mmap2.flags |= MAP_HUGETLB;
 390                }
 391
 392                size = strlen(execname) + 1;
 393                memcpy(event->mmap2.filename, execname, size);
 394                size = PERF_ALIGN(size, sizeof(u64));
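                /*
                 * mmap2.len still holds the end address parsed above; the mmap
                 * and mmap2 records lay out start/len at the same offsets, so
                 * event->mmap.start below aliases event->mmap2.start and the
                 * subtraction turns len into the mapping's real length.
                 */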
 395                event->mmap2.len -= event->mmap.start;
 396                event->mmap2.header.size = (sizeof(event->mmap2) -
 397                                        (sizeof(event->mmap2.filename) - size));
 398                memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
 399                event->mmap2.header.size += machine->id_hdr_size;
 400                event->mmap2.pid = tgid;
 401                event->mmap2.tid = pid;
 402
 403                if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
 404                        rc = -1;
 405                        break;
 406                }
 407
 408                if (truncation)
 409                        break;
 410        }
 411
 412        fclose(fp);
 413        return rc;
 414}
 415
 416int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
 417                                   struct machine *machine)
 418{
 419        int rc = 0;
 420        struct map *pos;
 421        struct maps *maps = machine__kernel_maps(machine);
 422        union perf_event *event = zalloc((sizeof(event->mmap) +
 423                                          machine->id_hdr_size));
 424        if (event == NULL) {
 425                pr_debug("Not enough memory synthesizing mmap event "
 426                         "for kernel modules\n");
 427                return -1;
 428        }
 429
 430        event->header.type = PERF_RECORD_MMAP;
 431
 432        /*
 433         * kernel uses 0 for user space maps, see kernel/perf_event.c
 434         * __perf_event_mmap
 435         */
 436        if (machine__is_host(machine))
 437                event->header.misc = PERF_RECORD_MISC_KERNEL;
 438        else
 439                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
 440
 441        for (pos = maps__first(maps); pos; pos = map__next(pos)) {
 442                size_t size;
 443
 444                if (!__map__is_kmodule(pos))
 445                        continue;
 446
 447                size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
 448                event->mmap.header.type = PERF_RECORD_MMAP;
 449                event->mmap.header.size = (sizeof(event->mmap) -
 450                                        (sizeof(event->mmap.filename) - size));
 451                memset(event->mmap.filename + size, 0, machine->id_hdr_size);
 452                event->mmap.header.size += machine->id_hdr_size;
 453                event->mmap.start = pos->start;
 454                event->mmap.len   = pos->end - pos->start;
 455                event->mmap.pid   = machine->pid;
 456
 457                memcpy(event->mmap.filename, pos->dso->long_name,
 458                       pos->dso->long_name_len + 1);
 459                if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
 460                        rc = -1;
 461                        break;
 462                }
 463        }
 464
 465        free(event);
 466        return rc;
 467}
 468
 469static int __event__synthesize_thread(union perf_event *comm_event,
 470                                      union perf_event *mmap_event,
 471                                      union perf_event *fork_event,
 472                                      union perf_event *namespaces_event,
 473                                      pid_t pid, int full, perf_event__handler_t process,
 474                                      struct perf_tool *tool, struct machine *machine, bool mmap_data)
 475{
 476        char filename[PATH_MAX];
 477        DIR *tasks;
 478        struct dirent *dirent;
 479        pid_t tgid, ppid;
 480        int rc = 0;
 481
 482        /* special case: only send one comm event using passed in pid */
 483        if (!full) {
 484                tgid = perf_event__synthesize_comm(tool, comm_event, pid,
 485                                                   process, machine);
 486
 487                if (tgid == -1)
 488                        return -1;
 489
 490                if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
 491                                                      tgid, process, machine) < 0)
 492                        return -1;
 493
 494                /*
 495                 * send mmap only for thread group leader
 496                 * see thread__init_map_groups
 497                 */
 498                if (pid == tgid &&
 499                    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
 500                                                       process, machine, mmap_data))
 501                        return -1;
 502
 503                return 0;
 504        }
 505
 506        if (machine__is_default_guest(machine))
 507                return 0;
 508
 509        snprintf(filename, sizeof(filename), "%s/proc/%d/task",
 510                 machine->root_dir, pid);
 511
 512        tasks = opendir(filename);
 513        if (tasks == NULL) {
 514                pr_debug("couldn't open %s\n", filename);
 515                return 0;
 516        }
 517
 518        while ((dirent = readdir(tasks)) != NULL) {
 519                char *end;
 520                pid_t _pid;
 521
 522                _pid = strtol(dirent->d_name, &end, 10);
 523                if (*end)
 524                        continue;
 525
 526                rc = -1;
 527                if (perf_event__prepare_comm(comm_event, _pid, machine,
 528                                             &tgid, &ppid) != 0)
 529                        break;
 530
 531                if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
 532                                                ppid, process, machine) < 0)
 533                        break;
 534
 535                if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
 536                                                      tgid, process, machine) < 0)
 537                        break;
 538
 539                /*
 540                 * Send the prepared comm event
 541                 */
 542                if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
 543                        break;
 544
 545                rc = 0;
 546                if (_pid == pid) {
 547                        /* process the parent's maps too */
 548                        rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
 549                                                process, machine, mmap_data);
 550                        if (rc)
 551                                break;
 552                }
 553        }
 554
 555        closedir(tasks);
 556        return rc;
 557}
 558
 559int perf_event__synthesize_thread_map(struct perf_tool *tool,
 560                                      struct perf_thread_map *threads,
 561                                      perf_event__handler_t process,
 562                                      struct machine *machine,
 563                                      bool mmap_data)
 564{
 565        union perf_event *comm_event, *mmap_event, *fork_event;
 566        union perf_event *namespaces_event;
 567        int err = -1, thread, j;
 568
 569        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
 570        if (comm_event == NULL)
 571                goto out;
 572
 573        mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
 574        if (mmap_event == NULL)
 575                goto out_free_comm;
 576
 577        fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
 578        if (fork_event == NULL)
 579                goto out_free_mmap;
 580
 581        namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
 582                                  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
 583                                  machine->id_hdr_size);
 584        if (namespaces_event == NULL)
 585                goto out_free_fork;
 586
 587        err = 0;
 588        for (thread = 0; thread < threads->nr; ++thread) {
 589                if (__event__synthesize_thread(comm_event, mmap_event,
 590                                               fork_event, namespaces_event,
 591                                               perf_thread_map__pid(threads, thread), 0,
 592                                               process, tool, machine,
 593                                               mmap_data)) {
 594                        err = -1;
 595                        break;
 596                }
 597
 598                /*
 599                 * comm.pid is set to thread group id by
 600                 * perf_event__synthesize_comm
 601                 */
 602                if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
 603                        bool need_leader = true;
 604
 605                        /* is thread group leader in thread_map? */
 606                        for (j = 0; j < threads->nr; ++j) {
 607                                if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
 608                                        need_leader = false;
 609                                        break;
 610                                }
 611                        }
 612
 613                        /* if not, generate events for it */
 614                        if (need_leader &&
 615                            __event__synthesize_thread(comm_event, mmap_event,
 616                                                       fork_event, namespaces_event,
 617                                                       comm_event->comm.pid, 0,
 618                                                       process, tool, machine,
 619                                                       mmap_data)) {
 620                                err = -1;
 621                                break;
 622                        }
 623                }
 624        }
 625        free(namespaces_event);
 626out_free_fork:
 627        free(fork_event);
 628out_free_mmap:
 629        free(mmap_event);
 630out_free_comm:
 631        free(comm_event);
 632out:
 633        return err;
 634}
 635
 636static int __perf_event__synthesize_threads(struct perf_tool *tool,
 637                                            perf_event__handler_t process,
 638                                            struct machine *machine,
 639                                            bool mmap_data,
 640                                            struct dirent **dirent,
 641                                            int start,
 642                                            int num)
 643{
 644        union perf_event *comm_event, *mmap_event, *fork_event;
 645        union perf_event *namespaces_event;
 646        int err = -1;
 647        char *end;
 648        pid_t pid;
 649        int i;
 650
 651        comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
 652        if (comm_event == NULL)
 653                goto out;
 654
 655        mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
 656        if (mmap_event == NULL)
 657                goto out_free_comm;
 658
 659        fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
 660        if (fork_event == NULL)
 661                goto out_free_mmap;
 662
 663        namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
 664                                  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
 665                                  machine->id_hdr_size);
 666        if (namespaces_event == NULL)
 667                goto out_free_fork;
 668
 669        for (i = start; i < start + num; i++) {
 670                if (!isdigit(dirent[i]->d_name[0]))
 671                        continue;
 672
 673                pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
 674                /* only interested in proper numerical dirents */
 675                if (*end)
 676                        continue;
 677                /*
 678                 * We may race with exiting thread, so don't stop just because
 679                 * one thread couldn't be synthesized.
 680                 */
 681                __event__synthesize_thread(comm_event, mmap_event, fork_event,
 682                                           namespaces_event, pid, 1, process,
 683                                           tool, machine, mmap_data);
 684        }
 685        err = 0;
 686
 687        free(namespaces_event);
 688out_free_fork:
 689        free(fork_event);
 690out_free_mmap:
 691        free(mmap_event);
 692out_free_comm:
 693        free(comm_event);
 694out:
 695        return err;
 696}
 697
 698struct synthesize_threads_arg {
 699        struct perf_tool *tool;
 700        perf_event__handler_t process;
 701        struct machine *machine;
 702        bool mmap_data;
 703        struct dirent **dirent;
 704        int num;
 705        int start;
 706};
 707
 708static void *synthesize_threads_worker(void *arg)
 709{
 710        struct synthesize_threads_arg *args = arg;
 711
 712        __perf_event__synthesize_threads(args->tool, args->process,
 713                                         args->machine, args->mmap_data,
 714                                         args->dirent,
 715                                         args->start, args->num);
 716        return NULL;
 717}
 718
 719int perf_event__synthesize_threads(struct perf_tool *tool,
 720                                   perf_event__handler_t process,
 721                                   struct machine *machine,
 722                                   bool mmap_data,
 723                                   unsigned int nr_threads_synthesize)
 724{
 725        struct synthesize_threads_arg *args = NULL;
 726        pthread_t *synthesize_threads = NULL;
 727        char proc_path[PATH_MAX];
 728        struct dirent **dirent;
 729        int num_per_thread;
 730        int m, n, i, j;
 731        int thread_nr;
 732        int base = 0;
 733        int err = -1;
 734
 735
 736        if (machine__is_default_guest(machine))
 737                return 0;
 738
 739        snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
 740        n = scandir(proc_path, &dirent, 0, alphasort);
 741        if (n < 0)
 742                return err;
 743
 744        if (nr_threads_synthesize == UINT_MAX)
 745                thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
 746        else
 747                thread_nr = nr_threads_synthesize;
 748
 749        if (thread_nr <= 1) {
 750                err = __perf_event__synthesize_threads(tool, process,
 751                                                       machine, mmap_data,
 752                                                       dirent, base, n);
 753                goto free_dirent;
 754        }
 755        if (thread_nr > n)
 756                thread_nr = n;
 757
  758        synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
 759        if (synthesize_threads == NULL)
 760                goto free_dirent;
 761
  762        args = calloc(thread_nr, sizeof(*args));
 763        if (args == NULL)
 764                goto free_threads;
 765
 766        num_per_thread = n / thread_nr;
 767        m = n % thread_nr;
 768        for (i = 0; i < thread_nr; i++) {
 769                args[i].tool = tool;
 770                args[i].process = process;
 771                args[i].machine = machine;
 772                args[i].mmap_data = mmap_data;
 773                args[i].dirent = dirent;
 774        }
 775        for (i = 0; i < m; i++) {
 776                args[i].num = num_per_thread + 1;
 777                args[i].start = i * args[i].num;
 778        }
 779        if (i != 0)
 780                base = args[i-1].start + args[i-1].num;
 781        for (j = i; j < thread_nr; j++) {
 782                args[j].num = num_per_thread;
 783                args[j].start = base + (j - i) * args[i].num;
 784        }
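        /*
         * Worked example: with n = 10 /proc entries and thread_nr = 4,
         * num_per_thread = 2 and m = 2, so the first two workers get three
         * entries each (start 0 and 3) and the remaining two get two each
         * (start 6 and 8).
         */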
 785
 786        for (i = 0; i < thread_nr; i++) {
 787                if (pthread_create(&synthesize_threads[i], NULL,
 788                                   synthesize_threads_worker, &args[i]))
 789                        goto out_join;
 790        }
 791        err = 0;
 792out_join:
 793        for (i = 0; i < thread_nr; i++)
 794                pthread_join(synthesize_threads[i], NULL);
 795        free(args);
 796free_threads:
 797        free(synthesize_threads);
 798free_dirent:
 799        for (i = 0; i < n; i++)
 800                zfree(&dirent[i]);
 801        free(dirent);
 802
 803        return err;
 804}
 805
 806int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
 807                                              perf_event__handler_t process __maybe_unused,
 808                                              struct machine *machine __maybe_unused)
 809{
 810        return 0;
 811}
 812
 813static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 814                                                perf_event__handler_t process,
 815                                                struct machine *machine)
 816{
 817        size_t size;
 818        struct map *map = machine__kernel_map(machine);
 819        struct kmap *kmap;
 820        int err;
 821        union perf_event *event;
 822
 823        if (map == NULL)
 824                return -1;
 825
 826        kmap = map__kmap(map);
 827        if (!kmap->ref_reloc_sym)
 828                return -1;
 829
 830        /*
  831         * We should get this from /sys/kernel/sections/.text, but until that is
  832         * available use this, and once it is, keep this as a fallback for older
 833         * kernels.
 834         */
 835        event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
 836        if (event == NULL) {
 837                pr_debug("Not enough memory synthesizing mmap event "
  838                         "for the kernel map\n");
 839                return -1;
 840        }
 841
 842        if (machine__is_host(machine)) {
 843                /*
 844                 * kernel uses PERF_RECORD_MISC_USER for user space maps,
 845                 * see kernel/perf_event.c __perf_event_mmap
 846                 */
 847                event->header.misc = PERF_RECORD_MISC_KERNEL;
 848        } else {
 849                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
 850        }
 851
 852        size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
 853                        "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
 854        size = PERF_ALIGN(size, sizeof(u64));
 855        event->mmap.header.type = PERF_RECORD_MMAP;
 856        event->mmap.header.size = (sizeof(event->mmap) -
 857                        (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
 858        event->mmap.pgoff = kmap->ref_reloc_sym->addr;
 859        event->mmap.start = map->start;
 860        event->mmap.len   = map->end - event->mmap.start;
 861        event->mmap.pid   = machine->pid;
 862
 863        err = perf_tool__process_synth_event(tool, event, machine, process);
 864        free(event);
 865
 866        return err;
 867}
 868
 869int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 870                                       perf_event__handler_t process,
 871                                       struct machine *machine)
 872{
 873        int err;
 874
 875        err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
 876        if (err < 0)
 877                return err;
 878
 879        return perf_event__synthesize_extra_kmaps(tool, process, machine);
 880}
 881
 882int perf_event__synthesize_thread_map2(struct perf_tool *tool,
 883                                      struct perf_thread_map *threads,
 884                                      perf_event__handler_t process,
 885                                      struct machine *machine)
 886{
 887        union perf_event *event;
 888        int i, err, size;
 889
 890        size  = sizeof(event->thread_map);
 891        size += threads->nr * sizeof(event->thread_map.entries[0]);
 892
 893        event = zalloc(size);
 894        if (!event)
 895                return -ENOMEM;
 896
 897        event->header.type = PERF_RECORD_THREAD_MAP;
 898        event->header.size = size;
 899        event->thread_map.nr = threads->nr;
 900
 901        for (i = 0; i < threads->nr; i++) {
 902                struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
 903                char *comm = perf_thread_map__comm(threads, i);
 904
 905                if (!comm)
 906                        comm = (char *) "";
 907
 908                entry->pid = perf_thread_map__pid(threads, i);
 909                strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
 910        }
 911
 912        err = process(tool, event, NULL, machine);
 913
 914        free(event);
 915        return err;
 916}
 917
 918static void synthesize_cpus(struct cpu_map_entries *cpus,
 919                            struct perf_cpu_map *map)
 920{
 921        int i;
 922
 923        cpus->nr = map->nr;
 924
 925        for (i = 0; i < map->nr; i++)
 926                cpus->cpu[i] = map->map[i];
 927}
 928
 929static void synthesize_mask(struct perf_record_record_cpu_map *mask,
 930                            struct perf_cpu_map *map, int max)
 931{
 932        int i;
 933
 934        mask->nr = BITS_TO_LONGS(max);
 935        mask->long_size = sizeof(long);
 936
 937        for (i = 0; i < map->nr; i++)
 938                set_bit(map->map[i], mask->mask);
 939}
 940
 941static size_t cpus_size(struct perf_cpu_map *map)
 942{
 943        return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
 944}
 945
 946static size_t mask_size(struct perf_cpu_map *map, int *max)
 947{
 948        int i;
 949
 950        *max = 0;
 951
 952        for (i = 0; i < map->nr; i++) {
  953                /* bit position of the cpu is its number + 1 */
 954                int bit = map->map[i] + 1;
 955
 956                if (bit > *max)
 957                        *max = bit;
 958        }
 959
 960        return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
 961}
 962
 963void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
 964{
 965        size_t size_cpus, size_mask;
 966        bool is_dummy = perf_cpu_map__empty(map);
 967
 968        /*
 969         * Both array and mask data have variable size based
 970         * on the number of cpus and their actual values.
 971         * The size of the 'struct perf_record_cpu_map_data' is:
 972         *
 973         *   array = size of 'struct cpu_map_entries' +
  974         *           number of cpus * sizeof(u16)
 975         *
 976         *   mask  = size of 'struct perf_record_record_cpu_map' +
 977         *           maximum cpu bit converted to size of longs
 978         *
  979         * and finally + the size of 'struct perf_record_cpu_map_data'.
 980         */
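        /*
         * For example (hypothetical), with cpus 0-3 the array form costs
         * 4 * sizeof(u16) on top of 'struct cpu_map_entries', while the mask
         * form costs a single long of bits; the smaller encoding wins below,
         * except that an empty ("any cpu") dummy map always uses the array
         * form.
         */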
 981        size_cpus = cpus_size(map);
 982        size_mask = mask_size(map, max);
 983
 984        if (is_dummy || (size_cpus < size_mask)) {
 985                *size += size_cpus;
 986                *type  = PERF_CPU_MAP__CPUS;
 987        } else {
 988                *size += size_mask;
 989                *type  = PERF_CPU_MAP__MASK;
 990        }
 991
 992        *size += sizeof(struct perf_record_cpu_map_data);
 993        *size = PERF_ALIGN(*size, sizeof(u64));
 994        return zalloc(*size);
 995}
 996
 997void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
 998                              u16 type, int max)
 999{
1000        data->type = type;
1001
1002        switch (type) {
1003        case PERF_CPU_MAP__CPUS:
1004                synthesize_cpus((struct cpu_map_entries *) data->data, map);
1005                break;
1006        case PERF_CPU_MAP__MASK:
1007                synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
1008        default:
1009                break;
1010        };
1011}
1012
1013static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
1014{
1015        size_t size = sizeof(struct perf_record_cpu_map);
1016        struct perf_record_cpu_map *event;
1017        int max;
1018        u16 type;
1019
1020        event = cpu_map_data__alloc(map, &size, &type, &max);
1021        if (!event)
1022                return NULL;
1023
1024        event->header.type = PERF_RECORD_CPU_MAP;
1025        event->header.size = size;
1026        event->data.type   = type;
1027
1028        cpu_map_data__synthesize(&event->data, map, type, max);
1029        return event;
1030}
1031
1032int perf_event__synthesize_cpu_map(struct perf_tool *tool,
1033                                   struct perf_cpu_map *map,
1034                                   perf_event__handler_t process,
1035                                   struct machine *machine)
1036{
1037        struct perf_record_cpu_map *event;
1038        int err;
1039
1040        event = cpu_map_event__new(map);
1041        if (!event)
1042                return -ENOMEM;
1043
1044        err = process(tool, (union perf_event *) event, NULL, machine);
1045
1046        free(event);
1047        return err;
1048}
1049
1050int perf_event__synthesize_stat_config(struct perf_tool *tool,
1051                                       struct perf_stat_config *config,
1052                                       perf_event__handler_t process,
1053                                       struct machine *machine)
1054{
1055        struct perf_record_stat_config *event;
1056        int size, i = 0, err;
1057
1058        size  = sizeof(*event);
1059        size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1060
1061        event = zalloc(size);
1062        if (!event)
1063                return -ENOMEM;
1064
1065        event->header.type = PERF_RECORD_STAT_CONFIG;
1066        event->header.size = size;
1067        event->nr          = PERF_STAT_CONFIG_TERM__MAX;
1068
1069#define ADD(__term, __val)                                      \
1070        event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;   \
1071        event->data[i].val = __val;                             \
1072        i++;
1073
1074        ADD(AGGR_MODE,  config->aggr_mode)
1075        ADD(INTERVAL,   config->interval)
1076        ADD(SCALE,      config->scale)
1077
1078        WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1079                  "stat config terms unbalanced\n");
1080#undef ADD
1081
1082        err = process(tool, (union perf_event *) event, NULL, machine);
1083
1084        free(event);
1085        return err;
1086}
1087
1088int perf_event__synthesize_stat(struct perf_tool *tool,
1089                                u32 cpu, u32 thread, u64 id,
1090                                struct perf_counts_values *count,
1091                                perf_event__handler_t process,
1092                                struct machine *machine)
1093{
1094        struct perf_record_stat event;
1095
1096        event.header.type = PERF_RECORD_STAT;
1097        event.header.size = sizeof(event);
1098        event.header.misc = 0;
1099
1100        event.id        = id;
1101        event.cpu       = cpu;
1102        event.thread    = thread;
1103        event.val       = count->val;
1104        event.ena       = count->ena;
1105        event.run       = count->run;
1106
1107        return process(tool, (union perf_event *) &event, NULL, machine);
1108}
1109
1110int perf_event__synthesize_stat_round(struct perf_tool *tool,
1111                                      u64 evtime, u64 type,
1112                                      perf_event__handler_t process,
1113                                      struct machine *machine)
1114{
1115        struct perf_record_stat_round event;
1116
1117        event.header.type = PERF_RECORD_STAT_ROUND;
1118        event.header.size = sizeof(event);
1119        event.header.misc = 0;
1120
1121        event.time = evtime;
1122        event.type = type;
1123
1124        return process(tool, (union perf_event *) &event, NULL, machine);
1125}
1126
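/*
 * perf_event__sample_event_size() and perf_event__synthesize_sample() must
 * walk the sample fields in the same order and with the same sizes as the
 * kernel writes them (see perf_output_sample() in kernel/events/core.c) and
 * as the sample parsing code in util/evsel.c reads them back.
 */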
1127size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
1128{
1129        size_t sz, result = sizeof(struct perf_record_sample);
1130
1131        if (type & PERF_SAMPLE_IDENTIFIER)
1132                result += sizeof(u64);
1133
1134        if (type & PERF_SAMPLE_IP)
1135                result += sizeof(u64);
1136
1137        if (type & PERF_SAMPLE_TID)
1138                result += sizeof(u64);
1139
1140        if (type & PERF_SAMPLE_TIME)
1141                result += sizeof(u64);
1142
1143        if (type & PERF_SAMPLE_ADDR)
1144                result += sizeof(u64);
1145
1146        if (type & PERF_SAMPLE_ID)
1147                result += sizeof(u64);
1148
1149        if (type & PERF_SAMPLE_STREAM_ID)
1150                result += sizeof(u64);
1151
1152        if (type & PERF_SAMPLE_CPU)
1153                result += sizeof(u64);
1154
1155        if (type & PERF_SAMPLE_PERIOD)
1156                result += sizeof(u64);
1157
1158        if (type & PERF_SAMPLE_READ) {
1159                result += sizeof(u64);
1160                if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1161                        result += sizeof(u64);
1162                if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1163                        result += sizeof(u64);
1164                /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1165                if (read_format & PERF_FORMAT_GROUP) {
1166                        sz = sample->read.group.nr *
1167                             sizeof(struct sample_read_value);
1168                        result += sz;
1169                } else {
1170                        result += sizeof(u64);
1171                }
1172        }
1173
1174        if (type & PERF_SAMPLE_CALLCHAIN) {
1175                sz = (sample->callchain->nr + 1) * sizeof(u64);
1176                result += sz;
1177        }
1178
1179        if (type & PERF_SAMPLE_RAW) {
1180                result += sizeof(u32);
1181                result += sample->raw_size;
1182        }
1183
1184        if (type & PERF_SAMPLE_BRANCH_STACK) {
1185                sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1186                sz += sizeof(u64);
1187                result += sz;
1188        }
1189
1190        if (type & PERF_SAMPLE_REGS_USER) {
1191                if (sample->user_regs.abi) {
1192                        result += sizeof(u64);
1193                        sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1194                        result += sz;
1195                } else {
1196                        result += sizeof(u64);
1197                }
1198        }
1199
1200        if (type & PERF_SAMPLE_STACK_USER) {
1201                sz = sample->user_stack.size;
1202                result += sizeof(u64);
1203                if (sz) {
1204                        result += sz;
1205                        result += sizeof(u64);
1206                }
1207        }
1208
1209        if (type & PERF_SAMPLE_WEIGHT)
1210                result += sizeof(u64);
1211
1212        if (type & PERF_SAMPLE_DATA_SRC)
1213                result += sizeof(u64);
1214
1215        if (type & PERF_SAMPLE_TRANSACTION)
1216                result += sizeof(u64);
1217
1218        if (type & PERF_SAMPLE_REGS_INTR) {
1219                if (sample->intr_regs.abi) {
1220                        result += sizeof(u64);
1221                        sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1222                        result += sz;
1223                } else {
1224                        result += sizeof(u64);
1225                }
1226        }
1227
1228        if (type & PERF_SAMPLE_PHYS_ADDR)
1229                result += sizeof(u64);
1230
1231        return result;
1232}
1233
1234int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
1235                                  const struct perf_sample *sample)
1236{
1237        __u64 *array;
1238        size_t sz;
1239        /*
1240         * used for cross-endian analysis. See git commit 65014ab3
1241         * for why this goofiness is needed.
1242         */
1243        union u64_swap u;
1244
1245        array = event->sample.array;
1246
1247        if (type & PERF_SAMPLE_IDENTIFIER) {
1248                *array = sample->id;
1249                array++;
1250        }
1251
1252        if (type & PERF_SAMPLE_IP) {
1253                *array = sample->ip;
1254                array++;
1255        }
1256
1257        if (type & PERF_SAMPLE_TID) {
1258                u.val32[0] = sample->pid;
1259                u.val32[1] = sample->tid;
1260                *array = u.val64;
1261                array++;
1262        }
1263
1264        if (type & PERF_SAMPLE_TIME) {
1265                *array = sample->time;
1266                array++;
1267        }
1268
1269        if (type & PERF_SAMPLE_ADDR) {
1270                *array = sample->addr;
1271                array++;
1272        }
1273
1274        if (type & PERF_SAMPLE_ID) {
1275                *array = sample->id;
1276                array++;
1277        }
1278
1279        if (type & PERF_SAMPLE_STREAM_ID) {
1280                *array = sample->stream_id;
1281                array++;
1282        }
1283
1284        if (type & PERF_SAMPLE_CPU) {
1285                u.val32[0] = sample->cpu;
1286                u.val32[1] = 0;
1287                *array = u.val64;
1288                array++;
1289        }
1290
1291        if (type & PERF_SAMPLE_PERIOD) {
1292                *array = sample->period;
1293                array++;
1294        }
1295
1296        if (type & PERF_SAMPLE_READ) {
1297                if (read_format & PERF_FORMAT_GROUP)
1298                        *array = sample->read.group.nr;
1299                else
1300                        *array = sample->read.one.value;
1301                array++;
1302
1303                if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1304                        *array = sample->read.time_enabled;
1305                        array++;
1306                }
1307
1308                if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1309                        *array = sample->read.time_running;
1310                        array++;
1311                }
1312
1313                /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1314                if (read_format & PERF_FORMAT_GROUP) {
1315                        sz = sample->read.group.nr *
1316                             sizeof(struct sample_read_value);
1317                        memcpy(array, sample->read.group.values, sz);
1318                        array = (void *)array + sz;
1319                } else {
1320                        *array = sample->read.one.id;
1321                        array++;
1322                }
1323        }
1324
1325        if (type & PERF_SAMPLE_CALLCHAIN) {
1326                sz = (sample->callchain->nr + 1) * sizeof(u64);
1327                memcpy(array, sample->callchain, sz);
1328                array = (void *)array + sz;
1329        }
1330
1331        if (type & PERF_SAMPLE_RAW) {
1332                u.val32[0] = sample->raw_size;
1333                *array = u.val64;
1334                array = (void *)array + sizeof(u32);
1335
1336                memcpy(array, sample->raw_data, sample->raw_size);
1337                array = (void *)array + sample->raw_size;
1338        }
1339
1340        if (type & PERF_SAMPLE_BRANCH_STACK) {
1341                sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1342                sz += sizeof(u64);
1343                memcpy(array, sample->branch_stack, sz);
1344                array = (void *)array + sz;
1345        }
1346
1347        if (type & PERF_SAMPLE_REGS_USER) {
1348                if (sample->user_regs.abi) {
1349                        *array++ = sample->user_regs.abi;
1350                        sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1351                        memcpy(array, sample->user_regs.regs, sz);
1352                        array = (void *)array + sz;
1353                } else {
1354                        *array++ = 0;
1355                }
1356        }
1357
1358        if (type & PERF_SAMPLE_STACK_USER) {
1359                sz = sample->user_stack.size;
1360                *array++ = sz;
1361                if (sz) {
1362                        memcpy(array, sample->user_stack.data, sz);
1363                        array = (void *)array + sz;
1364                        *array++ = sz;
1365                }
1366        }
1367
1368        if (type & PERF_SAMPLE_WEIGHT) {
1369                *array = sample->weight;
1370                array++;
1371        }
1372
1373        if (type & PERF_SAMPLE_DATA_SRC) {
1374                *array = sample->data_src;
1375                array++;
1376        }
1377
1378        if (type & PERF_SAMPLE_TRANSACTION) {
1379                *array = sample->transaction;
1380                array++;
1381        }
1382
1383        if (type & PERF_SAMPLE_REGS_INTR) {
1384                if (sample->intr_regs.abi) {
1385                        *array++ = sample->intr_regs.abi;
1386                        sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1387                        memcpy(array, sample->intr_regs.regs, sz);
1388                        array = (void *)array + sz;
1389                } else {
1390                        *array++ = 0;
1391                }
1392        }
1393
1394        if (type & PERF_SAMPLE_PHYS_ADDR) {
1395                *array = sample->phys_addr;
1396                array++;
1397        }
1398
1399        return 0;
1400}
1401
1402int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
1403                                    struct evlist *evlist, struct machine *machine)
1404{
1405        union perf_event *ev;
1406        struct evsel *evsel;
1407        size_t nr = 0, i = 0, sz, max_nr, n;
1408        int err;
1409
1410        pr_debug2("Synthesizing id index\n");
1411
1412        max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
1413                 sizeof(struct id_index_entry);
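        /*
         * header.size is a u16, so one PERF_RECORD_ID_INDEX can carry at most
         * max_nr entries; when the evlist has more ids than that, the event
         * built below is flushed and refilled in chunks.
         */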
1414
1415        evlist__for_each_entry(evlist, evsel)
1416                nr += evsel->core.ids;
1417
1418        n = nr > max_nr ? max_nr : nr;
1419        sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
1420        ev = zalloc(sz);
1421        if (!ev)
1422                return -ENOMEM;
1423
1424        ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1425        ev->id_index.header.size = sz;
1426        ev->id_index.nr = n;
1427
1428        evlist__for_each_entry(evlist, evsel) {
1429                u32 j;
1430
1431                for (j = 0; j < evsel->core.ids; j++) {
1432                        struct id_index_entry *e;
1433                        struct perf_sample_id *sid;
1434
1435                        if (i >= n) {
1436                                err = process(tool, ev, NULL, machine);
1437                                if (err)
1438                                        goto out_err;
1439                                nr -= n;
1440                                i = 0;
1441                        }
1442
1443                        e = &ev->id_index.entries[i++];
1444
1445                        e->id = evsel->core.id[j];
1446
1447                        sid = perf_evlist__id2sid(evlist, e->id);
1448                        if (!sid) {
1449                                free(ev);
1450                                return -ENOENT;
1451                        }
1452
1453                        e->idx = sid->idx;
1454                        e->cpu = sid->cpu;
1455                        e->tid = sid->tid;
1456                }
1457        }
1458
1459        sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
1460        ev->id_index.header.size = sz;
1461        ev->id_index.nr = nr;
1462
1463        err = process(tool, ev, NULL, machine);
1464out_err:
1465        free(ev);
1466
1467        return err;
1468}
1469
1470int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
1471                                  struct target *target, struct perf_thread_map *threads,
1472                                  perf_event__handler_t process, bool data_mmap,
1473                                  unsigned int nr_threads_synthesize)
1474{
1475        if (target__has_task(target))
1476                return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
1477        else if (target__has_cpu(target))
1478                return perf_event__synthesize_threads(tool, process,
1479                                                      machine, data_mmap,
1480                                                      nr_threads_synthesize);
1481        /* command specified */
1482        return 0;
1483}
1484
1485int machine__synthesize_threads(struct machine *machine, struct target *target,
1486                                struct perf_thread_map *threads, bool data_mmap,
1487                                unsigned int nr_threads_synthesize)
1488{
1489        return __machine__synthesize_threads(machine, NULL, target, threads,
1490                                             perf_event__process, data_mmap,
1491                                             nr_threads_synthesize);
1492}
1493
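/*
 * Usage sketch (hypothetical caller, not part of this file): a record-style
 * tool with a target, an evlist and record options would typically do
 * something like
 *
 *      err = machine__synthesize_threads(&session->machines.host,
 *                                        &opts->target,
 *                                        evlist->core.threads,
 *                                        opts->sample_address,
 *                                        opts->nr_threads_synthesize);
 *
 * which in turn calls __machine__synthesize_threads() with the default
 * perf_event__process callback.
 */
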
1494static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
1495{
1496        struct perf_record_event_update *ev;
1497
1498        size += sizeof(*ev);
1499        size  = PERF_ALIGN(size, sizeof(u64));
1500
1501        ev = zalloc(size);
1502        if (ev) {
1503                ev->header.type = PERF_RECORD_EVENT_UPDATE;
1504                ev->header.size = (u16)size;
1505                ev->type        = type;
1506                ev->id          = id;
1507        }
1508        return ev;
1509}
1510
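    /*
     * The next three helpers emit PERF_EVENT_UPDATE__UNIT, __SCALE and __NAME
     * updates carrying the evsel's unit string, scale factor and name, keyed
     * by the evsel's first sample id (evsel->core.id[0]).
     */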
1511int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
1512                                             perf_event__handler_t process)
1513{
1514        size_t size = strlen(evsel->unit);
1515        struct perf_record_event_update *ev;
1516        int err;
1517
1518        ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
1519        if (ev == NULL)
1520                return -ENOMEM;
1521
1522        strlcpy(ev->data, evsel->unit, size + 1);
1523        err = process(tool, (union perf_event *)ev, NULL, NULL);
1524        free(ev);
1525        return err;
1526}
1527
1528int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
1529                                              perf_event__handler_t process)
1530{
1531        struct perf_record_event_update *ev;
1532        struct perf_record_event_update_scale *ev_data;
1533        int err;
1534
1535        ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
1536        if (ev == NULL)
1537                return -ENOMEM;
1538
1539        ev_data = (struct perf_record_event_update_scale *)ev->data;
1540        ev_data->scale = evsel->scale;
1541        err = process(tool, (union perf_event *)ev, NULL, NULL);
1542        free(ev);
1543        return err;
1544}
1545
1546int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
1547                                             perf_event__handler_t process)
1548{
1549        struct perf_record_event_update *ev;
1550        size_t len = strlen(evsel->name);
1551        int err;
1552
1553        ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
1554        if (ev == NULL)
1555                return -ENOMEM;
1556
1557        strlcpy(ev->data, evsel->name, len + 1);
1558        err = process(tool, (union perf_event *)ev, NULL, NULL);
1559        free(ev);
1560        return err;
1561}
1562
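    /*
     * Emit a PERF_EVENT_UPDATE__CPUS update whose payload is a synthesized
     * perf_record_cpu_map_data describing the evsel's own cpu map.  Nothing
     * is emitted when the evsel has no cpu map of its own.
     */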
1563int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
1564                                             perf_event__handler_t process)
1565{
1566        size_t size = sizeof(struct perf_record_event_update);
1567        struct perf_record_event_update *ev;
1568        int max, err;
1569        u16 type;
1570
1571        if (!evsel->core.own_cpus)
1572                return 0;
1573
1574        ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
1575        if (!ev)
1576                return -ENOMEM;
1577
1578        ev->header.type = PERF_RECORD_EVENT_UPDATE;
1579        ev->header.size = (u16)size;
1580        ev->type        = PERF_EVENT_UPDATE__CPUS;
1581        ev->id          = evsel->core.id[0];
1582
1583        cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
1584                                 evsel->core.own_cpus, type, max);
1585
1586        err = process(tool, (union perf_event *)ev, NULL, NULL);
1587        free(ev);
1588        return err;
1589}
1590
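    /*
     * Emit one PERF_RECORD_HEADER_ATTR per evsel in @evlist, each carrying
     * the evsel's perf_event_attr followed by its sample ids.
     */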
1591int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
1592                                 perf_event__handler_t process)
1593{
1594        struct evsel *evsel;
1595        int err = 0;
1596
1597        evlist__for_each_entry(evlist, evsel) {
1598                err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
1599                                                  evsel->core.id, process);
1600                if (err) {
1601                        pr_debug("failed to create perf header attribute\n");
1602                        return err;
1603                }
1604        }
1605
1606        return err;
1607}
1608
1609static bool has_unit(struct evsel *evsel)
1610{
1611        return evsel->unit && *evsel->unit;
1612}
1613
1614static bool has_scale(struct evsel *evsel)
1615{
1616        return evsel->scale != 1;
1617}
1618
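    /*
     * Emit, for every supported evsel, the details the attr event does not
     * carry: unit, scale, own cpu map and - for pipe output only - the name.
     */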
1619int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
1620                                      perf_event__handler_t process, bool is_pipe)
1621{
1622        struct evsel *evsel;
1623        int err;
1624
1625        /*
1626         * Synthesize per-event details not carried within the
1627         * attr event - unit, scale, name
1628         */
1629        evlist__for_each_entry(evsel_list, evsel) {
1630                if (!evsel->supported)
1631                        continue;
1632
1633                /*
1634                 * Synthesize unit and scale only if it's defined.
1635                 */
1636                if (has_unit(evsel)) {
1637                        err = perf_event__synthesize_event_update_unit(tool, evsel, process);
1638                        if (err < 0) {
1639                                pr_err("Couldn't synthesize evsel unit.\n");
1640                                return err;
1641                        }
1642                }
1643
1644                if (has_scale(evsel)) {
1645                        err = perf_event__synthesize_event_update_scale(tool, evsel, process);
1646                        if (err < 0) {
1647                                pr_err("Couldn't synthesize evsel scale.\n");
1648                                return err;
1649                        }
1650                }
1651
1652                if (evsel->core.own_cpus) {
1653                        err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
1654                        if (err < 0) {
1655                                pr_err("Couldn't synthesize evsel cpus.\n");
1656                                return err;
1657                        }
1658                }
1659
1660                /*
1661                 * Name is needed only for pipe output,
1662                 * perf.data carries event names.
1663                 */
1664                if (is_pipe) {
1665                        err = perf_event__synthesize_event_update_name(tool, evsel, process);
1666                        if (err < 0) {
1667                                pr_err("Couldn't synthesize evsel name.\n");
1668                                return err;
1669                        }
1670                }
1671        }
1672        return 0;
1673}
1674
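    /*
     * Layout of the PERF_RECORD_HEADER_ATTR record built below:
     *
     *     struct perf_event_header   header
     *     struct perf_event_attr     attr     (padded to a u64 boundary)
     *     u64                        id[ids]
     *
     * header.size is a u16, so an id array too large to represent makes the
     * size check below fail and the function returns -E2BIG.
     */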
1675int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
1676                                u32 ids, u64 *id, perf_event__handler_t process)
1677{
1678        union perf_event *ev;
1679        size_t size;
1680        int err;
1681
1682        size = sizeof(struct perf_event_attr);
1683        size = PERF_ALIGN(size, sizeof(u64));
1684        size += sizeof(struct perf_event_header);
1685        size += ids * sizeof(u64);
1686
1687        ev = zalloc(size);
1688
1689        if (ev == NULL)
1690                return -ENOMEM;
1691
1692        ev->attr.attr = *attr;
1693        memcpy(ev->attr.id, id, ids * sizeof(u64));
1694
1695        ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
1696        ev->attr.header.size = (u16)size;
1697
1698        if (ev->attr.header.size == size)
1699                err = process(tool, ev, NULL, NULL);
1700        else
1701                err = -E2BIG;
1702
1703        free(ev);
1704
1705        return err;
1706}
1707
1708int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
1709                                        perf_event__handler_t process)
1710{
1711        union perf_event ev;
1712        struct tracing_data *tdata;
1713        ssize_t size = 0, aligned_size = 0, padding;
1714        struct feat_fd ff;
1715
1716        /*
1717         * We are going to store the size of the data followed
1718         * by the data contents. Since the output fd is a pipe,
1719         * we cannot seek back to store the size of the data once
1720         * we know it (see the layout sketch after this function).
1721         * Instead we:
1722         * - write the tracing data to the temp file
1723         * - get/write the data size to the pipe
1724         * - write the tracing data from the temp file
1725         *   to the pipe
1726         */
1727        tdata = tracing_data_get(&evlist->core.entries, fd, true);
1728        if (!tdata)
1729                return -1;
1730
1731        memset(&ev, 0, sizeof(ev));
1732
1733        ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
1734        size = tdata->size;
1735        aligned_size = PERF_ALIGN(size, sizeof(u64));
1736        padding = aligned_size - size;
1737        ev.tracing_data.header.size = sizeof(ev.tracing_data);
1738        ev.tracing_data.size = aligned_size;
1739
1740        process(tool, &ev, NULL, NULL);
1741
1742        /*
1743         * The put function will copy all the tracing data
1744         * stored in the temp file to the pipe.
1745         */
1746        tracing_data_put(tdata);
1747
1748        ff = (struct feat_fd){ .fd = fd };
1749        if (write_padded(&ff, NULL, 0, padding))
1750                return -1;
1751
1752        return aligned_size;
1753}
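
    /*
     * Layout sketch of what the code above puts on the pipe:
     *
     *     PERF_RECORD_HEADER_TRACING_DATA record, whose 'size' field holds
     *                                            the u64-aligned payload size
     *     tdata->size bytes of tracing data      (copied by tracing_data_put())
     *     zero padding up to the aligned size    (written via write_padded())
     */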
1754
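    /*
     * Emit a PERF_RECORD_HEADER_BUILD_ID for @pos, but only if the dso was
     * actually hit during the session; the record carries the dso's build-id
     * and its long name, padded to NAME_ALIGN.
     */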
1755int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
1756                                    perf_event__handler_t process, struct machine *machine)
1757{
1758        union perf_event ev;
1759        size_t len;
1760
1761        if (!pos->hit)
1762                return 0;
1763
1764        memset(&ev, 0, sizeof(ev));
1765
1766        len = pos->long_name_len + 1;
1767        len = PERF_ALIGN(len, NAME_ALIGN);
1768        memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
1769        ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
1770        ev.build_id.header.misc = misc;
1771        ev.build_id.pid = machine->pid;
1772        ev.build_id.header.size = sizeof(ev.build_id) + len;
1773        memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
1774
1775        return process(tool, &ev, NULL, machine);
1776}
1777
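    /*
     * Emit the synthetic records a stat session needs ahead of any counter
     * data, in this order:
     *
     *     PERF_RECORD_HEADER_ATTR   (one per evsel, only when @attrs is set)
     *     PERF_RECORD_EVENT_UPDATE  (unit/scale/cpus/name as applicable)
     *     PERF_RECORD_THREAD_MAP
     *     PERF_RECORD_CPU_MAP
     *     PERF_RECORD_STAT_CONFIG
     */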
1778int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
1779                                       struct evlist *evlist, perf_event__handler_t process, bool attrs)
1780{
1781        int err;
1782
1783        if (attrs) {
1784                err = perf_event__synthesize_attrs(tool, evlist, process);
1785                if (err < 0) {
1786                        pr_err("Couldn't synthesize attrs.\n");
1787                        return err;
1788                }
1789        }
1790
1791        err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
            if (err < 0) {
                    pr_err("Couldn't synthesize extra attrs.\n");
                    return err;
            }

1792        err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
1793        if (err < 0) {
1794                pr_err("Couldn't synthesize thread map.\n");
1795                return err;
1796        }
1797
1798        err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
1799        if (err < 0) {
1800                pr_err("Couldn't synthesize cpu map.\n");
1801                return err;
1802        }
1803
1804        err = perf_event__synthesize_stat_config(tool, config, process, NULL);
1805        if (err < 0) {
1806                pr_err("Couldn't synthesize config.\n");
1807                return err;
1808        }
1809
1810        return 0;
1811}
1812
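    /*
     * Weak default: a no-op.  Architectures that can translate hardware
     * timestamps to perf time from the mmapped event page (x86 with TSC, for
     * instance) override this to emit a PERF_RECORD_TIME_CONV record.
     */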
1813int __weak perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
1814                                       struct perf_tool *tool __maybe_unused,
1815                                       perf_event__handler_t process __maybe_unused,
1816                                       struct machine *machine __maybe_unused)
1817{
1818        return 0;
1819}
1820
1821extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
1822
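    /*
     * Walk the features recorded in the session header and, for each one
     * that can be synthesized, serialize it with its ->write() op and emit it
     * as a PERF_RECORD_HEADER_FEATURE record.  A final record with feat_id ==
     * HEADER_LAST_FEATURE marks the end of the feature stream.
     */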
1823int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
1824                                    struct evlist *evlist, perf_event__handler_t process)
1825{
1826        struct perf_header *header = &session->header;
1827        struct perf_record_header_feature *fe;
1828        struct feat_fd ff;
1829        size_t sz, sz_hdr;
1830        int feat, ret;
1831
1832        sz_hdr = sizeof(fe->header);
1833        sz = sizeof(union perf_event);
1834        /* get a nice alignment */
1835        sz = PERF_ALIGN(sz, page_size);
1836
1837        memset(&ff, 0, sizeof(ff));
1838
1839        ff.buf = malloc(sz);
1840        if (!ff.buf)
1841                return -ENOMEM;
1842
1843        ff.size = sz - sz_hdr;
1844        ff.ph = &session->header;
1845
1846        for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
1847                if (!feat_ops[feat].synthesize) {
1848                        pr_debug("No synthesize callback for header feature %d\n", feat);
1849                        continue;
1850                }
1851
1852                ff.offset = sizeof(*fe);
1853
1854                ret = feat_ops[feat].write(&ff, evlist);
1855                if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
1856                        pr_debug("Error writing feature\n");
1857                        continue;
1858                }
1859                /* ff.buf may have changed due to realloc in do_write() */
1860                fe = ff.buf;
1861                memset(fe, 0, sizeof(*fe));
1862
1863                fe->feat_id = feat;
1864                fe->header.type = PERF_RECORD_HEADER_FEATURE;
1865                fe->header.size = ff.offset;
1866
1867                ret = process(tool, ff.buf, NULL, NULL);
1868                if (ret) {
1869                        free(ff.buf);
1870                        return ret;
1871                }
1872        }
1873
1874        /* Send HEADER_LAST_FEATURE mark. */
1875        fe = ff.buf;
1876        fe->feat_id     = HEADER_LAST_FEATURE;
1877        fe->header.type = PERF_RECORD_HEADER_FEATURE;
1878        fe->header.size = sizeof(*fe);
1879
1880        ret = process(tool, ff.buf, NULL, NULL);
1881
1882        free(ff.buf);
1883        return ret;
1884}
1885