#include <errno.h>
#include <inttypes.h>
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/zalloc.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>
#include <bpf/libbpf.h>
#include <perf/cpumap.h>

#include "dso.h"
#include "evlist.h"
#include "evsel.h"
#include "util/evsel_fprintf.h"
#include "header.h"
#include "memswap.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "tool.h"
#include "time-utils.h"
#include "units.h"
#include "util/util.h"
#include "cputopo.h"
#include "bpf-event.h"
#include "clockid.h"

#include <linux/ctype.h>
#include <internal/lib.h>

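/*
 * magic2 is the u64 encoding of the string "PERFILE2"; keeping it (and its
 * byte-swapped twin below) numeric lets the reader detect the endianness of
 * a perf.data file from the magic value alone.
 */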
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
{
	ssize_t ret = writen(ff->fd, buf, size);

	if (ret != (ssize_t)size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
{
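	/* struct perf_event_header::size is u16, which caps a buffered section */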
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	if (size + ff->offset > max_size)
		return -E2BIG;

	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;

	return 0;
}

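/*
 * Write to the in-memory buffer when one is set up (pipe mode), to the
 * file descriptor otherwise. Returns 0 on success, a negative value on error.
 */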
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
	if (!ff->buf)
		return __do_write_fd(ff, buf, size);
	return __do_write_buf(ff, buf, size);
}

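/* Writes the bitmap as a u64 bit count followed by the raw u64 words. */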
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
	u64 *p = (u64 *) set;
	int i, ret;

	ret = do_write(ff, &size, sizeof(size));
	if (ret < 0)
		return ret;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_write(ff, p + i, sizeof(*p));
		if (ret < 0)
			return ret;
	}

	return 0;
}

int write_padded(struct feat_fd *ff, const void *bf,
		 size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(ff, bf, count);

	if (!err)
		err = do_write(ff, zero_buf, count_aligned - count);

	return err;
}

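/*
 * On-disk strings are a u32 length followed by the string itself,
 * zero-padded up to NAME_ALIGN.
 */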
#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

static int do_write_string(struct feat_fd *ff, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	ret = do_write(ff, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(ff, str, olen, len);
}

static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
{
	ssize_t ret = readn(ff->fd, addr, size);

	if (ret != size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (size > (ssize_t)ff->size - ff->offset)
		return -1;

	memcpy(addr, ff->buf + ff->offset, size);
	ff->offset += size;

	return 0;
}

static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (!ff->buf)
		return __do_read_fd(ff, addr, size);
	return __do_read_buf(ff, addr, size);
}

static int do_read_u32(struct feat_fd *ff, u32 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_32(*addr);
	return 0;
}

static int do_read_u64(struct feat_fd *ff, u64 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_64(*addr);
	return 0;
}

static char *do_read_string(struct feat_fd *ff)
{
	u32 len;
	char *buf;

	if (do_read_u32(ff, &len))
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

	if (!__do_read(ff, buf, len)) {
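		/*
		 * strings are padded by zeroes: the string in buf may be
		 * shorter than len.
		 */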
		return buf;
	}

	free(buf);
	return NULL;
}

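/* Reads a bitmap written by do_write_bitmap(): a u64 size, then the words. */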
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
	unsigned long *set;
	u64 size, *p;
	int i, ret;

	ret = do_read_u64(ff, &size);
	if (ret)
		return ret;

	set = bitmap_alloc(size);
	if (!set)
		return -ENOMEM;

	p = (u64 *) set;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_read_u64(ff, p + i);
		if (ret < 0) {
			free(set);
			return ret;
		}
	}

	*pset = set;
	*psize = size;
	return 0;
}

static int write_tracing_data(struct feat_fd *ff,
			      struct evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->core.entries);
}

static int write_build_id(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	err = perf_session__write_buildid_table(session, ff);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}

static int write_hostname(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.nodename);
}

static int write_osrelease(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.release);
}

static int write_arch(struct feat_fd *ff,
		      struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.machine);
}

static int write_version(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write_string(ff, perf_version_string);
}

static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = skip_spaces(r);
			*p = ' ';
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}

static int write_cpudesc(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
#if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__)
#define CPUINFO_PROC	{ "cpu", }
#elif defined(__s390__)
#define CPUINFO_PROC	{ "vendor_id", }
#elif defined(__sh__)
#define CPUINFO_PROC	{ "cpu type", }
#elif defined(__alpha__) || defined(__mips__)
#define CPUINFO_PROC	{ "cpu model", }
#elif defined(__arm__)
#define CPUINFO_PROC	{ "model name", "Processor", }
#elif defined(__arc__)
#define CPUINFO_PROC	{ "Processor", }
#elif defined(__xtensa__)
#define CPUINFO_PROC	{ "core ID", }
#else
#define CPUINFO_PROC	{ "model name", }
#endif
	const char *cpuinfo_procs[] = CPUINFO_PROC;
#undef CPUINFO_PROC
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;
		ret = __write_cpudesc(ff, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}

static int write_nrcpus(struct feat_fd *ff,
			struct evlist *evlist __maybe_unused)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nrc = cpu__max_present_cpu();

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(ff, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(ff, &nra, sizeof(nra));
}

static int write_event_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	struct evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->core.nr_entries;

	ret = do_write(ff, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	sz = (u32)sizeof(evsel->core.attr);
	ret = do_write(ff, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

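	/*
	 * For each event: the attr struct, the number of unique ids (one per
	 * instance of the event), the event name as given on the command
	 * line, then the ids themselves as an array of u64.
	 */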
	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(ff, &evsel->core.attr, sz);
		if (ret < 0)
			return ret;

		nri = evsel->core.ids;
		ret = do_write(ff, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, evsel__name(evsel));
		if (ret < 0)
			return ret;

		ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int write_cmdline(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	char pbuf[MAXPATHLEN], *buf;
	int i, ret, n;

	buf = perf_exe(pbuf, MAXPATHLEN);

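	/* account for the path of the perf binary itself, written first */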
	n = perf_env.nr_cmdline + 1;

	ret = do_write(ff, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(ff, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(ff, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int write_cpu_topology(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct cpu_topology *tp;
	u32 i;
	int ret, j;

	tp = cpu_topology__new();
	if (!tp)
		return -1;

	ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(ff, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(ff, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			return ret;
		ret = do_write(ff, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			return ret;
	}

	if (!tp->die_sib)
		goto done;

	ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->die_sib; i++) {
		ret = do_write_string(ff, tp->die_siblings[i]);
		if (ret < 0)
			goto done;
	}

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].die_id,
			       sizeof(perf_env.cpu[j].die_id));
		if (ret < 0)
			return ret;
	}

done:
	cpu_topology__delete(tp);
	return ret;
}

static int write_total_mem(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}
	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(ff, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}

static int write_numa_topology(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct numa_topology *tp;
	int ret = -1;
	u32 i;

	tp = numa_topology__new();
	if (!tp)
		return -ENOMEM;

	ret = do_write(ff, &tp->nr, sizeof(u32));
	if (ret < 0)
		goto err;

	for (i = 0; i < tp->nr; i++) {
		struct numa_topology_node *n = &tp->nodes[i];

		ret = do_write(ff, &n->node, sizeof(u32));
		if (ret < 0)
			goto err;

		ret = do_write(ff, &n->mem_total, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write(ff, &n->mem_free, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write_string(ff, n->cpus);
		if (ret < 0)
			goto err;
	}

	ret = 0;

err:
	numa_topology__delete(tp);
	return ret;
}
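/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */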
static int write_pmu_mappings(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	int ret;

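	/*
	 * Do a first pass to count the number of PMUs: this avoids an
	 * lseek() back to patch the count, so it also works in pipe mode.
	 */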
	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}

	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;

		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, pmu->name);
		if (ret < 0)
			return ret;
	}

	return 0;
}
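/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */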
static int write_group_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct evsel *evsel;
	int ret;

	ret = do_write(ff, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->core.nr_members;

			ret = do_write_string(ff, name);
			if (ret < 0)
				return ret;

			ret = do_write(ff, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(ff, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

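/*
 * Return the CPU id as a raw string.
 *
 * Each architecture should provide a more precise id string that
 * can be used to match the architecture's "mapfile".
 */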
char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
	return NULL;
}
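/*
 * Returns 0 when the cpuid regex from mapfile.csv matches the cpuid string
 * generated on this platform, non-zero otherwise.
 */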
int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	int match;

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
		pr_info("Invalid regular expression %s\n", mapcpuid);
		return 1;
	}

	match = !regexec(&re, cpuid, 1, pmatch, 0);
	regfree(&re);
	if (match) {
		size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);

		if (match_len == strlen(cpuid))
			return 0;
	}
	return 1;
}
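/*
 * Default get_cpuid(): nothing gets recorded.
 * The actual implementation lives in arch/$(SRCARCH)/util/header.c.
 */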
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return ENOSYS;
}

static int write_cpuid(struct feat_fd *ff,
		       struct evlist *evlist __maybe_unused)
{
	char buffer[64];
	int ret;

	ret = get_cpuid(buffer, sizeof(buffer));
	if (ret)
		return -1;

	return do_write_string(ff, buffer);
}

static int write_branch_stack(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_auxtrace(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}

static int write_clockid(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write(ff, &ff->ph->env.clock.clockid_res_ns,
			sizeof(ff->ph->env.clock.clockid_res_ns));
}

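/*
 * CLOCK_DATA record layout: a u32 version, the u32 clockid, the wall-clock
 * (TOD) reference time in ns, then the clockid reference time in ns.
 */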
static int write_clock_data(struct feat_fd *ff,
			    struct evlist *evlist __maybe_unused)
{
	u64 *data64;
	u32 data32;
	int ret;

	data32 = 1;

	ret = do_write(ff, &data32, sizeof(data32));
	if (ret < 0)
		return ret;

	data32 = ff->ph->env.clock.clockid;

	ret = do_write(ff, &data32, sizeof(data32));
	if (ret < 0)
		return ret;

	data64 = &ff->ph->env.clock.tod_ns;

	ret = do_write(ff, data64, sizeof(*data64));
	if (ret < 0)
		return ret;

	data64 = &ff->ph->env.clock.clockid_ns;

	return do_write(ff, data64, sizeof(*data64));
}

static int write_dir_format(struct feat_fd *ff,
			    struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	if (WARN_ON(!perf_data__is_dir(data)))
		return -1;

	return do_write(ff, &data->dir.version, sizeof(data->dir.version));
}

#ifdef HAVE_LIBBPF_SUPPORT
static int write_bpf_prog_info(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;
	int ret;

	down_read(&env->bpf_progs.lock);

	ret = do_write(ff, &env->bpf_progs.infos_cnt,
		       sizeof(env->bpf_progs.infos_cnt));
	if (ret < 0)
		goto out;

	root = &env->bpf_progs.infos;
	next = rb_first(root);
	while (next) {
		struct bpf_prog_info_node *node;
		size_t len;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		len = sizeof(struct bpf_prog_info_linear) +
			node->info_linear->data_len;

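		/* before writing info_linear, translate addresses to offsets */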
		bpf_program__bpil_addr_to_offs(node->info_linear);
		ret = do_write(ff, node->info_linear, len);
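		/*
		 * translate back to addresses even when do_write() fails,
		 * so that this function never changes the data.
		 */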
		bpf_program__bpil_offs_to_addr(node->info_linear);
		if (ret < 0)
			goto out;
	}
out:
	up_read(&env->bpf_progs.lock);
	return ret;
}
#else
static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
			       struct evlist *evlist __maybe_unused)
{
	return 0;
}
#endif

static int write_bpf_btf(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;
	int ret;

	down_read(&env->bpf_progs.lock);

	ret = do_write(ff, &env->bpf_progs.btfs_cnt,
		       sizeof(env->bpf_progs.btfs_cnt));

	if (ret < 0)
		goto out;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);
	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		ret = do_write(ff, &node->id,
			       sizeof(u32) * 2 + node->data_size);
		if (ret < 0)
			goto out;
	}
out:
	up_read(&env->bpf_progs.lock);
	return ret;
}

static int cpu_cache_level__sort(const void *a, const void *b)
{
	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;

	return cache_a->level - cache_b->level;
}

static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
	if (a->level != b->level)
		return false;

	if (a->line_size != b->line_size)
		return false;

	if (a->sets != b->sets)
		return false;

	if (a->ways != b->ways)
		return false;

	if (strcmp(a->type, b->type))
		return false;

	if (strcmp(a->size, b->size))
		return false;

	if (strcmp(a->map, b->map))
		return false;

	return true;
}

static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = strim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		zfree(&cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = strim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		zfree(&cache->size);
		zfree(&cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = strim(cache->map);
	return 0;
}

static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}

#define MAX_CACHE_LVL 4

static int build_caches(struct cpu_cache_level caches[], u32 *cntp)
{
	u32 i, cnt = 0;
	u32 nr, cpu;
	u16 level;

	nr = cpu__max_cpu();

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < MAX_CACHE_LVL; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);
		}
	}
	*cntp = cnt;
	return 0;
}

static int write_cache(struct feat_fd *ff,
		       struct evlist *evlist __maybe_unused)
{
	u32 max_caches = cpu__max_cpu() * MAX_CACHE_LVL;
	struct cpu_cache_level caches[max_caches];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		#define _W(v)						\
			ret = do_write(ff, &c->v, sizeof(u32));		\
			if (ret < 0)					\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		#define _W(v)						\
			ret = do_write_string(ff, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}

static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_sample_time(struct feat_fd *ff,
			     struct evlist *evlist)
{
	int ret;

	ret = do_write(ff, &evlist->first_sample_time,
		       sizeof(evlist->first_sample_time));
	if (ret < 0)
		return ret;

	return do_write(ff, &evlist->last_sample_time,
			sizeof(evlist->last_sample_time));
}

static int memory_node__read(struct memory_node *n, unsigned long idx)
{
	unsigned int phys, size = 0;
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;

#define for_each_memory(mem, dir)					\
	while ((ent = readdir(dir)))					\
		if (strcmp(ent->d_name, ".") &&				\
		    strcmp(ent->d_name, "..") &&			\
		    sscanf(ent->d_name, "memory%u", &mem) == 1)

	scnprintf(path, PATH_MAX,
		  "%s/devices/system/node/node%lu",
		  sysfs__mountpoint(), idx);

	dir = opendir(path);
	if (!dir) {
		pr_warning("failed: can't open memory sysfs data\n");
		return -1;
	}

	for_each_memory(phys, dir) {
		size = max(phys, size);
	}

	size++;

	n->set = bitmap_alloc(size);
	if (!n->set) {
		closedir(dir);
		return -ENOMEM;
	}

	n->node = idx;
	n->size = size;

	rewinddir(dir);

	for_each_memory(phys, dir) {
		set_bit(phys, n->set);
	}

	closedir(dir);
	return 0;
}

static int memory_node__sort(const void *a, const void *b)
{
	const struct memory_node *na = a;
	const struct memory_node *nb = b;

	return na->node - nb->node;
}

static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
{
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;
	u64 cnt = 0;
	int ret = 0;

	scnprintf(path, PATH_MAX, "%s/devices/system/node/",
		  sysfs__mountpoint());

	dir = opendir(path);
	if (!dir) {
		pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
			  __func__, path);
		return -1;
	}

	while (!ret && (ent = readdir(dir))) {
		unsigned int idx;
		int r;

		if (!strcmp(ent->d_name, ".") ||
		    !strcmp(ent->d_name, ".."))
			continue;

		r = sscanf(ent->d_name, "node%u", &idx);
		if (r != 1)
			continue;

		if (WARN_ONCE(cnt >= size,
			"failed to write MEM_TOPOLOGY, way too many nodes\n")) {
			closedir(dir);
			return -1;
		}

		ret = memory_node__read(&nodes[cnt++], idx);
	}

	*cntp = cnt;
	closedir(dir);

	if (!ret)
		qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);

	return ret;
}

#define MAX_MEMORY_NODES 2000

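/*
 * The MEM_TOPOLOGY feature holds the physical memory map for every node in
 * the system. The on-disk layout is:
 *
 *  0 - version          | for future changes
 *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
 * 16 - count            | number of nodes
 *
 * followed, for each node, by:
 *
 * 32 - node id          | node index
 * 40 - size             | size of bitmap
 * 48 - bitmap           | bitmap of memory indexes that belong to the node
 */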
static int write_mem_topology(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
{
	static struct memory_node nodes[MAX_MEMORY_NODES];
	u64 bsize, version = 1, i, nr;
	int ret;

	ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
			      (unsigned long long *) &bsize);
	if (ret)
		return ret;

	ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
	if (ret)
		return ret;

	ret = do_write(ff, &version, sizeof(version));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &bsize, sizeof(bsize));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto out;

	for (i = 0; i < nr; i++) {
		struct memory_node *n = &nodes[i];

		#define _W(v)						\
			ret = do_write(ff, &n->v, sizeof(n->v));	\
			if (ret < 0)					\
				goto out;

		_W(node)
		_W(size)

		#undef _W

		ret = do_write_bitmap(ff, n->set, n->size);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}

static int write_compressed(struct feat_fd *ff __maybe_unused,
			    struct evlist *evlist __maybe_unused)
{
	int ret;

	ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
	if (ret)
		return ret;

	return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
}

static int write_cpu_pmu_caps(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct perf_pmu *cpu_pmu = perf_pmu__find("cpu");
	struct perf_pmu_caps *caps = NULL;
	int nr_caps;
	int ret;

	if (!cpu_pmu)
		return -ENOENT;

	nr_caps = perf_pmu__caps_parse(cpu_pmu);
	if (nr_caps < 0)
		return nr_caps;

	ret = do_write(ff, &nr_caps, sizeof(nr_caps));
	if (ret < 0)
		return ret;

	list_for_each_entry(caps, &cpu_pmu->caps, list) {
		ret = do_write_string(ff, caps->name);
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, caps->value);
		if (ret < 0)
			return ret;
	}

	return ret;
}

static void print_hostname(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}

static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}

static void print_arch(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}

static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}

static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}

static void print_version(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}

static void print_cmdline(struct feat_fd *ff, FILE *fp)
{
	int nr, i;

	nr = ff->ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++) {
		char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
		if (!argv_i) {
			fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
		} else {
			char *mem = argv_i;
			do {
				char *quote = strchr(argv_i, '\'');
				if (!quote)
					break;
				*quote++ = '\0';
				fprintf(fp, "%s\\\'", argv_i);
				argv_i = quote;
			} while (1);
			fprintf(fp, "%s ", argv_i);
			free(mem);
		}
	}
	fputc('\n', fp);
}

static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling sockets : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		nr = ph->env.nr_sibling_dies;
		str = ph->env.sibling_dies;

		for (i = 0; i < nr; i++) {
			fprintf(fp, "# sibling dies : %s\n", str);
			str += strlen(str) + 1;
		}
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Die ID %d, Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].die_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID, Die ID and Socket ID "
				    "information is not available\n");
	} else {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID and Socket ID "
				    "information is not available\n");
	}
}

static void print_clockid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
		ff->ph->env.clock.clockid_res_ns * 1000);
}

static void print_clock_data(struct feat_fd *ff, FILE *fp)
{
	struct timespec clockid_ns;
	char tstr[64], date[64];
	struct timeval tod_ns;
	clockid_t clockid;
	struct tm ltime;
	u64 ref;

	if (!ff->ph->env.clock.enabled) {
		fprintf(fp, "# reference time disabled\n");
		return;
	}

	ref = ff->ph->env.clock.tod_ns;
	tod_ns.tv_sec = ref / NSEC_PER_SEC;
	ref -= tod_ns.tv_sec * NSEC_PER_SEC;
	tod_ns.tv_usec = ref / NSEC_PER_USEC;

	ref = ff->ph->env.clock.clockid_ns;
	clockid_ns.tv_sec = ref / NSEC_PER_SEC;
	ref -= clockid_ns.tv_sec * NSEC_PER_SEC;
	clockid_ns.tv_nsec = ref;

	clockid = ff->ph->env.clock.clockid;

	if (localtime_r(&tod_ns.tv_sec, &ltime) == NULL)
		snprintf(tstr, sizeof(tstr), "<error>");
	else {
		strftime(date, sizeof(date), "%F %T", &ltime);
		scnprintf(tstr, sizeof(tstr), "%s.%06d",
			  date, (int) tod_ns.tv_usec);
	}

	fprintf(fp, "# clockid: %s (%u)\n", clockid_name(clockid), clockid);
	fprintf(fp, "# reference time: %s = %ld.%06d (TOD) = %ld.%09ld (%s)\n",
		    tstr, tod_ns.tv_sec, (int) tod_ns.tv_usec,
		    clockid_ns.tv_sec, clockid_ns.tv_nsec,
		    clockid_name(clockid));
}

static void print_dir_format(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
}

static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);

		bpf_event__print_bpf_prog_info(&node->info_linear->info,
					       env, fp);
	}

	up_read(&env->bpf_progs.lock);
}

static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		fprintf(fp, "# btf info of id %u\n", node->id);
	}

	up_read(&env->bpf_progs.lock);
}

static void free_event_desc(struct evsel *events)
{
	struct evsel *evsel;

	if (!events)
		return;

	for (evsel = events; evsel->core.attr.size; evsel++) {
		zfree(&evsel->name);
		zfree(&evsel->core.id);
	}

	free(events);
}

static bool perf_attr_check(struct perf_event_attr *attr)
{
	if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) {
		pr_warning("Reserved bits are set unexpectedly. "
			   "Please update perf tool.\n");
		return false;
	}

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) {
		pr_warning("Unknown sample type (0x%llx) is detected. "
			   "Please update perf tool.\n",
			   attr->sample_type);
		return false;
	}

	if (attr->read_format & ~(PERF_FORMAT_MAX-1)) {
		pr_warning("Unknown read format (0x%llx) is detected. "
			   "Please update perf tool.\n",
			   attr->read_format);
		return false;
	}

	if ((attr->sample_type & PERF_SAMPLE_BRANCH_STACK) &&
	    (attr->branch_sample_type & ~(PERF_SAMPLE_BRANCH_MAX-1))) {
		pr_warning("Unknown branch sample type (0x%llx) is detected. "
			   "Please update perf tool.\n",
			   attr->branch_sample_type);

		return false;
	}

	return true;
}

static struct evsel *read_event_desc(struct feat_fd *ff)
{
	struct evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	size_t msz;

	if (do_read_u32(ff, &nre))
		goto error;

	if (do_read_u32(ff, &sz))
		goto error;

	buf = malloc(sz);
	if (!buf)
		goto error;

	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	msz = sizeof(evsel->core.attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		if (__do_read(ff, buf, sz))
			goto error;

		if (ff->ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->core.attr, buf, msz);

		if (!perf_attr_check(&evsel->core.attr))
			goto error;

		if (do_read_u32(ff, &nr))
			goto error;

		if (ff->ph->needs_swap)
			evsel->needs_swap = true;

		evsel->name = do_read_string(ff);
		if (!evsel->name)
			goto error;

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->core.ids = nr;
		evsel->core.id = id;

		for (j = 0 ; j < nr; j++) {
			if (do_read_u64(ff, id))
				goto error;
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}

static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, ", %s = %s", name, val);
}

static void print_event_desc(struct feat_fd *ff, FILE *fp)
{
	struct evsel *evsel, *events;
	u32 j;
	u64 *id;

	if (ff->events)
		events = ff->events;
	else
		events = read_event_desc(ff);

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->core.attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->core.ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->core.id; j < evsel->core.ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
	ff->events = NULL;
}

static void print_total_mem(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
}

static void print_numa_topology(struct feat_fd *ff, FILE *fp)
{
	int i;
	struct numa_node *n;

	for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
		n = &ff->ph->env.numa_nodes[i];

		fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
	}
}

static void print_cpuid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
}

static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}

static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}

static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains stat data\n");
}

static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
{
	int i;

	fprintf(fp, "# CPU cache info:\n");
	for (i = 0; i < ff->ph->env.caches_cnt; i++) {
		fprintf(fp, "# ");
		cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
	}
}

static void print_compressed(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
		ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
		ff->ph->env.comp_level, ff->ph->env.comp_ratio);
}

static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
{
	const char *delimiter = "# cpu pmu capabilities: ";
	u32 nr_caps = ff->ph->env.nr_cpu_pmu_caps;
	char *str;

	if (!nr_caps) {
		fprintf(fp, "# cpu pmu capabilities: not available\n");
		return;
	}

	str = ff->ph->env.cpu_pmu_caps;
	while (nr_caps--) {
		fprintf(fp, "%s%s", delimiter, str);
		delimiter = ", ";
		str += strlen(str) + 1;
	}

	fprintf(fp, "\n");
}

static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
	const char *delimiter = "# pmu mappings: ";
	char *str, *tmp;
	u32 pmu_num;
	u32 type;

	pmu_num = ff->ph->env.nr_pmu_mappings;
	if (!pmu_num) {
		fprintf(fp, "# pmu mappings: not available\n");
		return;
	}

	str = ff->ph->env.pmu_mappings;

	while (pmu_num) {
		type = strtoul(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		str = tmp + 1;
		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);

		delimiter = ", ";
		str += strlen(str) + 1;
		pmu_num--;
	}

	fprintf(fp, "\n");

	if (!pmu_num)
		return;
error:
	fprintf(fp, "# pmu mappings: unable to read\n");
}

static void print_group_desc(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct evsel *evsel;
	u32 nr = 0;

	session = container_of(ff->ph, struct perf_session, header);

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", evsel__name(evsel));

			nr = evsel->core.nr_members - 1;
		} else if (nr) {
			fprintf(fp, ",%s", evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}

static void print_sample_time(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	char time_buf[32];
	double d;

	session = container_of(ff->ph, struct perf_session, header);

	timestamp__scnprintf_usec(session->evlist->first_sample_time,
				  time_buf, sizeof(time_buf));
	fprintf(fp, "# time of first sample : %s\n", time_buf);

	timestamp__scnprintf_usec(session->evlist->last_sample_time,
				  time_buf, sizeof(time_buf));
	fprintf(fp, "# time of last sample : %s\n", time_buf);

	d = (double)(session->evlist->last_sample_time -
		session->evlist->first_sample_time) / NSEC_PER_MSEC;

	fprintf(fp, "# sample duration : %10.3f ms\n", d);
}

static void memory_node__fprintf(struct memory_node *n,
				 unsigned long long bsize, FILE *fp)
{
	char buf_map[100], buf_size[50];
	unsigned long long size;

	size = bsize * bitmap_weight(n->set, n->size);
	unit_number__scnprintf(buf_size, 50, size);

	bitmap_scnprintf(n->set, n->size, buf_map, 100);
	fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
}

static void print_mem_topology(struct feat_fd *ff, FILE *fp)
{
	struct memory_node *nodes;
	int i, nr;

	nodes = ff->ph->env.memory_nodes;
	nr = ff->ph->env.nr_memory_nodes;

	fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
		nr, ff->ph->env.memory_bsize);

	for (i = 0; i < nr; i++) {
		memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
	}
}

static int __event_process_build_id(struct perf_record_header_build_id *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_space_type dso_space;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_space = DSO_SPACE__KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_space = DSO_SPACE__KERNEL_GUEST;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_space = DSO_SPACE__USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];
		struct build_id bid;
		size_t size = BUILD_ID_SIZE;

		if (bev->header.misc & PERF_RECORD_MISC_BUILD_ID_SIZE)
			size = bev->size;

		build_id__init(&bid, bev->data, size);
		dso__set_build_id(dso, &bid);

		if (dso_space != DSO_SPACE__USER) {
			struct kmod_path m = { .name = NULL, };

			if (!kmod_path__parse_name(&m, filename) && m.kmod)
				dso__set_module_info(dso, &m, machine);

			dso->kernel = dso_space;
			free(m.name);
		}

		build_id__sprintf(&dso->bid, sbuild_id);
		pr_debug("build id event received for %s: %s [%zu]\n",
			 dso->long_name, sbuild_id, size);
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}

static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header header;
		u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char filename[0];
	} old_bev;
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

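		/*
		 * The pid is missing from the old on-disk format; fill it in
		 * using the hint in header.misc.
		 */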
		bev.pid = HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid = DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;

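		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * added a field to struct perf_record_header_build_id that broke
		 * the file format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */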
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{\
	ff->ph->env.__feat_env = do_read_string(ff); \
	return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
}

FEAT_PROCESS_STR_FUN(hostname, hostname);
FEAT_PROCESS_STR_FUN(osrelease, os_release);
FEAT_PROCESS_STR_FUN(version, version);
FEAT_PROCESS_STR_FUN(arch, arch);
FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
FEAT_PROCESS_STR_FUN(cpuid, cpuid);

static int process_tracing_data(struct feat_fd *ff, void *data)
{
	ssize_t ret = trace_report(ff->fd, data, false);

	return ret < 0 ? -1 : 0;
}

static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
{
	if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
		pr_debug("Failed to read buildids, continuing...\n");
	return 0;
}

static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
{
	int ret;
	u32 nr_cpus_avail, nr_cpus_online;

	ret = do_read_u32(ff, &nr_cpus_avail);
	if (ret)
		return ret;

	ret = do_read_u32(ff, &nr_cpus_online);
	if (ret)
		return ret;
	ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
	ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
	return 0;
}

static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
{
	u64 total_mem;
	int ret;

	ret = do_read_u64(ff, &total_mem);
	if (ret)
		return -1;
	ff->ph->env.total_mem = (unsigned long long)total_mem;
	return 0;
}

static struct evsel *evlist__find_by_index(struct evlist *evlist, int idx)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->idx == idx)
			return evsel;
	}

	return NULL;
}

static void
perf_evlist__set_event_name(struct evlist *evlist,
			    struct evsel *event)
{
	struct evsel *evsel;

	if (!event->name)
		return;

	evsel = evlist__find_by_index(evlist, event->idx);
	if (!evsel)
		return;

	if (evsel->name)
		return;

	evsel->name = strdup(event->name);
}

static int
process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	struct evsel *evsel, *events = read_event_desc(ff);

	if (!events)
		return 0;

	session = container_of(ff->ph, struct perf_session, header);

	if (session->data->is_pipe) {
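		/*
		 * Save events for reading later by print_event_desc,
		 * since they can't be read again in pipe mode.
		 */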
		ff->events = events;
	}

	for (evsel = events; evsel->core.attr.size; evsel++)
		perf_evlist__set_event_name(session->evlist, evsel);

	if (!session->data->is_pipe)
		free_event_desc(events);

	return 0;
}

static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
{
	char *str, *cmdline = NULL, **argv = NULL;
	u32 nr, i, len = 0;

	if (do_read_u32(ff, &nr))
		return -1;

	ff->ph->env.nr_cmdline = nr;

	cmdline = zalloc(ff->size + nr + 1);
	if (!cmdline)
		return -1;

	argv = zalloc(sizeof(char *) * (nr + 1));
	if (!argv)
		goto error;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		argv[i] = cmdline + len;
		memcpy(argv[i], str, strlen(str) + 1);
		len += strlen(str) + 1;
		free(str);
	}
	ff->ph->env.cmdline = cmdline;
	ff->ph->env.cmdline_argv = (const char **) argv;
	return 0;

error:
	free(argv);
	free(cmdline);
	return -1;
}

static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
{
	u32 nr, i;
	char *str;
	struct strbuf sb;
	int cpu_nr = ff->ph->env.nr_cpus_avail;
	u64 size = 0;
	struct perf_header *ph = ff->ph;
	bool do_core_id_test = true;

	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
	if (!ph->env.cpu)
		return -1;

	if (do_read_u32(ff, &nr))
		goto free_cpu;

	ph->env.nr_sibling_cores = nr;
	size += sizeof(u32);
	if (strbuf_init(&sb, 128) < 0)
		goto free_cpu;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_cores = strbuf_detach(&sb, NULL);

	if (do_read_u32(ff, &nr))
		return -1;

	ph->env.nr_sibling_threads = nr;
	size += sizeof(u32);

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_threads = strbuf_detach(&sb, NULL);

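	/*
	 * The header may be from an old perf,
	 * which doesn't include core id and socket id information.
	 */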
	if (ff->size <= size) {
		zfree(&ph->env.cpu);
		return 0;
	}

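	/*
	 * On s390 the socket_id number is not related to the number of cpus:
	 * the socket_id number might be higher than the number of cpus,
	 * depending on the configuration. AArch64 is the same.
	 */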
	if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
			  || !strncmp(ph->env.arch, "aarch64", 7)))
		do_core_id_test = false;

	for (i = 0; i < (u32)cpu_nr; i++) {
		if (do_read_u32(ff, &nr))
			goto free_cpu;

		ph->env.cpu[i].core_id = nr;
		size += sizeof(u32);

		if (do_read_u32(ff, &nr))
			goto free_cpu;

		if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
			pr_debug("socket_id number is too big. "
				 "You may need to upgrade the perf tool.\n");
			goto free_cpu;
		}

		ph->env.cpu[i].socket_id = nr;
		size += sizeof(u32);
	}

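	/*
	 * The header may be from an old perf,
	 * which doesn't include die information.
	 */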
2459 if (ff->size <= size)
2460 return 0;
2461
2462 if (do_read_u32(ff, &nr))
2463 return -1;
2464
2465 ph->env.nr_sibling_dies = nr;
2466 size += sizeof(u32);
2467
2468 for (i = 0; i < nr; i++) {
2469 str = do_read_string(ff);
2470 if (!str)
2471 goto error;
2472
2473
2474 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2475 goto error;
2476 size += string_size(str);
2477 free(str);
2478 }
2479 ph->env.sibling_dies = strbuf_detach(&sb, NULL);
2480
2481 for (i = 0; i < (u32)cpu_nr; i++) {
2482 if (do_read_u32(ff, &nr))
2483 goto free_cpu;
2484
2485 ph->env.cpu[i].die_id = nr;
2486 }
2487
2488 return 0;
2489
2490error:
2491 strbuf_release(&sb);
2492free_cpu:
2493 zfree(&ph->env.cpu);
2494 return -1;
2495}
2496
static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
{
	struct numa_node *nodes, *n;
	u32 nr, i;
	char *str;

	/* nr nodes */
	if (do_read_u32(ff, &nr))
		return -1;

	nodes = zalloc(sizeof(*nodes) * nr);
	if (!nodes)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		n = &nodes[i];

		/* node number */
		if (do_read_u32(ff, &n->node))
			goto error;

		if (do_read_u64(ff, &n->mem_total))
			goto error;

		if (do_read_u64(ff, &n->mem_free))
			goto error;

		str = do_read_string(ff);
		if (!str)
			goto error;

		n->map = perf_cpu_map__new(str);
		free(str);
		if (!n->map)
			goto error;
	}
	ff->ph->env.nr_numa_nodes = nr;
	ff->ph->env.numa_nodes = nodes;
	return 0;

error:
	free(nodes);
	return -1;
}

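/*
 * PMU_MAPPINGS layout as read below (sketch): u32 pmu_num; then pmu_num
 * pairs of { u32 type; NUL-terminated name }.  They are flattened into
 * a single buffer of NUL-separated "type:name" entries, e.g.
 * "4:cpu\0" "9:msr\0" (illustrative values only).
 */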
static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
{
	char *name;
	u32 pmu_num;
	u32 type;
	struct strbuf sb;

	if (do_read_u32(ff, &pmu_num))
		return -1;

	if (!pmu_num) {
		pr_debug("pmu mappings not available\n");
		return 0;
	}

	ff->ph->env.nr_pmu_mappings = pmu_num;
	if (strbuf_init(&sb, 128) < 0)
		return -1;

	while (pmu_num) {
		if (do_read_u32(ff, &type))
			goto error;

		name = do_read_string(ff);
		if (!name)
			goto error;

		if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, "", 1) < 0)
			goto error;

		if (!strcmp(name, "msr"))
			ff->ph->env.msr_pmu_type = type;

		free(name);
		pmu_num--;
	}
	ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
	return 0;

error:
	strbuf_release(&sb);
	return -1;
}

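/*
 * GROUP_DESC layout as read below (sketch): u32 nr_groups; then per
 * group a NUL-terminated name, u32 leader_idx (index of the leader in
 * the evsel list) and u32 nr_members.  The descriptors are replayed
 * onto the evlist to rebuild leader/member links.
 */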
static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	int ret = -1;
	u32 i, nr, nr_groups;
	struct perf_session *session;
	struct evsel *evsel, *leader = NULL;
	struct group_desc {
		char *name;
		u32 leader_idx;
		u32 nr_members;
	} *desc;

	if (do_read_u32(ff, &nr_groups))
		return -1;

	ff->ph->env.nr_groups = nr_groups;
	if (!nr_groups) {
		pr_debug("group desc not available\n");
		return 0;
	}

	desc = calloc(nr_groups, sizeof(*desc));
	if (!desc)
		return -1;

	for (i = 0; i < nr_groups; i++) {
		desc[i].name = do_read_string(ff);
		if (!desc[i].name)
			goto out_free;

		if (do_read_u32(ff, &desc[i].leader_idx))
			goto out_free;

		if (do_read_u32(ff, &desc[i].nr_members))
			goto out_free;
	}

	/*
	 * Rebuild group relationship based on the group_desc
	 */
	session = container_of(ff->ph, struct perf_session, header);
	session->evlist->nr_groups = nr_groups;

	i = nr = 0;
	evlist__for_each_entry(session->evlist, evsel) {
		if (i < nr_groups && evsel->idx == (int) desc[i].leader_idx) {
			evsel->leader = evsel;
			/* {anon_group} is a dummy name */
			if (strcmp(desc[i].name, "{anon_group}")) {
				evsel->group_name = desc[i].name;
				desc[i].name = NULL;
			}
			evsel->core.nr_members = desc[i].nr_members;

			if (nr > 0) {
				pr_debug("invalid group desc\n");
				goto out_free;
			}

			leader = evsel;
			nr = evsel->core.nr_members - 1;
			i++;
		} else if (nr) {
			/* This is a group member */
			evsel->leader = leader;

			nr--;
		}
	}

	if (i != nr_groups || nr != 0) {
		pr_debug("invalid group desc\n");
		goto out_free;
	}

	ret = 0;
out_free:
	for (i = 0; i < nr_groups; i++)
		zfree(&desc[i].name);
	free(desc);

	return ret;
}

static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__process(ff->fd, ff->size, session,
				      ff->ph->needs_swap);
	if (err < 0)
		pr_err("Failed to process auxtrace index\n");
	return err;
}

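/*
 * CACHE layout as read below (sketch): u32 version (must be 1);
 * u32 cnt; then per entry four u32s (level, line_size, sets, ways)
 * followed by three NUL-terminated strings (type, size, map).
 */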
static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
{
	struct cpu_cache_level *caches;
	u32 cnt, i, version;

	if (do_read_u32(ff, &version))
		return -1;

	if (version != 1)
		return -1;

	if (do_read_u32(ff, &cnt))
		return -1;

	caches = zalloc(sizeof(*caches) * cnt);
	if (!caches)
		return -1;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level c;

		#define _R(v)						\
			if (do_read_u32(ff, &c.v))			\
				goto out_free_caches;			\

		_R(level)
		_R(line_size)
		_R(sets)
		_R(ways)
		#undef _R

		#define _R(v)					\
			c.v = do_read_string(ff);		\
			if (!c.v)				\
				goto out_free_caches;

		_R(type)
		_R(size)
		_R(map)
		#undef _R

		caches[i] = c;
	}

	ff->ph->env.caches = caches;
	ff->ph->env.caches_cnt = cnt;
	return 0;
out_free_caches:
	free(caches);
	return -1;
}

static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	u64 first_sample_time, last_sample_time;
	int ret;

	session = container_of(ff->ph, struct perf_session, header);

	ret = do_read_u64(ff, &first_sample_time);
	if (ret)
		return -1;

	ret = do_read_u64(ff, &last_sample_time);
	if (ret)
		return -1;

	session->evlist->first_sample_time = first_sample_time;
	session->evlist->last_sample_time = last_sample_time;
	return 0;
}

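/*
 * MEM_TOPOLOGY layout as read below (sketch): u64 version (must be 1);
 * u64 block size; u64 node count; then per node u64 node, u64 size and
 * a bitmap of present memory blocks read via do_read_bitmap().
 */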
static int process_mem_topology(struct feat_fd *ff,
				void *data __maybe_unused)
{
	struct memory_node *nodes;
	u64 version, i, nr, bsize;
	int ret = -1;

	if (do_read_u64(ff, &version))
		return -1;

	if (version != 1)
		return -1;

	if (do_read_u64(ff, &bsize))
		return -1;

	if (do_read_u64(ff, &nr))
		return -1;

	nodes = zalloc(sizeof(*nodes) * nr);
	if (!nodes)
		return -1;

	for (i = 0; i < nr; i++) {
		struct memory_node n;

		#define _R(v)				\
			if (do_read_u64(ff, &n.v))	\
				goto out;		\

		_R(node)
		_R(size)

		#undef _R

		if (do_read_bitmap(ff, &n.set, &n.size))
			goto out;

		nodes[i] = n;
	}

	ff->ph->env.memory_bsize    = bsize;
	ff->ph->env.memory_nodes    = nodes;
	ff->ph->env.nr_memory_nodes = nr;
	ret = 0;

out:
	if (ret)
		free(nodes);
	return ret;
}

static int process_clockid(struct feat_fd *ff,
			   void *data __maybe_unused)
{
	if (do_read_u64(ff, &ff->ph->env.clock.clockid_res_ns))
		return -1;

	return 0;
}

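/*
 * CLOCK_DATA layout as read below (sketch): u32 version (must be 1);
 * u32 clockid; u64 wall-clock (TOD) reference time in ns; u64 reference
 * time of the session clockid in ns.  The pair of reference timestamps
 * lets report-side code correlate the two clocks.
 */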
static int process_clock_data(struct feat_fd *ff,
			      void *_data __maybe_unused)
{
	u32 data32;
	u64 data64;

	/* version */
	if (do_read_u32(ff, &data32))
		return -1;

	if (data32 != 1)
		return -1;

	/* clockid */
	if (do_read_u32(ff, &data32))
		return -1;

	ff->ph->env.clock.clockid = data32;

	/* TOD ref time */
	if (do_read_u64(ff, &data64))
		return -1;

	ff->ph->env.clock.tod_ns = data64;

	/* clockid ref time */
	if (do_read_u64(ff, &data64))
		return -1;

	ff->ph->env.clock.clockid_ns = data64;
	ff->ph->env.clock.enabled = true;
	return 0;
}

static int process_dir_format(struct feat_fd *ff,
			      void *_data __maybe_unused)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	if (WARN_ON(!perf_data__is_dir(data)))
		return -1;

	return do_read_u64(ff, &data->dir.version);
}

#ifdef HAVE_LIBBPF_SUPPORT
static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info_node *info_node;
	struct perf_env *env = &ff->ph->env;
	u32 count, i;
	int err = -1;

	if (ff->ph->needs_swap) {
		pr_warning("interpreting bpf_prog_info from systems with different endianness is not yet supported\n");
		return 0;
	}

	if (do_read_u32(ff, &count))
		return -1;

	down_write(&env->bpf_progs.lock);

	for (i = 0; i < count; ++i) {
		u32 info_len, data_len;

		info_linear = NULL;
		info_node = NULL;
		if (do_read_u32(ff, &info_len))
			goto out;
		if (do_read_u32(ff, &data_len))
			goto out;

		if (info_len > sizeof(struct bpf_prog_info)) {
			pr_warning("detected invalid bpf_prog_info\n");
			goto out;
		}

		info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
				     data_len);
		if (!info_linear)
			goto out;
		info_linear->info_len = sizeof(struct bpf_prog_info);
		info_linear->data_len = data_len;
		if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
			goto out;
		if (__do_read(ff, &info_linear->info, info_len))
			goto out;
		if (info_len < sizeof(struct bpf_prog_info))
			memset(((void *)(&info_linear->info)) + info_len, 0,
			       sizeof(struct bpf_prog_info) - info_len);

		if (__do_read(ff, info_linear->data, data_len))
			goto out;

		info_node = malloc(sizeof(struct bpf_prog_info_node));
		if (!info_node)
			goto out;

		/* after reading from file, translate offset to address */
		bpf_program__bpil_offs_to_addr(info_linear);
		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
	}

	up_write(&env->bpf_progs.lock);
	return 0;
out:
	free(info_linear);
	free(info_node);
	up_write(&env->bpf_progs.lock);
	return err;
}
#else
static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
{
	return 0;
}
#endif

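/*
 * BPF_BTF layout as read below (sketch): u32 count; then per entry
 * u32 id, u32 data_size and data_size bytes of raw BTF data, which are
 * inserted into the env's BTF collection.
 */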
static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct btf_node *node = NULL;
	u32 count, i;
	int err = -1;

	if (ff->ph->needs_swap) {
		pr_warning("interpreting btf from systems with different endianness is not yet supported\n");
		return 0;
	}

	if (do_read_u32(ff, &count))
		return -1;

	down_write(&env->bpf_progs.lock);

	for (i = 0; i < count; ++i) {
		u32 id, data_size;

		if (do_read_u32(ff, &id))
			goto out;
		if (do_read_u32(ff, &data_size))
			goto out;

		node = malloc(sizeof(struct btf_node) + data_size);
		if (!node)
			goto out;

		node->id = id;
		node->data_size = data_size;

		if (__do_read(ff, node->data, data_size))
			goto out;

		perf_env__insert_btf(env, node);
		node = NULL;
	}

	err = 0;
out:
	up_write(&env->bpf_progs.lock);
	free(node);
	return err;
}

static int process_compressed(struct feat_fd *ff,
			      void *data __maybe_unused)
{
	if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_type)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_level)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
		return -1;

	return 0;
}

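/*
 * CPU_PMU_CAPS layout as read below (sketch): u32 nr_caps; then nr_caps
 * pairs of NUL-terminated { name, value } strings, flattened into one
 * NUL-separated "name=value" buffer (e.g. "branches=32").
 */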
static int process_cpu_pmu_caps(struct feat_fd *ff,
				void *data __maybe_unused)
{
	char *name, *value;
	struct strbuf sb;
	u32 nr_caps;

	if (do_read_u32(ff, &nr_caps))
		return -1;

	if (!nr_caps) {
		pr_debug("cpu pmu capabilities not available\n");
		return 0;
	}

	ff->ph->env.nr_cpu_pmu_caps = nr_caps;

	if (strbuf_init(&sb, 128) < 0)
		return -1;

	while (nr_caps--) {
		name = do_read_string(ff);
		if (!name)
			goto error;

		value = do_read_string(ff);
		if (!value)
			goto free_name;

		if (strbuf_addf(&sb, "%s=%s", name, value) < 0)
			goto free_value;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, "", 1) < 0)
			goto free_value;

		if (!strcmp(name, "branches"))
			ff->ph->env.max_branches = atoi(value);

		free(value);
		free(name);
	}
	ff->ph->env.cpu_pmu_caps = strbuf_detach(&sb, NULL);
	return 0;

free_value:
	free(value);
free_name:
	free(name);
error:
	strbuf_release(&sb);
	return -1;
}

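/*
 * FEAT_OPR registers a feature that can also be synthesized as a
 * pipe-mode event (.synthesize = true); FEAT_OPN registers one that
 * only exists as an on-disk file section.
 */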
#define FEAT_OPR(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func,			\
		.synthesize = true				\
	}

#define FEAT_OPN(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func			\
	}

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

#define process_branch_stack	NULL
#define process_stat		NULL

/* Only used in util/synthetic-events.c */
const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];

const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPN(TRACING_DATA,	tracing_data,	false),
	FEAT_OPN(BUILD_ID,	build_id,	false),
	FEAT_OPR(HOSTNAME,	hostname,	false),
	FEAT_OPR(OSRELEASE,	osrelease,	false),
	FEAT_OPR(VERSION,	version,	false),
	FEAT_OPR(ARCH,		arch,		false),
	FEAT_OPR(NRCPUS,	nrcpus,		false),
	FEAT_OPR(CPUDESC,	cpudesc,	false),
	FEAT_OPR(CPUID,		cpuid,		false),
	FEAT_OPR(TOTAL_MEM,	total_mem,	false),
	FEAT_OPR(EVENT_DESC,	event_desc,	false),
	FEAT_OPR(CMDLINE,	cmdline,	false),
	FEAT_OPR(CPU_TOPOLOGY,	cpu_topology,	true),
	FEAT_OPR(NUMA_TOPOLOGY,	numa_topology,	true),
	FEAT_OPN(BRANCH_STACK,	branch_stack,	false),
	FEAT_OPR(PMU_MAPPINGS,	pmu_mappings,	false),
	FEAT_OPR(GROUP_DESC,	group_desc,	false),
	FEAT_OPN(AUXTRACE,	auxtrace,	false),
	FEAT_OPN(STAT,		stat,		false),
	FEAT_OPN(CACHE,		cache,		true),
	FEAT_OPR(SAMPLE_TIME,	sample_time,	false),
	FEAT_OPR(MEM_TOPOLOGY,	mem_topology,	true),
	FEAT_OPR(CLOCKID,	clockid,	false),
	FEAT_OPN(DIR_FORMAT,	dir_format,	false),
	FEAT_OPR(BPF_PROG_INFO, bpf_prog_info,	false),
	FEAT_OPR(BPF_BTF,	bpf_btf,	false),
	FEAT_OPR(COMPRESSED,	compressed,	false),
	FEAT_OPR(CPU_PMU_CAPS,	cpu_pmu_caps,	false),
	FEAT_OPR(CLOCK_DATA,	clock_data,	false),
};

struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};

static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;
	struct feat_fd ff;

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}
	if (feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return 0;
	}
	if (!feat_ops[feat].print)
		return 0;

	ff = (struct feat_fd) {
		.fd = fd,
		.ph = ph,
	};

	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(&ff, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}

int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data__fd(session->data);
	struct stat st;
	time_t stctime;
	int ret, bit;

	hd.fp = fp;
	hd.full = full;

	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	stctime = st.st_mtime;
	fprintf(fp, "# captured on    : %s", ctime(&stctime));

	fprintf(fp, "# header version : %u\n", header->version);
	fprintf(fp, "# data offset    : %" PRIu64 "\n", header->data_offset);
	fprintf(fp, "# data size      : %" PRIu64 "\n", header->data_size);
	fprintf(fp, "# feat offset    : %" PRIu64 "\n", header->feat_offset);

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

	if (session->data->is_pipe)
		return 0;

	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		if (bit)
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
	return 0;
}

static int do_write_feat(struct feat_fd *ff, int type,
			 struct perf_file_section **p,
			 struct evlist *evlist)
{
	int err;
	int ret = 0;

	if (perf_header__has_feat(ff->ph, type)) {
		if (!feat_ops[type].write)
			return -1;

		if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
			return -1;

		(*p)->offset = lseek(ff->fd, 0, SEEK_CUR);

		err = feat_ops[type].write(ff, evlist);
		if (err < 0) {
			pr_debug("failed to write feature %s\n", feat_ops[type].name);

			/* undo anything written */
			lseek(ff->fd, (*p)->offset, SEEK_SET);

			return -1;
		}
		(*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
		(*p)++;
	}
	return ret;
}

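/*
 * Feature data is laid out at feat_offset as a table of
 * perf_file_section { offset, size } entries, one per set feature bit,
 * followed by the payloads they point at.  The writer below skips over
 * room for the table, emits the payloads, then seeks back and fills in
 * the table once all sizes are known.
 */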
static int perf_header__adds_write(struct perf_header *header,
				   struct evlist *evlist, int fd)
{
	int nr_sections;
	struct feat_fd ff;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	ff = (struct feat_fd){
		.fd = fd,
		.ph = header,
	};

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(&ff, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(&ff, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}

int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
	struct feat_fd ff;
	int err;

	ff = (struct feat_fd){ .fd = fd };

	f_header = (struct perf_pipe_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
	};

	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf pipe header\n");
		return err;
	}

	return 0;
}

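/*
 * Rough on-disk layout produced below for a regular (non-pipe) file:
 *
 *   perf_file_header | per-evsel sample ids | perf_file_attr table |
 *   event data | feature sections (written only at exit)
 *
 * The fixed-size header at offset 0 is written last, once all offsets
 * and sizes are known.
 */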
int perf_session__write_header(struct perf_session *session,
			       struct evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr	f_attr;
	struct perf_header *header = &session->header;
	struct evsel *evsel;
	struct feat_fd ff;
	u64 attr_offset;
	int err;

	ff = (struct feat_fd){ .fd = fd };
	lseek(fd, sizeof(f_header), SEEK_SET);

	evlist__for_each_entry(session->evlist, evsel) {
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(ff.fd, 0, SEEK_CUR);

	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->core.attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->core.ids * sizeof(u64),
			}
		};
		err = do_write(&ff, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->core.nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}

static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}

int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}

static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};

/*
 * In the legacy file format, the magic number is not used to encode
 * endianness.  Instead, the on-file header size is compared against
 * every known ABI size in both byte orders; a match in the swapped
 * order means the file was produced on a host of the opposite
 * endianness and needs byte swapping.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	uint64_t ref_size, attr_size;
	int i;

	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
		ref_size = attr_file_abi_sizes[i]
			 + sizeof(struct perf_file_section);
		if (hdr_sz != ref_size) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
			 i,
			 ph->needs_swap);
		return 0;
	}
	/* could not determine ABI size or endianness */
	return -1;
}

#define PERF_PIPE_HDR_VER0	16

static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};

/*
 * Like try_all_file_abis() but for the legacy pipe header, whose only
 * field usable for detection is its size: compare it against each known
 * pipe ABI size in both byte orders.
 */
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	u64 attr_size;
	int i;

	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
		if (hdr_sz != attr_pipe_abi_sizes[i]) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != attr_pipe_abi_sizes[i])
				continue;

			ph->needs_swap = true;
		}
		pr_debug("Pipe ABI%d perf.data file detected\n", i);
		return 0;
	}
	return -1;
}

bool is_perf_magic(u64 magic)
{
	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
		|| magic == __perf_magic2
		|| magic == __perf_magic2_sw)
		return true;

	return false;
}

static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
	ph->version = PERF_HEADER_VERSION_2;

	/* check magic number with one endianness */
	if (magic == __perf_magic2)
		return 0;

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
		return -1;

	ph->needs_swap = true;

	return 0;
}

int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on in modern perf).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fall back to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			    BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				    BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				    BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	ph->feat_offset  = header->data.offset + header->data.size;
	return 0;
}

static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data)
{
	struct feat_fd fdd = {
		.fd	= fd,
		.ph	= ph,
		.size	= section->size,
		.offset	= section->offset,
	};

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	if (feat >= HEADER_LAST_FEATURE) {
		pr_debug("unknown feature %d, continuing...\n", feat);
		return 0;
	}

	if (!feat_ops[feat].process)
		return 0;

	return feat_ops[feat].process(&fdd, data);
}

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	struct feat_fd ff = {
		.fd = STDOUT_FILENO,
		.ph = ph,
	};
	ssize_t ret;

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");
		return -1;
	}

	if (ph->needs_swap)
		header->size = bswap_64(header->size);

	if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
		return -1;

	return 0;
}

static int perf_header__read_pipe(struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header,
					perf_data__fd(session->data),
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	return f_header.size == sizeof(f_header) ? 0 : -1;
}

static int read_attr(int fd, struct perf_header *ph,
		     struct perf_file_attr *f_attr)
{
	struct perf_event_attr *attr = &f_attr->attr;
	size_t sz, left;
	size_t our_sz = sizeof(f_attr->attr);
	ssize_t ret;

	memset(f_attr, 0, sizeof(*f_attr));

	/* read minimal guaranteed structure */
	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
	if (ret <= 0) {
		pr_debug("cannot read %d bytes of header attr\n",
			 PERF_ATTR_SIZE_VER0);
		return -1;
	}

	/* on file perf_event_attr size */
	sz = attr->size;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	if (sz == 0) {
		/* assume ABI0 */
		sz = PERF_ATTR_SIZE_VER0;
	} else if (sz > our_sz) {
		pr_debug("file uses a more recent and unsupported ABI"
			 " (%zu bytes extra)\n", sz - our_sz);
		return -1;
	}

	/* what we have not yet read */
	left = sz - PERF_ATTR_SIZE_VER0;
	if (left) {
		void *ptr = attr;
		ptr += PERF_ATTR_SIZE_VER0;

		ret = readn(fd, ptr, left);
		if (ret <= 0)
			return -1;
	}

	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));

	return ret <= 0 ? -1 : 0;
}

static int perf_evsel__prepare_tracepoint_event(struct evsel *evsel,
						struct tep_handle *pevent)
{
	struct tep_event *event;
	char bf[128];

	/* already prepared */
	if (evsel->tp_format)
		return 0;

	if (pevent == NULL) {
		pr_debug("broken or missing trace data\n");
		return -1;
	}

	event = tep_find_event(pevent, evsel->core.attr.config);
	if (event == NULL) {
		pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
		return -1;
	}

	if (!evsel->name) {
		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
		evsel->name = strdup(bf);
		if (evsel->name == NULL)
			return -1;
	}

	evsel->tp_format = event;
	return 0;
}

static int perf_evlist__prepare_tracepoint_events(struct evlist *evlist,
						  struct tep_handle *pevent)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    perf_evsel__prepare_tracepoint_event(pos, pevent))
			return -1;
	}

	return 0;
}

int perf_session__read_header(struct perf_session *session)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j, err;
	int fd = perf_data__fd(data);

	session->evlist = evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;

	/*
	 * We can read 'pipe' data event from regular file,
	 * check for the pipe header regardless of source.
	 */
	err = perf_header__read_pipe(session);
	if (!err || (err && perf_data__is_pipe(data))) {
		data->is_pipe = true;
		return err;
	}

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information.  Just warn user and process it as much as it can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   data->file.path);
	}

	if (f_header.attr_size == 0) {
		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
		       "Was the 'perf record' command properly terminated?\n",
		       data->file.path);
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at evlist__delete().
		 */
		evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(&evsel->core, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(&session->evlist->core, &evsel->core, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}

int perf_event__process_feature(struct perf_session *session,
				union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct feat_fd ff = { .fd = 0 };
	struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
	int type = fe->header.type;
	u64 feat = fe->feat_id;

	if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return 0;
	}
	if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
		pr_warning("invalid feature %" PRI_lu64 " in pipe-mode\n", feat);
		return -1;
	}

	if (!feat_ops[feat].process)
		return 0;

	ff.buf  = (void *)fe->data;
	ff.size = event->header.size - sizeof(*fe);
	ff.ph = &session->header;

	if (feat_ops[feat].process(&ff, NULL))
		return -1;

	if (!feat_ops[feat].print || !tool->show_feat_hdr)
		return 0;

	if (!feat_ops[feat].full_only ||
	    tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
		feat_ops[feat].print(&ff, stdout);
	} else {
		fprintf(stdout, "# %s info available, use -I to display\n",
			feat_ops[feat].name);
	}

	return 0;
}

size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
{
	struct perf_record_event_update *ev = &event->event_update;
	struct perf_record_event_update_scale *ev_scale;
	struct perf_record_event_update_cpus *ev_cpus;
	struct perf_cpu_map *map;
	size_t ret;

	ret = fprintf(fp, "\n... id:    %" PRI_lu64 "\n", ev->id);

	switch (ev->type) {
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct perf_record_event_update_scale *)ev->data;
		ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
		break;
	case PERF_EVENT_UPDATE__UNIT:
		ret += fprintf(fp, "... unit:  %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		ret += fprintf(fp, "... name:  %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
		ret += fprintf(fp, "... ");

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			ret += cpu_map__fprintf(map, fp);
		else
			ret += fprintf(fp, "failed to get cpus\n");
		break;
	default:
		ret += fprintf(fp, "... unknown type\n");
		break;
	}

	return ret;
}

int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct evsel *evsel;
	struct evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, event->attr.id[i]);
	}

	return 0;
}

int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct evlist **pevlist)
{
	struct perf_record_event_update *ev = &event->event_update;
	struct perf_record_event_update_scale *ev_scale;
	struct perf_record_event_update_cpus *ev_cpus;
	struct evlist *evlist;
	struct evsel *evsel;
	struct perf_cpu_map *map;

	if (!pevlist || *pevlist == NULL)
		return -EINVAL;

	evlist = *pevlist;

	evsel = perf_evlist__id2evsel(evlist, ev->id);
	if (evsel == NULL)
		return -EINVAL;

	switch (ev->type) {
	case PERF_EVENT_UPDATE__UNIT:
		evsel->unit = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		evsel->name = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct perf_record_event_update_scale *)ev->data;
		evsel->scale = ev_scale->scale;
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct perf_record_event_update_cpus *)ev->data;

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			evsel->core.own_cpus = map;
		else
			pr_err("failed to get event_update cpus\n");
	default:
		break;
	}

	return 0;
}

int perf_event__process_tracing_data(struct perf_session *session,
				     union perf_event *event)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data__fd(session->data);
	char buf[BUFSIZ];

	/*
	 * The pipe fd is already in proper place and in any case
	 * we can't move it, and we'd screw the case where we read
	 * 'pipe' data from regular file. The trace_report reads
	 * data from 'fd' so we need to set it directly behind the
	 * event, where the tracing data starts.
	 */
	if (!perf_data__is_pipe(session->data)) {
		off_t offset = lseek(fd, 0, SEEK_CUR);

		/* setup for reading amidst mmap */
		lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
		      SEEK_SET);
	}

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}

int perf_event__process_build_id(struct perf_session *session,
				 union perf_event *event)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}