1#include "perf.h"
2#include "util/debug.h"
3#include "util/event.h"
4#include "util/symbol.h"
5#include "util/sort.h"
6#include "util/evsel.h"
7#include "util/evlist.h"
8#include "util/machine.h"
9#include "util/thread.h"
10#include "util/parse-events.h"
11#include "tests/tests.h"
12#include "tests/hists_common.h"
13#include <linux/kernel.h>
14
/*
 * One fake input sample plus the resolution results filled in by
 * add_hist_entries() after machine__resolve() runs on it.
 */
struct sample {
	u32 pid;		/* fake pid selecting one of the fake threads */
	u64 ip;			/* fake instruction pointer to resolve */
	struct thread *thread;	/* resolved by machine__resolve() */
	struct map *map;	/* resolved by machine__resolve() */
	struct symbol *sym;	/* resolved by machine__resolve() */
};
22
23
/* For the numbers, see hists_common.c */
static struct sample fake_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* perf [libc]   malloc() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
	/* perf [libc]   free() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [kernel] page_fault() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	/* bash [bash]   main() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [kernel] page_fault() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
};
46
/*
 * One fake callchain per entry in fake_samples[], matched by index in
 * add_hist_entries().  Each row is cast to struct ip_callchain, so the
 * first value is the number of chain entries (ip_callchain::nr) and the
 * rest are the chain IPs, ordered from the sampled IP outward to the
 * chain's root.
 */
static u64 fake_callchains[][10] = {
	/* schedule => run_command => main */
	{ 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/* main */
	{ 1, FAKE_IP_PERF_MAIN, },
	/* cmd_record => run_command => main */
	{ 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/* malloc => cmd_record => run_command => main */
	{ 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
	     FAKE_IP_PERF_MAIN, },
	/* free => cmd_record => run_command => main */
	{ 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
	     FAKE_IP_PERF_MAIN, },
	/* main */
	{ 1, FAKE_IP_PERF_MAIN, },
	/* page_fault => sys_perf_event_open => run_command => main */
	{ 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN,
	     FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
	/* main */
	{ 1, FAKE_IP_BASH_MAIN, },
	/* xmalloc => malloc => xmalloc => malloc => xmalloc => main */
	{ 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC,
	     FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, },
	/* page_fault => malloc => main */
	{ 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, },
};
77
78static int add_hist_entries(struct hists *hists, struct machine *machine)
79{
80 struct addr_location al;
81 struct perf_evsel *evsel = hists_to_evsel(hists);
82 struct perf_sample sample = { .period = 1000, };
83 size_t i;
84
85 for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
86 struct hist_entry_iter iter = {
87 .evsel = evsel,
88 .sample = &sample,
89 .hide_unresolved = false,
90 };
91
92 if (symbol_conf.cumulate_callchain)
93 iter.ops = &hist_iter_cumulative;
94 else
95 iter.ops = &hist_iter_normal;
96
97 sample.cpumode = PERF_RECORD_MISC_USER;
98 sample.pid = fake_samples[i].pid;
99 sample.tid = fake_samples[i].pid;
100 sample.ip = fake_samples[i].ip;
101 sample.callchain = (struct ip_callchain *)fake_callchains[i];
102
103 if (machine__resolve(machine, &al, &sample) < 0)
104 goto out;
105
106 if (hist_entry_iter__add(&iter, &al, sysctl_perf_event_max_stack,
107 NULL) < 0) {
108 addr_location__put(&al);
109 goto out;
110 }
111
112 fake_samples[i].thread = al.thread;
113 fake_samples[i].map = al.map;
114 fake_samples[i].sym = al.sym;
115 }
116
117 return TEST_OK;
118
119out:
120 pr_debug("Not enough memory for adding a hist entry\n");
121 return TEST_FAIL;
122}
123
124static void del_hist_entries(struct hists *hists)
125{
126 struct hist_entry *he;
127 struct rb_root *root_in;
128 struct rb_root *root_out;
129 struct rb_node *node;
130
131 if (hists__has(hists, need_collapse))
132 root_in = &hists->entries_collapsed;
133 else
134 root_in = hists->entries_in;
135
136 root_out = &hists->entries;
137
138 while (!RB_EMPTY_ROOT(root_out)) {
139 node = rb_first(root_out);
140
141 he = rb_entry(node, struct hist_entry, rb_node);
142 rb_erase(node, root_out);
143 rb_erase(&he->rb_node_in, root_in);
144 hist_entry__delete(he);
145 }
146}
147
/* signature shared by test1..test4 below */
typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);

/* accessors used to compare a hist entry against a struct result */
#define COMM(he)  (thread__comm_str(he->thread))
#define DSO(he)   (he->ms.map->dso->short_name)
#define SYM(he)   (he->ms.sym->name)
#define CPU(he)   (he->cpu)
#define PID(he)   (he->thread->tid)
#define DEPTH(he) (he->callchain->max_depth)
/* accessors for a callchain_list node, compared against callchain_result */
#define CDSO(cl)  (cl->ms.map->dso->short_name)
#define CSYM(cl)  (cl->ms.sym->name)
158
/* expected values for one hist entry in output order */
struct result {
	u64 children;		/* cumulated (children) period; checked only when cumulate_callchain is set */
	u64 self;		/* the entry's own period */
	const char *comm;
	const char *dso;
	const char *sym;
};
166
/* expected callchain for one hist entry: nr nodes, outermost first */
struct callchain_result {
	u64 nr;			/* number of valid entries in node[] */
	struct {
		const char *dso;
		const char *sym;
	} node[10];
};
174
/*
 * Resort @hists and verify its output order and contents against
 * @expected (and, when callchains are enabled, the first callchain of
 * each entry against @expected_callchain).
 *
 * Returns 0 on success; TEST_ASSERT_VAL() returns early on any mismatch,
 * so adding/deleting hist entries must be done by the caller.
 */
static int do_test(struct hists *hists, struct result *expected, size_t nr_expected,
		   struct callchain_result *expected_callchain, size_t nr_callchain)
{
	char buf[32];
	size_t i, c;
	struct hist_entry *he;
	struct rb_root *root;
	struct rb_node *node;
	struct callchain_node *cnode;
	struct callchain_list *clist;

	/* collapse and resort so that hists->entries is in final output order */
	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(hists_to_evsel(hists), NULL);

	if (verbose > 2) {
		pr_info("use callchain: %d, cumulate callchain: %d\n",
			symbol_conf.use_callchain,
			symbol_conf.cumulate_callchain);
		print_hists_out(hists);
	}

	root = &hists->entries;
	for (node = rb_first(root), i = 0;
	     node && (he = rb_entry(node, struct hist_entry, rb_node));
	     node = rb_next(node), i++) {
		scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i);

		TEST_ASSERT_VAL("Incorrect number of hist entry",
				i < nr_expected);
		TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self &&
				!strcmp(COMM(he), expected[i].comm) &&
				!strcmp(DSO(he), expected[i].dso) &&
				!strcmp(SYM(he), expected[i].sym));

		if (symbol_conf.cumulate_callchain)
			TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children);

		if (!symbol_conf.use_callchain)
			continue;

		/*
		 * NOTE: 'root' is reused here for the entry's callchain tree.
		 * That is safe only because the outer loop reads 'root' just
		 * once, in its initializer above.
		 */
		root = &he->callchain->node.rb_root;

		TEST_ASSERT_VAL("callchains expected", !RB_EMPTY_ROOT(root));
		cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);

		/* walk only the first (leftmost) callchain of this entry */
		c = 0;
		list_for_each_entry(clist, &cnode->val, list) {
			scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c);

			TEST_ASSERT_VAL("Incorrect number of callchain entry",
					c < expected_callchain[i].nr);
			TEST_ASSERT_VAL(buf,
				!strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
				!strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
			c++;
		}
		/* TODO: handle multiple callchains in an entry */
		TEST_ASSERT_VAL("Incorrect number of callchain entry",
				c <= expected_callchain[i].nr);
	}
	TEST_ASSERT_VAL("Incorrect number of hist entry",
			i == nr_expected);
	TEST_ASSERT_VAL("Incorrect number of callchain entry",
			!symbol_conf.use_callchain || nr_expected == nr_callchain);
	return 0;
}
246
/* NO callchain + NO children: plain per-entry overhead only */
static int test1(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output (sorted by self period, descending):
	 * "perf/perf/main" got two 1000-period samples merged into 2000,
	 * every other sample stands alone at 1000.  'children' is 0
	 * everywhere since cumulation is off and is not checked.
	 */
	struct result expected[] = {
		{ 0, 2000, "perf", "perf",     "main" },
		{ 0, 1000, "bash", "[kernel]", "page_fault" },
		{ 0, 1000, "bash", "bash",     "main" },
		{ 0, 1000, "bash", "bash",     "xmalloc" },
		{ 0, 1000, "perf", "[kernel]", "page_fault" },
		{ 0, 1000, "perf", "[kernel]", "schedule" },
		{ 0, 1000, "perf", "libc",     "free" },
		{ 0, 1000, "perf", "libc",     "malloc" },
		{ 0, 1000, "perf", "perf",     "cmd_record" },
	};

	symbol_conf.use_callchain = false;
	symbol_conf.cumulate_callchain = false;
	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}
297
/* callchain + NO children: same entries as test1, each with its chain */
static int test2(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output: identical entry order and periods to test1
	 * (cumulation is still off), but each entry now carries the
	 * callchain recorded for its sample in fake_callchains[], listed
	 * outermost-first in expected_callchain[] below (index-matched
	 * with expected[]).
	 */
	struct result expected[] = {
		{ 0, 2000, "perf", "perf",     "main" },
		{ 0, 1000, "bash", "[kernel]", "page_fault" },
		{ 0, 1000, "bash", "bash",     "main" },
		{ 0, 1000, "bash", "bash",     "xmalloc" },
		{ 0, 1000, "perf", "[kernel]", "page_fault" },
		{ 0, 1000, "perf", "[kernel]", "schedule" },
		{ 0, 1000, "perf", "libc",     "free" },
		{ 0, 1000, "perf", "libc",     "malloc" },
		{ 0, 1000, "perf", "perf",     "cmd_record" },
	};
	struct callchain_result expected_callchain[] = {
		{
			1, { { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "page_fault" },
			     { "libc",     "malloc" },
			     { "bash",     "main" }, },
		},
		{
			1, { { "bash",     "main" }, },
		},
		{
			6, { { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" }, },
		},
		{
			4, { { "[kernel]", "page_fault" },
			     { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "schedule" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "free" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
	};

	symbol_conf.use_callchain = true;
	symbol_conf.cumulate_callchain = false;
	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected),
		      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}
445
/* NO callchain + children: cumulated (children) periods, no chain output */
static int test3(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output (sorted by children period, then self):
	 * cumulation adds synthetic entries for callers that never appear
	 * as a sampled IP themselves ("run_command", "sys_perf_event_open",
	 * bash's "malloc") — their self period is 0.  'children' of an
	 * entry is the sum of periods of all samples whose chain passes
	 * through it.
	 */
	struct result expected[] = {
		{ 7000, 2000, "perf", "perf",     "main" },
		{ 5000,    0, "perf", "perf",     "run_command" },
		{ 3000, 1000, "bash", "bash",     "main" },
		{ 3000, 1000, "perf", "perf",     "cmd_record" },
		{ 2000,    0, "bash", "libc",     "malloc" },
		{ 1000, 1000, "bash", "[kernel]", "page_fault" },
		{ 1000, 1000, "bash", "bash",     "xmalloc" },
		{ 1000, 1000, "perf", "[kernel]", "page_fault" },
		{ 1000, 1000, "perf", "[kernel]", "schedule" },
		{ 1000, 1000, "perf", "libc",     "free" },
		{ 1000, 1000, "perf", "libc",     "malloc" },
		{ 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
	};

	symbol_conf.use_callchain = false;
	symbol_conf.cumulate_callchain = true;
	perf_evsel__reset_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}
502
/* callchain + children: cumulated periods AND per-entry callchains */
static int test4(struct perf_evsel *evsel, struct machine *machine)
{
	int err;
	struct hists *hists = evsel__hists(evsel);
	/*
	 * expected output: the cumulated entries of test3 (note the entry
	 * order differs slightly from test3 for the 1000-period group),
	 * each paired index-wise with its first callchain in
	 * expected_callchain[] below.  Synthetic caller entries
	 * (self == 0) carry the chain leading to them.
	 */
	struct result expected[] = {
		{ 7000, 2000, "perf", "perf",     "main" },
		{ 5000,    0, "perf", "perf",     "run_command" },
		{ 3000, 1000, "bash", "bash",     "main" },
		{ 3000, 1000, "perf", "perf",     "cmd_record" },
		{ 2000,    0, "bash", "libc",     "malloc" },
		{ 1000, 1000, "bash", "[kernel]", "page_fault" },
		{ 1000, 1000, "bash", "bash",     "xmalloc" },
		{ 1000,    0, "perf", "[kernel]", "sys_perf_event_open" },
		{ 1000, 1000, "perf", "[kernel]", "page_fault" },
		{ 1000, 1000, "perf", "[kernel]", "schedule" },
		{ 1000, 1000, "perf", "libc",     "free" },
		{ 1000, 1000, "perf", "libc",     "malloc" },
	};
	struct callchain_result expected_callchain[] = {
		{
			1, { { "perf",     "main" }, },
		},
		{
			2, { { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			1, { { "bash",     "main" }, },
		},
		{
			3, { { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" },
			     { "bash",     "main" }, },
		},
		{
			3, { { "[kernel]", "page_fault" },
			     { "libc",     "malloc" },
			     { "bash",     "main" }, },
		},
		{
			6, { { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "libc",     "malloc" },
			     { "bash",     "xmalloc" },
			     { "bash",     "main" }, },
		},
		{
			3, { { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "[kernel]", "page_fault" },
			     { "[kernel]", "sys_perf_event_open" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			3, { { "[kernel]", "schedule" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "free" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
		{
			4, { { "libc",     "malloc" },
			     { "perf",     "cmd_record" },
			     { "perf",     "run_command" },
			     { "perf",     "main" }, },
		},
	};

	symbol_conf.use_callchain = true;
	symbol_conf.cumulate_callchain = true;
	perf_evsel__set_sample_bit(evsel, CALLCHAIN);

	setup_sorting(NULL);

	/* unlike the other tests, explicitly restore default callchain params */
	callchain_param = callchain_param_default;
	callchain_register_param(&callchain_param);

	err = add_hist_entries(hists, machine);
	if (err < 0)
		goto out;

	err = do_test(hists, expected, ARRAY_SIZE(expected),
		      expected_callchain, ARRAY_SIZE(expected_callchain));

out:
	del_hist_entries(hists);
	reset_output_field();
	return err;
}
689
/*
 * Entry point: build a fake machine and an evlist with a single
 * cpu-clock event, then run all four callchain/cumulation combinations
 * (test1..test4) against it.  Stops at the first failing testcase.
 */
int test__hists_cumulate(int subtest __maybe_unused)
{
	int err = TEST_FAIL;
	struct machines machines;
	struct machine *machine;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = perf_evlist__new();
	size_t i;
	test_fn_t testcases[] = {
		test1,
		test2,
		test3,
		test4,
	};

	TEST_ASSERT_VAL("No memory", evlist);

	err = parse_events(evlist, "cpu-clock", NULL);
	if (err)
		goto out;
	err = TEST_FAIL;	/* reset: parse_events success returned 0 */

	machines__init(&machines);

	/* setup threads/dso/map/symbol list for the fake machine */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	evsel = perf_evlist__first(evlist);

	for (i = 0; i < ARRAY_SIZE(testcases); i++) {
		err = testcases[i](evsel, machine);
		if (err < 0)
			break;
	}

out:
	/* tear down everything, whether or not the tests ran */
	perf_evlist__delete(evlist);
	machines__exit(&machines);

	return err;
}
737