linux/tools/perf/util/hist.c
// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "session.h"
#include "namespaces.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

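/*
 * Unresolved addresses are shown as raw hex, so the fallback column
 * width below is BITS_PER_LONG / 4: the number of hex digits in a
 * native pointer (16 on 64-bit, 8 on 32-bit).
 */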
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        if (h->block_info)
                return;
        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
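        /*
         * e.g. in verbose mode a resolved 64-bit symbol column ends up
         * sized as namelen + 4 ('[k] ') + 16 + 2 (raw address) + 3 (' y ').
         */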
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose > 0)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 8);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.ms.sym) {
                        symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.ms.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.ms.sym) {
                        symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.ms.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }

                if (h->branch_info->srcline_from)
                        hists__new_col_len(hists, HISTC_SRCLINE_FROM,
                                        strlen(h->branch_info->srcline_from));
                if (h->branch_info->srcline_to)
                        hists__new_col_len(hists, HISTC_SRCLINE_TO,
                                        strlen(h->branch_info->srcline_to));
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.ms.sym) {
                        symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen);
                }

                if (h->mem_info->iaddr.ms.sym) {
                        symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                }

                if (h->mem_info->daddr.ms.map) {
                        symlen = dso__name_len(h->mem_info->daddr.ms.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }

                hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
                                   unresolved_col_width + 4 + 2);

        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
        hists__new_col_len(hists, HISTC_CPU, 3);
        hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
        if (symbol_conf.nanosecs)
                hists__new_col_len(hists, HISTC_TIME, 16);
        else
                hists__new_col_len(hists, HISTC_TIME, 12);

        if (h->srcline) {
                len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
                hists__new_col_len(hists, HISTC_SRCLINE, len);
        }

        if (h->srcfile)
                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());

        if (h->trace_output)
                hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

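/*
 * Quantize a timestamp to the start of its time slice: with a
 * time_quantum of, say, 100ms, a sample at t=250ms is accounted to
 * the 200ms bucket.  A zero time_quantum leaves timestamps unchanged.
 */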
static long hist_time(unsigned long htime)
{
        unsigned long time_quantum = symbol_conf.time_quantum;
        if (time_quantum)
                return (htime / time_quantum) * time_quantum;
        return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

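/*
 * Exponential decay for live updates (e.g. 'perf top'): each pass
 * keeps 7/8 of the accumulated period, so idle entries fade out and,
 * once their period reaches zero, become eligible for deletion in
 * hists__decay_entries().
 */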
static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);
        decay_callchain(he->callchain);

        diff = prev_period - he->stat.period;

        if (!he->depth) {
                hists->stats.total_period -= diff;
                if (!he->filtered)
                        hists->stats.total_non_filtered_period -= diff;
        }

        if (!he->leaf) {
                struct hist_entry *child;
                struct rb_node *node = rb_first_cached(&he->hroot_out);
                while (node) {
                        child = rb_entry(node, struct hist_entry, rb_node);
                        node = rb_next(node);

                        if (hists__decay_entry(hists, child))
                                hists__delete_entry(hists, child);
                }
        }

        return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        struct rb_root_cached *root_in;
        struct rb_root_cached *root_out;

        if (he->parent_he) {
                root_in  = &he->parent_he->hroot_in;
                root_out = &he->parent_he->hroot_out;
        } else {
                if (hists__has(hists, need_collapse))
                        root_in = &hists->entries_collapsed;
                else
                        root_in = hists->entries_in;
                root_out = &hists->entries;
        }

        rb_erase_cached(&he->rb_node_in, root_in);
        rb_erase_cached(&he->rb_node, root_out);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n))) {
                        hists__delete_entry(hists, n);
                }
        }
}

void hists__delete_entries(struct hists *hists)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                hists__delete_entry(hists, n);
        }
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;
        int i = 0;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (i == idx)
                        return n;

                next = rb_next(&n->rb_node);
                i++;
        }

        return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
                            struct hist_entry *template,
                            bool sample_self,
                            size_t callchain_size)
{
        *he = *template;
        he->callchain_size = callchain_size;

        if (symbol_conf.cumulate_callchain) {
                he->stat_acc = malloc(sizeof(he->stat));
                if (he->stat_acc == NULL)
                        return -ENOMEM;
                memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                if (!sample_self)
                        memset(&he->stat, 0, sizeof(he->stat));
        }

        map__get(he->ms.map);

        if (he->branch_info) {
                /*
                 * This branch info was (partially) allocated by
                 * sample__resolve_bstack() and will be freed after
                 * the new entries are added, so we need to save a copy.
                 */
                he->branch_info = malloc(sizeof(*he->branch_info));
                if (he->branch_info == NULL)
                        goto err;

                memcpy(he->branch_info, template->branch_info,
                       sizeof(*he->branch_info));

                map__get(he->branch_info->from.ms.map);
                map__get(he->branch_info->to.ms.map);
        }

        if (he->mem_info) {
                map__get(he->mem_info->iaddr.ms.map);
                map__get(he->mem_info->daddr.ms.map);
        }

        if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
                callchain_init(he->callchain);

        if (he->raw_data) {
                he->raw_data = memdup(he->raw_data, he->raw_size);
                if (he->raw_data == NULL)
                        goto err_infos;
        }

        if (he->srcline) {
                he->srcline = strdup(he->srcline);
                if (he->srcline == NULL)
                        goto err_rawdata;
        }

        if (symbol_conf.res_sample) {
                he->res_samples = calloc(sizeof(struct res_sample),
                                        symbol_conf.res_sample);
                if (!he->res_samples)
                        goto err_srcline;
        }

        INIT_LIST_HEAD(&he->pairs.node);
        thread__get(he->thread);
        he->hroot_in  = RB_ROOT_CACHED;
        he->hroot_out = RB_ROOT_CACHED;

        if (!symbol_conf.report_hierarchy)
                he->leaf = true;

        return 0;

err_srcline:
        zfree(&he->srcline);

err_rawdata:
        zfree(&he->raw_data);

err_infos:
        if (he->branch_info) {
                map__put(he->branch_info->from.ms.map);
                map__put(he->branch_info->to.ms.map);
                zfree(&he->branch_info);
        }
        if (he->mem_info) {
                map__put(he->mem_info->iaddr.ms.map);
                map__put(he->mem_info->daddr.ms.map);
        }
err:
        map__zput(he->ms.map);
        zfree(&he->stat_acc);
        return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
        return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
        free(ptr);
}

static struct hist_entry_ops default_ops = {
        .new    = hist_entry__zalloc,
        .free   = hist_entry__free,
};

static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        struct hist_entry_ops *ops = template->ops;
        size_t callchain_size = 0;
        struct hist_entry *he;
        int err = 0;

        if (!ops)
                ops = template->ops = &default_ops;

        if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = ops->new(callchain_size);
        if (he) {
                err = hist_entry__init(he, template, sample_self, callchain_size);
                if (err) {
                        ops->free(he);
                        he = NULL;
                }
        }

        return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
        if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
                return;

        he->hists->callchain_period += period;
        if (!he->filtered)
                he->hists->callchain_non_filtered_period += period;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
                                               struct hist_entry *entry,
                                               struct addr_location *al,
                                               bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;
        bool leftmost = true;

        p = &hists->entries_in->rb_root.rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        if (sample_self) {
                                he_stat__add_period(&he->stat, period, weight);
                                hist_entry__add_callchain_period(he, period);
                        }
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        mem_info__zput(entry->mem_info);

                        block_info__zput(entry->block_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                map__put(he->ms.map);
                                he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else {
                        p = &(*p)->rb_right;
                        leftmost = false;
                }
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        if (sample_self)
                hist_entry__add_callchain_period(he, period);
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}

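/*
 * Return a value uniformly distributed in [0, high).  A plain
 * random() % high is biased towards small values when the generator
 * range isn't an exact multiple of high, so draws below the threshold
 * (-high % high, i.e. 2^32 % high in unsigned arithmetic) are
 * rejected and retried.
 */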
static unsigned random_max(unsigned high)
{
        unsigned thresh = -high % high;
        for (;;) {
                unsigned r = random();
                if (r >= thresh)
                        return r % high;
        }
}

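/*
 * Keep a bounded reservoir of representative samples per entry: the
 * first symbol_conf.res_sample samples fill the array, after which
 * each new sample overwrites a uniformly chosen slot.  Unlike classic
 * reservoir sampling, the newest sample is always kept, which biases
 * the reservoir towards recent samples.
 */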
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
        struct res_sample *r;
        int j;

        if (he->num_res < symbol_conf.res_sample) {
                j = he->num_res++;
        } else {
                j = random_max(symbol_conf.res_sample);
        }
        r = &he->res_samples[j];
        r->time = sample->time;
        r->cpu = sample->cpu;
        r->tid = sample->tid;
}

static struct hist_entry*
__hists__add_entry(struct hists *hists,
                   struct addr_location *al,
                   struct symbol *sym_parent,
                   struct branch_info *bi,
                   struct mem_info *mi,
                   struct block_info *block_info,
                   struct perf_sample *sample,
                   bool sample_self,
                   struct hist_entry_ops *ops)
{
        struct namespaces *ns = thread__namespaces(al->thread);
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .cgroup_id = {
                        .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
                        .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
                },
                .ms = {
                        .maps   = al->maps,
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .srcline = (char *) al->srcline,
                .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
                .level   = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = sample->period,
                        .weight = sample->weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .block_info = block_info,
                .transaction = sample->transaction,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
                .time = hist_time(sample->time),
        }, *he = hists__findnew_entry(hists, &entry, al, sample_self);

        if (!hists->has_callchains && he && he->callchain_size != 0)
                hists->has_callchains = true;
        if (he && symbol_conf.res_sample)
                hists__res_sample(he, sample);
        return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
                                    struct addr_location *al,
                                    struct symbol *sym_parent,
                                    struct branch_info *bi,
                                    struct mem_info *mi,
                                    struct perf_sample *sample,
                                    bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
                                  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
                                        struct hist_entry_ops *ops,
                                        struct addr_location *al,
                                        struct symbol *sym_parent,
                                        struct branch_info *bi,
                                        struct mem_info *mi,
                                        struct perf_sample *sample,
                                        bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
                                  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
                                          struct addr_location *al,
                                          struct block_info *block_info)
{
        struct hist_entry entry = {
                .block_info = block_info,
                .hists = hists,
                .ms = {
                        .maps = al->maps,
                        .map = al->map,
                        .sym = al->sym,
                },
        }, *he = hists__findnew_entry(hists, &entry, al, false);

        return he;
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->priv = mi;
        return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->priv;
        struct hists *hists = evsel__hists(iter->evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = sample->weight;
        if (!cost)
                cost = 1;

        /*
         * Must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort() which is solely
         * based on periods.  We want the sorting to be done on
         * nr_events * weight, which is indirectly achieved by
         * passing period=weight here and in he_stat__add_period().
         */
        sample->period = cost;

        he = hists__add_entry(hists, al, iter->parent, NULL, mi,
                              sample, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since the mem info
         * was either already freed in hists__findnew_entry() or passed to a
         * new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->priv = bi;
        return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
                             struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->priv;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        al->maps = bi[i].to.ms.maps;
        al->map = bi[i].to.ms.map;
        al->sym = bi[i].to.ms.sym;
        al->addr = bi[i].to.addr;
        return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->priv;

        if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled. Thus we use a pseudo period of 1.
         */
        sample->period = 1;
        sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

        he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

        return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;

        callchain_cursor_commit(&callchain_cursor);

        /*
         * This is for detecting cycles or recursion so that they're
         * cumulated only once, preventing entries from exceeding
         * 100% overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->priv = he_cache;
        iter->curr = 0;

        return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        int err = 0;

        he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        hist_entry__append_callchain(he, sample);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(&callchain_cursor);

        hists__inc_nr_samples(hists, he->filtered);

        return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(&callchain_cursor);
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .hists = evsel__hists(evsel),
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .maps = al->maps,
                        .map = al->map,
                        .sym = al->sym,
                },
                .srcline = (char *) al->srcline,
                .parent = iter->parent,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };
        int i;
        struct callchain_cursor cursor;

        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there are duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}

const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};

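/*
 * Drive one sample through a hist_iter_ops instance: resolve its
 * callchain, then call prepare_entry, add_single_entry, loop over
 * next_entry/add_next_entry while next_entry returns nonzero, and
 * finish with finish_entry, calling add_entry_cb (if set) for every
 * entry created along the way.
 */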
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
{
        int err, err2;
        struct map *alm = NULL;

        if (al)
                alm = map__get(al->map);

        err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
        if (err) {
                map__put(alm);
                return err;
        }

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        map__put(alm);

        return err;
}

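/*
 * hist_entry__cmp() and hist_entry__collapse() walk the same sort key
 * list but invoke each format's ->cmp and ->collapse callbacks
 * respectively; the two callbacks may differ for some keys, hence the
 * separate helpers.
 */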
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
        struct hist_entry_ops *ops = he->ops;

        thread__zput(he->thread);
        map__zput(he->ms.map);

        if (he->branch_info) {
                map__zput(he->branch_info->from.ms.map);
                map__zput(he->branch_info->to.ms.map);
                free_srcline(he->branch_info->srcline_from);
                free_srcline(he->branch_info->srcline_to);
                zfree(&he->branch_info);
        }

        if (he->mem_info) {
                map__zput(he->mem_info->iaddr.ms.map);
                map__zput(he->mem_info->daddr.ms.map);
                mem_info__zput(he->mem_info);
        }

        if (he->block_info)
                block_info__zput(he->block_info);

        zfree(&he->res_samples);
        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        if (he->srcfile && he->srcfile[0])
                zfree(&he->srcfile);
        free_callchain(he->callchain);
        zfree(&he->trace_output);
        zfree(&he->raw_data);
        ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long demangled C++
 * method name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
                                   struct perf_hpp_fmt *fmt, int printed)
{
        if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
                const int width = fmt->width(fmt, hpp, he->hists);
                if (printed < width) {
                        advance_hpp(hpp, printed);
                        printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
                }
        }

        return printed;
}

/*
 * collapse the histogram
 */
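/*
 * Collapsing is the first resort pass: entries are moved from the
 * input tree into hists->entries_collapsed, merging entries (and
 * their callchains) that compare equal under the collapse keys,
 * before the output resort pass orders them for display.
 */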

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
                                       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
        return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
                                                enum hist_filter type,
                                                fmt_chk_fn check)
{
        struct perf_hpp_fmt *fmt;
        bool type_match = false;
        struct hist_entry *parent = he->parent_he;

        switch (type) {
        case HIST_FILTER__THREAD:
                if (symbol_conf.comm_list == NULL &&
                    symbol_conf.pid_list == NULL &&
                    symbol_conf.tid_list == NULL)
                        return;
                break;
        case HIST_FILTER__DSO:
                if (symbol_conf.dso_list == NULL)
                        return;
                break;
        case HIST_FILTER__SYMBOL:
                if (symbol_conf.sym_list == NULL)
                        return;
                break;
        case HIST_FILTER__PARENT:
        case HIST_FILTER__GUEST:
        case HIST_FILTER__HOST:
        case HIST_FILTER__SOCKET:
        case HIST_FILTER__C2C:
        default:
                return;
        }

        /* if it's filtered by its own fmt, it has to have filter bits */
        perf_hpp_list__for_each_format(he->hpp_list, fmt) {
                if (check(fmt)) {
                        type_match = true;
                        break;
                }
        }

        if (type_match) {
                /*
                 * If the filter is for the current level entry, propagate
                 * the filter marker to its parents.  The marker bit was
                 * already set by default, so it only needs to be cleared
                 * for non-filtered entries.
                 */
                if (!(he->filtered & (1 << type))) {
                        while (parent) {
                                parent->filtered &= ~(1 << type);
                                parent = parent->parent_he;
                        }
                }
        } else {
                /*
                 * If the current entry doesn't have matching formats, set
                 * the filter marker for upper level entries.  It will be
                 * cleared if its lower level entries are not filtered.
                 *
                 * For lower-level entries, it inherits the parent's
                 * filter bit so that lower level entries of a
                 * non-filtered entry won't set the filter marker.
                 */
                if (parent == NULL)
                        he->filtered |= (1 << type);
                else
                        he->filtered |= (parent->filtered & (1 << type));
        }
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
        hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
                                            check_thread_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
                                            perf_hpp__is_dso_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
                                            perf_hpp__is_sym_entry);

        hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
                                                 struct rb_root_cached *root,
                                                 struct hist_entry *he,
                                                 struct hist_entry *parent_he,
                                                 struct perf_hpp_list *hpp_list)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter, *new;
        struct perf_hpp_fmt *fmt;
        int64_t cmp;
        bool leftmost = true;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = 0;
                perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                        cmp = fmt->collapse(fmt, iter, he);
                        if (cmp)
                                break;
                }

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);
                        return iter;
                }

                if (cmp < 0)
                        p = &parent->rb_left;
                else {
                        p = &parent->rb_right;
                        leftmost = false;
                }
        }

        new = hist_entry__new(he, true);
        if (new == NULL)
                return NULL;

        hists->nr_entries++;

        /* save related format list for output */
        new->hpp_list = hpp_list;
        new->parent_he = parent_he;

        hist_entry__apply_hierarchy_filters(new);

        /* some fields are now passed to 'new' */
        perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
                        he->trace_output = NULL;
                else
                        new->trace_output = NULL;

                if (perf_hpp__is_srcline_entry(fmt))
                        he->srcline = NULL;
                else
                        new->srcline = NULL;

                if (perf_hpp__is_srcfile_entry(fmt))
                        he->srcfile = NULL;
                else
                        new->srcfile = NULL;
        }

        rb_link_node(&new->rb_node_in, parent, p);
        rb_insert_color_cached(&new->rb_node_in, root, leftmost);
        return new;
}

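/*
 * In hierarchy mode a flat entry is split into one entry per sort
 * level: each level's copy is inserted into its parent's hroot_in
 * tree, so e.g. 'comm,dso,sym' yields a three-level chain whose leaf
 * carries the callchain.
 */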
static int hists__hierarchy_insert_entry(struct hists *hists,
                                         struct rb_root_cached *root,
                                         struct hist_entry *he)
{
        struct perf_hpp_list_node *node;
        struct hist_entry *new_he = NULL;
        struct hist_entry *parent = NULL;
        int depth = 0;
        int ret = 0;

        list_for_each_entry(node, &hists->hpp_formats, list) {
                /* skip period (overhead) and elided columns */
                if (node->level == 0 || node->skip)
                        continue;

                /* insert copy of 'he' for each fmt into the hierarchy */
                new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
                if (new_he == NULL) {
                        ret = -1;
                        break;
                }

                root = &new_he->hroot_in;
                new_he->depth = depth++;
                parent = new_he;
        }

        if (new_he) {
                new_he->leaf = true;

                if (hist_entry__has_callchains(new_he) &&
                    symbol_conf.use_callchain) {
                        callchain_cursor_reset(&callchain_cursor);
                        if (callchain_merge(&callchain_cursor,
                                            new_he->callchain,
                                            he->callchain) < 0)
                                ret = -1;
                }
        }

        /* 'he' is no longer used */
        hist_entry__delete(he);

        /* return 0 (or -1) since it already applied filters */
        return ret;
}

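/*
 * Returns 1 if 'he' was inserted as a new entry, 0 if it was merged
 * into an existing one (in which case 'he' is freed), or -1 if
 * merging its callchain failed.
 */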
1524static int hists__collapse_insert_entry(struct hists *hists,
1525                                        struct rb_root_cached *root,
1526                                        struct hist_entry *he)
1527{
1528        struct rb_node **p = &root->rb_root.rb_node;
1529        struct rb_node *parent = NULL;
1530        struct hist_entry *iter;
1531        int64_t cmp;
1532        bool leftmost = true;
1533
1534        if (symbol_conf.report_hierarchy)
1535                return hists__hierarchy_insert_entry(hists, root, he);
1536
1537        while (*p != NULL) {
1538                parent = *p;
1539                iter = rb_entry(parent, struct hist_entry, rb_node_in);
1540
1541                cmp = hist_entry__collapse(iter, he);
1542
1543                if (!cmp) {
1544                        int ret = 0;
1545
1546                        he_stat__add_stat(&iter->stat, &he->stat);
1547                        if (symbol_conf.cumulate_callchain)
1548                                he_stat__add_stat(iter->stat_acc, he->stat_acc);
1549
1550                        if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1551                                callchain_cursor_reset(&callchain_cursor);
1552                                if (callchain_merge(&callchain_cursor,
1553                                                    iter->callchain,
1554                                                    he->callchain) < 0)
1555                                        ret = -1;
1556                        }
1557                        hist_entry__delete(he);
1558                        return ret;
1559                }
1560
1561                if (cmp < 0)
1562                        p = &(*p)->rb_left;
1563                else {
1564                        p = &(*p)->rb_right;
1565                        leftmost = false;
1566                }
1567        }
1568        hists->nr_entries++;
1569
1570        rb_link_node(&he->rb_node_in, parent, p);
1571        rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1572        return 1;
1573}
1574
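/*
 * Return the current input tree and switch new insertions over to the
 * other element of entries_in_array[], so that collapsing can consume one
 * tree while the other keeps receiving new entries.
 */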
1575struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1576{
1577        struct rb_root_cached *root;
1578
1579        pthread_mutex_lock(&hists->lock);
1580
1581        root = hists->entries_in;
1582        if (++hists->entries_in > &hists->entries_in_array[1])
1583                hists->entries_in = &hists->entries_in_array[0];
1584
1585        pthread_mutex_unlock(&hists->lock);
1586
1587        return root;
1588}
1589
1590static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1591{
1592        hists__filter_entry_by_dso(hists, he);
1593        hists__filter_entry_by_thread(hists, he);
1594        hists__filter_entry_by_symbol(hists, he);
1595        hists__filter_entry_by_socket(hists, he);
1596}
1597
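/*
 * Fold all entries that are equal on the collapse sort keys into single
 * entries. A minimal sketch of the usual two-phase resort, assuming
 * 'evsel' has already accumulated samples into its hists (error handling
 * omitted):
 *
 *	struct hists *hists = evsel__hists(evsel);
 *
 *	hists__collapse_resort(hists, NULL);
 *	perf_evsel__output_resort(evsel, NULL);
 */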
1598int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1599{
1600        struct rb_root_cached *root;
1601        struct rb_node *next;
1602        struct hist_entry *n;
1603        int ret;
1604
1605        if (!hists__has(hists, need_collapse))
1606                return 0;
1607
1608        hists->nr_entries = 0;
1609
1610        root = hists__get_rotate_entries_in(hists);
1611
1612        next = rb_first_cached(root);
1613
1614        while (next) {
1615                if (session_done())
1616                        break;
1617                n = rb_entry(next, struct hist_entry, rb_node_in);
1618                next = rb_next(&n->rb_node_in);
1619
1620                rb_erase_cached(&n->rb_node_in, root);
1621                ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1622                if (ret < 0)
1623                        return -1;
1624
1625                if (ret) {
1626                        /*
1627                         * If it wasn't combined with one of the entries already
1628                         * collapsed, we need to apply the filters that may have
1629                         * been set by, say, the hist_browser.
1630                         */
1631                        hists__apply_filters(hists, n);
1632                }
1633                if (prog)
1634                        ui_progress__update(prog, 1);
1635        }
1636        return 0;
1637}
1638
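/*
 * Compare two entries with the output sort keys, stopping at the first
 * format that yields a non-zero result; skipped columns take no part in
 * the ordering.
 */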
1639static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1640{
1641        struct hists *hists = a->hists;
1642        struct perf_hpp_fmt *fmt;
1643        int64_t cmp = 0;
1644
1645        hists__for_each_sort_list(hists, fmt) {
1646                if (perf_hpp__should_skip(fmt, a->hists))
1647                        continue;
1648
1649                cmp = fmt->sort(fmt, a, b);
1650                if (cmp)
1651                        break;
1652        }
1653
1654        return cmp;
1655}
1656
1657static void hists__reset_filter_stats(struct hists *hists)
1658{
1659        hists->nr_non_filtered_entries = 0;
1660        hists->stats.total_non_filtered_period = 0;
1661}
1662
1663void hists__reset_stats(struct hists *hists)
1664{
1665        hists->nr_entries = 0;
1666        hists->stats.total_period = 0;
1667
1668        hists__reset_filter_stats(hists);
1669}
1670
1671static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1672{
1673        hists->nr_non_filtered_entries++;
1674        hists->stats.total_non_filtered_period += h->stat.period;
1675}
1676
1677void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1678{
1679        if (!h->filtered)
1680                hists__inc_filter_stats(hists, h);
1681
1682        hists->nr_entries++;
1683        hists->stats.total_period += h->stat.period;
1684}
1685
1686static void hierarchy_recalc_total_periods(struct hists *hists)
1687{
1688        struct rb_node *node;
1689        struct hist_entry *he;
1690
1691        node = rb_first_cached(&hists->entries);
1692
1693        hists->stats.total_period = 0;
1694        hists->stats.total_non_filtered_period = 0;
1695
1696        /*
1697         * Recalculate the total period using top-level entries only,
1698         * since lower level entries only account for non-filtered
1699         * entries while upper level entries sum both kinds.
1700         */
1701        while (node) {
1702                he = rb_entry(node, struct hist_entry, rb_node);
1703                node = rb_next(node);
1704
1705                hists->stats.total_period += he->stat.period;
1706                if (!he->filtered)
1707                        hists->stats.total_non_filtered_period += he->stat.period;
1708        }
1709}
1710
1711static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1712                                          struct hist_entry *he)
1713{
1714        struct rb_node **p = &root->rb_root.rb_node;
1715        struct rb_node *parent = NULL;
1716        struct hist_entry *iter;
1717        struct perf_hpp_fmt *fmt;
1718        bool leftmost = true;
1719
1720        while (*p != NULL) {
1721                parent = *p;
1722                iter = rb_entry(parent, struct hist_entry, rb_node);
1723
1724                if (hist_entry__sort(he, iter) > 0)
1725                        p = &parent->rb_left;
1726                else {
1727                        p = &parent->rb_right;
1728                        leftmost = false;
1729                }
1730        }
1731
1732        rb_link_node(&he->rb_node, parent, p);
1733        rb_insert_color_cached(&he->rb_node, root, leftmost);
1734
1735        /* update column width of dynamic entry */
1736        perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1737                if (perf_hpp__is_dynamic_entry(fmt))
1738                        fmt->sort(fmt, he, NULL);
1739        }
1740}
1741
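/*
 * Recursively move the entries of each hierarchy level from the input
 * tree (hroot_in) to the output tree (hroot_out), updating the stats and
 * column widths and sorting the callchains of leaf entries.
 */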
1742static void hists__hierarchy_output_resort(struct hists *hists,
1743                                           struct ui_progress *prog,
1744                                           struct rb_root_cached *root_in,
1745                                           struct rb_root_cached *root_out,
1746                                           u64 min_callchain_hits,
1747                                           bool use_callchain)
1748{
1749        struct rb_node *node;
1750        struct hist_entry *he;
1751
1752        *root_out = RB_ROOT_CACHED;
1753        node = rb_first_cached(root_in);
1754
1755        while (node) {
1756                he = rb_entry(node, struct hist_entry, rb_node_in);
1757                node = rb_next(node);
1758
1759                hierarchy_insert_output_entry(root_out, he);
1760
1761                if (prog)
1762                        ui_progress__update(prog, 1);
1763
1764                hists->nr_entries++;
1765                if (!he->filtered) {
1766                        hists->nr_non_filtered_entries++;
1767                        hists__calc_col_len(hists, he);
1768                }
1769
1770                if (!he->leaf) {
1771                        hists__hierarchy_output_resort(hists, prog,
1772                                                       &he->hroot_in,
1773                                                       &he->hroot_out,
1774                                                       min_callchain_hits,
1775                                                       use_callchain);
1776                        continue;
1777                }
1778
1779                if (!use_callchain)
1780                        continue;
1781
1782                if (callchain_param.mode == CHAIN_GRAPH_REL) {
1783                        u64 total = he->stat.period;
1784
1785                        if (symbol_conf.cumulate_callchain)
1786                                total = he->stat_acc->period;
1787
1788                        min_callchain_hits = total * (callchain_param.min_percent / 100);
1789                }
1790
1791                callchain_param.sort(&he->sorted_chain, he->callchain,
1792                                     min_callchain_hits, &callchain_param);
1793        }
1794}
1795
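/*
 * Insert 'he' into the output tree in sort order, first sorting its
 * callchain against the minimum hits threshold when callchains are used.
 */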
1796static void __hists__insert_output_entry(struct rb_root_cached *entries,
1797                                         struct hist_entry *he,
1798                                         u64 min_callchain_hits,
1799                                         bool use_callchain)
1800{
1801        struct rb_node **p = &entries->rb_root.rb_node;
1802        struct rb_node *parent = NULL;
1803        struct hist_entry *iter;
1804        struct perf_hpp_fmt *fmt;
1805        bool leftmost = true;
1806
1807        if (use_callchain) {
1808                if (callchain_param.mode == CHAIN_GRAPH_REL) {
1809                        u64 total = he->stat.period;
1810
1811                        if (symbol_conf.cumulate_callchain)
1812                                total = he->stat_acc->period;
1813
1814                        min_callchain_hits = total * (callchain_param.min_percent / 100);
1815                }
1816                callchain_param.sort(&he->sorted_chain, he->callchain,
1817                                      min_callchain_hits, &callchain_param);
1818        }
1819
1820        while (*p != NULL) {
1821                parent = *p;
1822                iter = rb_entry(parent, struct hist_entry, rb_node);
1823
1824                if (hist_entry__sort(he, iter) > 0)
1825                        p = &(*p)->rb_left;
1826                else {
1827                        p = &(*p)->rb_right;
1828                        leftmost = false;
1829                }
1830        }
1831
1832        rb_link_node(&he->rb_node, parent, p);
1833        rb_insert_color_cached(&he->rb_node, entries, leftmost);
1834
1835        perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1836                if (perf_hpp__is_dynamic_entry(fmt) &&
1837                    perf_hpp__defined_dynamic_entry(fmt, he->hists))
1838                        fmt->sort(fmt, he, NULL);  /* update column width */
1839        }
1840}
1841
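/*
 * Build hists->entries, the tree the UIs display: sort the collapsed
 * entries with the output keys and recompute the stats and column widths
 * (delegating to the hierarchy variant in hierarchy mode). The optional
 * 'cb' callback can claim individual entries, keeping them out of the
 * output tree.
 */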
1842static void output_resort(struct hists *hists, struct ui_progress *prog,
1843                          bool use_callchain, hists__resort_cb_t cb,
1844                          void *cb_arg)
1845{
1846        struct rb_root_cached *root;
1847        struct rb_node *next;
1848        struct hist_entry *n;
1849        u64 callchain_total;
1850        u64 min_callchain_hits;
1851
1852        callchain_total = hists->callchain_period;
1853        if (symbol_conf.filter_relative)
1854                callchain_total = hists->callchain_non_filtered_period;
1855
1856        min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1857
1858        hists__reset_stats(hists);
1859        hists__reset_col_len(hists);
1860
1861        if (symbol_conf.report_hierarchy) {
1862                hists__hierarchy_output_resort(hists, prog,
1863                                               &hists->entries_collapsed,
1864                                               &hists->entries,
1865                                               min_callchain_hits,
1866                                               use_callchain);
1867                hierarchy_recalc_total_periods(hists);
1868                return;
1869        }
1870
1871        if (hists__has(hists, need_collapse))
1872                root = &hists->entries_collapsed;
1873        else
1874                root = hists->entries_in;
1875
1876        next = rb_first_cached(root);
1877        hists->entries = RB_ROOT_CACHED;
1878
1879        while (next) {
1880                n = rb_entry(next, struct hist_entry, rb_node_in);
1881                next = rb_next(&n->rb_node_in);
1882
1883                if (cb && cb(n, cb_arg))
1884                        continue;
1885
1886                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1887                hists__inc_stats(hists, n);
1888
1889                if (!n->filtered)
1890                        hists__calc_col_len(hists, n);
1891
1892                if (prog)
1893                        ui_progress__update(prog, 1);
1894        }
1895}
1896
1897void perf_evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
1898                                  hists__resort_cb_t cb, void *cb_arg)
1899{
1900        bool use_callchain;
1901
1902        if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1903                use_callchain = evsel__has_callchain(evsel);
1904        else
1905                use_callchain = symbol_conf.use_callchain;
1906
1907        use_callchain |= symbol_conf.show_branchflag_count;
1908
1909        output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
1910}
1911
1912void perf_evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
1913{
1914        return perf_evsel__output_resort_cb(evsel, prog, NULL, NULL);
1915}
1916
1917void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1918{
1919        output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
1920}
1921
1922void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1923                             hists__resort_cb_t cb)
1924{
1925        output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
1926}
1927
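/*
 * Hierarchy navigation helpers for the browsers: an entry's children are
 * only reachable when it is unfolded, unless HMD_FORCE_CHILD/SIBLING
 * overrides the folding state.
 */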
1928static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1929{
1930        if (he->leaf || hmd == HMD_FORCE_SIBLING)
1931                return false;
1932
1933        if (he->unfolded || hmd == HMD_FORCE_CHILD)
1934                return true;
1935
1936        return false;
1937}
1938
1939struct rb_node *rb_hierarchy_last(struct rb_node *node)
1940{
1941        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1942
1943        while (can_goto_child(he, HMD_NORMAL)) {
1944                node = rb_last(&he->hroot_out.rb_root);
1945                he = rb_entry(node, struct hist_entry, rb_node);
1946        }
1947        return node;
1948}
1949
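/*
 * Advance to the next entry in display order, descending into children or
 * climbing back to the parent as needed. A sketch of walking an already
 * resorted hierarchy:
 *
 *	struct rb_node *nd = rb_first_cached(&hists->entries);
 *
 *	while (nd) {
 *		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
 *		...
 *		nd = __rb_hierarchy_next(&he->rb_node, HMD_NORMAL);
 *	}
 */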
1950struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
1951{
1952        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1953
1954        if (can_goto_child(he, hmd))
1955                node = rb_first_cached(&he->hroot_out);
1956        else
1957                node = rb_next(node);
1958
1959        while (node == NULL) {
1960                he = he->parent_he;
1961                if (he == NULL)
1962                        break;
1963
1964                node = rb_next(&he->rb_node);
1965        }
1966        return node;
1967}
1968
1969struct rb_node *rb_hierarchy_prev(struct rb_node *node)
1970{
1971        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1972
1973        node = rb_prev(node);
1974        if (node)
1975                return rb_hierarchy_last(node);
1976
1977        he = he->parent_he;
1978        if (he == NULL)
1979                return NULL;
1980
1981        return &he->rb_node;
1982}
1983
1984bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
1985{
1986        struct rb_node *node;
1987        struct hist_entry *child;
1988        float percent;
1989
1990        if (he->leaf)
1991                return false;
1992
1993        node = rb_first_cached(&he->hroot_out);
1994        child = rb_entry(node, struct hist_entry, rb_node);
1995
1996        while (node && child->filtered) {
1997                node = rb_next(node);
1998                child = rb_entry(node, struct hist_entry, rb_node);
1999        }
2000
2001        if (node)
2002                percent = hist_entry__get_percent_limit(child);
2003        else
2004                percent = 0;
2005
2006        return node && percent >= limit;
2007}
2008
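/*
 * Called for entries that pass the active filter: clear the filter bit,
 * propagate the period to the hierarchy parents (clearing their bit as
 * well) and account the entry in the non-filtered stats and column
 * widths.
 */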
2009static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
2010                                       enum hist_filter filter)
2011{
2012        h->filtered &= ~(1 << filter);
2013
2014        if (symbol_conf.report_hierarchy) {
2015                struct hist_entry *parent = h->parent_he;
2016
2017                while (parent) {
2018                        he_stat__add_stat(&parent->stat, &h->stat);
2019
2020                        parent->filtered &= ~(1 << filter);
2021
2022                        if (parent->filtered)
2023                                goto next;
2024
2025                        /* force fold unfiltered entry for simplicity */
2026                        parent->unfolded = false;
2027                        parent->has_no_entry = false;
2028                        parent->row_offset = 0;
2029                        parent->nr_rows = 0;
2030next:
2031                        parent = parent->parent_he;
2032                }
2033        }
2034
2035        if (h->filtered)
2036                return;
2037
2038        /* force fold unfiltered entry for simplicity */
2039        h->unfolded = false;
2040        h->has_no_entry = false;
2041        h->row_offset = 0;
2042        h->nr_rows = 0;
2043
2044        hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2045
2046        hists__inc_filter_stats(hists, h);
2047        hists__calc_col_len(hists, h);
2048}
2049
2050
2051static bool hists__filter_entry_by_dso(struct hists *hists,
2052                                       struct hist_entry *he)
2053{
2054        if (hists->dso_filter != NULL &&
2055            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
2056                he->filtered |= (1 << HIST_FILTER__DSO);
2057                return true;
2058        }
2059
2060        return false;
2061}
2062
2063static bool hists__filter_entry_by_thread(struct hists *hists,
2064                                          struct hist_entry *he)
2065{
2066        if (hists->thread_filter != NULL &&
2067            he->thread != hists->thread_filter) {
2068                he->filtered |= (1 << HIST_FILTER__THREAD);
2069                return true;
2070        }
2071
2072        return false;
2073}
2074
2075static bool hists__filter_entry_by_symbol(struct hists *hists,
2076                                          struct hist_entry *he)
2077{
2078        if (hists->symbol_filter_str != NULL &&
2079            (!he->ms.sym || strstr(he->ms.sym->name,
2080                                   hists->symbol_filter_str) == NULL)) {
2081                he->filtered |= (1 << HIST_FILTER__SYMBOL);
2082                return true;
2083        }
2084
2085        return false;
2086}
2087
2088static bool hists__filter_entry_by_socket(struct hists *hists,
2089                                          struct hist_entry *he)
2090{
2091        if ((hists->socket_filter > -1) &&
2092            (he->socket != hists->socket_filter)) {
2093                he->filtered |= (1 << HIST_FILTER__SOCKET);
2094                return true;
2095        }
2096
2097        return false;
2098}
2099
2100typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2101
2102static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2103{
2104        struct rb_node *nd;
2105
2106        hists->stats.nr_non_filtered_samples = 0;
2107
2108        hists__reset_filter_stats(hists);
2109        hists__reset_col_len(hists);
2110
2111        for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2112                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2113
2114                if (filter(hists, h))
2115                        continue;
2116
2117                hists__remove_entry_filter(hists, h, type);
2118        }
2119}
2120
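/*
 * Re-insert 'he', and recursively its unfiltered children, into a fresh
 * output tree so that the ordering reflects the periods recomputed after
 * filtering.
 */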
2121static void resort_filtered_entry(struct rb_root_cached *root,
2122                                  struct hist_entry *he)
2123{
2124        struct rb_node **p = &root->rb_root.rb_node;
2125        struct rb_node *parent = NULL;
2126        struct hist_entry *iter;
2127        struct rb_root_cached new_root = RB_ROOT_CACHED;
2128        struct rb_node *nd;
2129        bool leftmost = true;
2130
2131        while (*p != NULL) {
2132                parent = *p;
2133                iter = rb_entry(parent, struct hist_entry, rb_node);
2134
2135                if (hist_entry__sort(he, iter) > 0)
2136                        p = &(*p)->rb_left;
2137                else {
2138                        p = &(*p)->rb_right;
2139                        leftmost = false;
2140                }
2141        }
2142
2143        rb_link_node(&he->rb_node, parent, p);
2144        rb_insert_color_cached(&he->rb_node, root, leftmost);
2145
2146        if (he->leaf || he->filtered)
2147                return;
2148
2149        nd = rb_first_cached(&he->hroot_out);
2150        while (nd) {
2151                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2152
2153                nd = rb_next(nd);
2154                rb_erase_cached(&h->rb_node, &he->hroot_out);
2155
2156                resort_filtered_entry(&new_root, h);
2157        }
2158
2159        he->hroot_out = new_root;
2160}
2161
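/*
 * Apply a filter to a hierarchy: hist_entry__filter() returns < 0 for a
 * non-matching type (zero the period and descend into the children), 1
 * when the entry is filtered out (skip the whole subtree) and 0 when it
 * survives. The output tree is rebuilt afterwards since periods at the
 * upper levels may have changed.
 */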
2162static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2163{
2164        struct rb_node *nd;
2165        struct rb_root_cached new_root = RB_ROOT_CACHED;
2166
2167        hists->stats.nr_non_filtered_samples = 0;
2168
2169        hists__reset_filter_stats(hists);
2170        hists__reset_col_len(hists);
2171
2172        nd = rb_first_cached(&hists->entries);
2173        while (nd) {
2174                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2175                int ret;
2176
2177                ret = hist_entry__filter(h, type, arg);
2178
2179                /*
2180                 * case 1. non-matching type
2181                 * zero out the period, set filter marker and move to child
2182                 */
2183                if (ret < 0) {
2184                        memset(&h->stat, 0, sizeof(h->stat));
2185                        h->filtered |= (1 << type);
2186
2187                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2188                }
2189                /*
2190                 * case 2. matched type (filter out)
2191                 * set filter marker and move to next
2192                 */
2193                else if (ret == 1) {
2194                        h->filtered |= (1 << type);
2195
2196                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2197                }
2198                /*
2199                 * case 3. ok (not filtered)
2200                 * add period to hists and parents, erase the filter marker
2201                 * and move to next sibling
2202                 */
2203                else {
2204                        hists__remove_entry_filter(hists, h, type);
2205
2206                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2207                }
2208        }
2209
2210        hierarchy_recalc_total_periods(hists);
2211
2212        /*
2213         * Resort the output after applying a new filter, since a filter at
2214         * a lower hierarchy level can change periods at upper levels.
2215         */
2216        nd = rb_first_cached(&hists->entries);
2217        while (nd) {
2218                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2219
2220                nd = rb_next(nd);
2221                rb_erase_cached(&h->rb_node, &hists->entries);
2222
2223                resort_filtered_entry(&new_root, h);
2224        }
2225
2226        hists->entries = new_root;
2227}
2228
2229void hists__filter_by_thread(struct hists *hists)
2230{
2231        if (symbol_conf.report_hierarchy)
2232                hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2233                                        hists->thread_filter);
2234        else
2235                hists__filter_by_type(hists, HIST_FILTER__THREAD,
2236                                      hists__filter_entry_by_thread);
2237}
2238
2239void hists__filter_by_dso(struct hists *hists)
2240{
2241        if (symbol_conf.report_hierarchy)
2242                hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2243                                        hists->dso_filter);
2244        else
2245                hists__filter_by_type(hists, HIST_FILTER__DSO,
2246                                      hists__filter_entry_by_dso);
2247}
2248
2249void hists__filter_by_symbol(struct hists *hists)
2250{
2251        if (symbol_conf.report_hierarchy)
2252                hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2253                                        hists->symbol_filter_str);
2254        else
2255                hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2256                                      hists__filter_entry_by_symbol);
2257}
2258
2259void hists__filter_by_socket(struct hists *hists)
2260{
2261        if (symbol_conf.report_hierarchy)
2262                hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2263                                        &hists->socket_filter);
2264        else
2265                hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2266                                      hists__filter_entry_by_socket);
2267}
2268
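/* nr_events[0] doubles as the total count across all event types. */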
2269void events_stats__inc(struct events_stats *stats, u32 type)
2270{
2271        ++stats->nr_events[0];
2272        ++stats->nr_events[type];
2273}
2274
2275void hists__inc_nr_events(struct hists *hists, u32 type)
2276{
2277        events_stats__inc(&hists->stats, type);
2278}
2279
2280void hists__inc_nr_samples(struct hists *hists, bool filtered)
2281{
2282        events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
2283        if (!filtered)
2284                hists->stats.nr_non_filtered_samples++;
2285}
2286
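/*
 * Find the entry matching 'pair' in 'hists', adding a zero-period dummy
 * placeholder when none exists, so the leader ends up with a bucket for
 * every entry of the paired hists (see hists__link() below).
 */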
2287static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2288                                                 struct hist_entry *pair)
2289{
2290        struct rb_root_cached *root;
2291        struct rb_node **p;
2292        struct rb_node *parent = NULL;
2293        struct hist_entry *he;
2294        int64_t cmp;
2295        bool leftmost = true;
2296
2297        if (hists__has(hists, need_collapse))
2298                root = &hists->entries_collapsed;
2299        else
2300                root = hists->entries_in;
2301
2302        p = &root->rb_root.rb_node;
2303
2304        while (*p != NULL) {
2305                parent = *p;
2306                he = rb_entry(parent, struct hist_entry, rb_node_in);
2307
2308                cmp = hist_entry__collapse(he, pair);
2309
2310                if (!cmp)
2311                        goto out;
2312
2313                if (cmp < 0)
2314                        p = &(*p)->rb_left;
2315                else {
2316                        p = &(*p)->rb_right;
2317                        leftmost = false;
2318                }
2319        }
2320
2321        he = hist_entry__new(pair, true);
2322        if (he) {
2323                memset(&he->stat, 0, sizeof(he->stat));
2324                he->hists = hists;
2325                if (symbol_conf.cumulate_callchain)
2326                        memset(he->stat_acc, 0, sizeof(*he->stat_acc));
2327                rb_link_node(&he->rb_node_in, parent, p);
2328                rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2329                hists__inc_stats(hists, he);
2330                he->dummy = true;
2331        }
2332out:
2333        return he;
2334}
2335
2336static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2337                                                    struct rb_root_cached *root,
2338                                                    struct hist_entry *pair)
2339{
2340        struct rb_node **p;
2341        struct rb_node *parent = NULL;
2342        struct hist_entry *he;
2343        struct perf_hpp_fmt *fmt;
2344        bool leftmost = true;
2345
2346        p = &root->rb_root.rb_node;
2347        while (*p != NULL) {
2348                int64_t cmp = 0;
2349
2350                parent = *p;
2351                he = rb_entry(parent, struct hist_entry, rb_node_in);
2352
2353                perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2354                        cmp = fmt->collapse(fmt, he, pair);
2355                        if (cmp)
2356                                break;
2357                }
2358                if (!cmp)
2359                        goto out;
2360
2361                if (cmp < 0)
2362                        p = &parent->rb_left;
2363                else {
2364                        p = &parent->rb_right;
2365                        leftmost = false;
2366                }
2367        }
2368
2369        he = hist_entry__new(pair, true);
2370        if (he) {
2371                rb_link_node(&he->rb_node_in, parent, p);
2372                rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2373
2374                he->dummy = true;
2375                he->hists = hists;
2376                memset(&he->stat, 0, sizeof(he->stat));
2377                hists__inc_stats(hists, he);
2378        }
2379out:
2380        return he;
2381}
2382
2383static struct hist_entry *hists__find_entry(struct hists *hists,
2384                                            struct hist_entry *he)
2385{
2386        struct rb_node *n;
2387
2388        if (hists__has(hists, need_collapse))
2389                n = hists->entries_collapsed.rb_root.rb_node;
2390        else
2391                n = hists->entries_in->rb_root.rb_node;
2392
2393        while (n) {
2394                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2395                int64_t cmp = hist_entry__collapse(iter, he);
2396
2397                if (cmp < 0)
2398                        n = n->rb_left;
2399                else if (cmp > 0)
2400                        n = n->rb_right;
2401                else
2402                        return iter;
2403        }
2404
2405        return NULL;
2406}
2407
2408static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2409                                                      struct hist_entry *he)
2410{
2411        struct rb_node *n = root->rb_root.rb_node;
2412
2413        while (n) {
2414                struct hist_entry *iter;
2415                struct perf_hpp_fmt *fmt;
2416                int64_t cmp = 0;
2417
2418                iter = rb_entry(n, struct hist_entry, rb_node_in);
2419                perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2420                        cmp = fmt->collapse(fmt, iter, he);
2421                        if (cmp)
2422                                break;
2423                }
2424
2425                if (cmp < 0)
2426                        n = n->rb_left;
2427                else if (cmp > 0)
2428                        n = n->rb_right;
2429                else
2430                        return iter;
2431        }
2432
2433        return NULL;
2434}
2435
2436static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2437                                   struct rb_root_cached *other_root)
2438{
2439        struct rb_node *nd;
2440        struct hist_entry *pos, *pair;
2441
2442        for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2443                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2444                pair = hists__find_hierarchy_entry(other_root, pos);
2445
2446                if (pair) {
2447                        hist_entry__add_pair(pair, pos);
2448                        hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2449                }
2450        }
2451}
2452
2453/*
2454 * Look for pairs to link to the leader buckets (hist_entries):
2455 */
2456void hists__match(struct hists *leader, struct hists *other)
2457{
2458        struct rb_root_cached *root;
2459        struct rb_node *nd;
2460        struct hist_entry *pos, *pair;
2461
2462        if (symbol_conf.report_hierarchy) {
2463                /* hierarchy report always collapses entries */
2464                return hists__match_hierarchy(&leader->entries_collapsed,
2465                                              &other->entries_collapsed);
2466        }
2467
2468        if (hists__has(leader, need_collapse))
2469                root = &leader->entries_collapsed;
2470        else
2471                root = leader->entries_in;
2472
2473        for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2474                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2475                pair = hists__find_entry(other, pos);
2476
2477                if (pair)
2478                        hist_entry__add_pair(pair, pos);
2479        }
2480}
2481
2482static int hists__link_hierarchy(struct hists *leader_hists,
2483                                 struct hist_entry *parent,
2484                                 struct rb_root_cached *leader_root,
2485                                 struct rb_root_cached *other_root)
2486{
2487        struct rb_node *nd;
2488        struct hist_entry *pos, *leader;
2489
2490        for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2491                pos = rb_entry(nd, struct hist_entry, rb_node_in);
2492
2493                if (hist_entry__has_pairs(pos)) {
2494                        bool found = false;
2495
2496                        list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2497                                if (leader->hists == leader_hists) {
2498                                        found = true;
2499                                        break;
2500                                }
2501                        }
2502                        if (!found)
2503                                return -1;
2504                } else {
2505                        leader = add_dummy_hierarchy_entry(leader_hists,
2506                                                           leader_root, pos);
2507                        if (leader == NULL)
2508                                return -1;
2509
2510                        /* the parent must come from the leader hierarchy, not from 'pos' */
2511                        leader->parent_he = parent;
2512
2513                        hist_entry__add_pair(pos, leader);
2514                }
2515
2516                if (!pos->leaf) {
2517                        if (hists__link_hierarchy(leader_hists, leader,
2518                                                  &leader->hroot_in,
2519                                                  &pos->hroot_in) < 0)
2520                                return -1;
2521                }
2522        }
2523        return 0;
2524}
2525
2526/*
2527 * Look for entries in the other hists that are not present in the leader; if
2528 * we find any, just add a dummy entry on the leader hists, with period=0,
2529 * nr_events=0, to serve as the list header.
2530 */
2531int hists__link(struct hists *leader, struct hists *other)
2532{
2533        struct rb_root_cached *root;
2534        struct rb_node *nd;
2535        struct hist_entry *pos, *pair;
2536
2537        if (symbol_conf.report_hierarchy) {
2538                /* hierarchy report always collapses entries */
2539                return hists__link_hierarchy(leader, NULL,
2540                                             &leader->entries_collapsed,
2541                                             &other->entries_collapsed);
2542        }
2543
2544        if (hists__has(other, need_collapse))
2545                root = &other->entries_collapsed;
2546        else
2547                root = other->entries_in;
2548
2549        for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2550                pos = rb_entry(nd, struct hist_entry, rb_node_in);
2551
2552                if (!hist_entry__has_pairs(pos)) {
2553                        pair = hists__add_dummy_entry(leader, pos);
2554                        if (pair == NULL)
2555                                return -1;
2556                        hist_entry__add_pair(pos, pair);
2557                }
2558        }
2559
2560        return 0;
2561}
2562
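/*
 * Undo hists__match()/hists__link(): detach every entry from its pair
 * list.
 */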
2563int hists__unlink(struct hists *hists)
2564{
2565        struct rb_root_cached *root;
2566        struct rb_node *nd;
2567        struct hist_entry *pos;
2568
2569        if (hists__has(hists, need_collapse))
2570                root = &hists->entries_collapsed;
2571        else
2572                root = hists->entries_in;
2573
2574        for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2575                pos = rb_entry(nd, struct hist_entry, rb_node_in);
2576                list_del_init(&pos->pairs.node);
2577        }
2578
2579        return 0;
2580}
2581
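/*
 * Account the branch cycles of a sample's branch stack to the address
 * ranges they cover, optionally accumulating the sum in *total_cycles.
 */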
2582void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2583                          struct perf_sample *sample, bool nonany_branch_mode,
2584                          u64 *total_cycles)
2585{
2586        struct branch_info *bi;
2587
2588        /* If we have branch cycles, always annotate them. */
2589        if (bs && bs->nr && bs->entries[0].flags.cycles) {
2590                int i;
2591
2592                bi = sample__resolve_bstack(sample, al);
2593                if (bi) {
2594                        struct addr_map_symbol *prev = NULL;
2595
2596                        /*
2597                         * Ignore errors, we still want to process the
2598                         * other entries.
2599                         *
2600                         * For non-standard branch modes always
2601                         * force no IPC (prev == NULL).
2602                         *
2603                         * Note that perf stores branches reversed from
2604                         * program order!
2605                         */
2606                        for (i = bs->nr - 1; i >= 0; i--) {
2607                                addr_map_symbol__account_cycles(&bi[i].from,
2608                                        nonany_branch_mode ? NULL : prev,
2609                                        bi[i].flags.cycles);
2610                                prev = &bi[i].to;
2611
2612                                if (total_cycles)
2613                                        *total_cycles += bi[i].flags.cycles;
2614                        }
2615                        free(bi);
2616                }
2617        }
2618}
2619
2620size_t perf_evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
2621{
2622        struct evsel *pos;
2623        size_t ret = 0;
2624
2625        evlist__for_each_entry(evlist, pos) {
2626                ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
2627                ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
2628        }
2629
2630        return ret;
2631}
2632
2633
2634u64 hists__total_period(struct hists *hists)
2635{
2636        return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2637                hists->stats.total_period;
2638}
2639
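/*
 * Format the headline shown by the report/top browsers: sample and event
 * counts (aggregated over the group for group events) followed by any
 * active UID, thread, DSO and processor socket filters.
 */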
2640int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2641{
2642        char unit;
2643        int printed;
2644        const struct dso *dso = hists->dso_filter;
2645        struct thread *thread = hists->thread_filter;
2646        int socket_id = hists->socket_filter;
2647        unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
2648        u64 nr_events = hists->stats.total_period;
2649        struct evsel *evsel = hists_to_evsel(hists);
2650        const char *ev_name = perf_evsel__name(evsel);
2651        char buf[512], sample_freq_str[64] = "";
2652        size_t buflen = sizeof(buf);
2653        char ref[30] = " show reference callgraph, ";
2654        bool enable_ref = false;
2655
2656        if (symbol_conf.filter_relative) {
2657                nr_samples = hists->stats.nr_non_filtered_samples;
2658                nr_events = hists->stats.total_non_filtered_period;
2659        }
2660
2661        if (perf_evsel__is_group_event(evsel)) {
2662                struct evsel *pos;
2663
2664                perf_evsel__group_desc(evsel, buf, buflen);
2665                ev_name = buf;
2666
2667                for_each_group_member(pos, evsel) {
2668                        struct hists *pos_hists = evsel__hists(pos);
2669
2670                        if (symbol_conf.filter_relative) {
2671                                nr_samples += pos_hists->stats.nr_non_filtered_samples;
2672                                nr_events += pos_hists->stats.total_non_filtered_period;
2673                        } else {
2674                                nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
2675                                nr_events += pos_hists->stats.total_period;
2676                        }
2677                }
2678        }
2679
2680        if (symbol_conf.show_ref_callgraph &&
2681            strstr(ev_name, "call-graph=no"))
2682                enable_ref = true;
2683
2684        if (show_freq)
2685                scnprintf(sample_freq_str, sizeof(sample_freq_str), " %" PRIu64 " Hz,", (u64)evsel->core.attr.sample_freq);
2686
2687        nr_samples = convert_unit(nr_samples, &unit);
2688        printed = scnprintf(bf, size,
2689                           "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2690                           nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
2691                           ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2692
2693
2694        if (hists->uid_filter_str)
2695                printed += scnprintf(bf + printed, size - printed,
2696                                    ", UID: %s", hists->uid_filter_str);
2697        if (thread) {
2698                if (hists__has(hists, thread)) {
2699                        printed += scnprintf(bf + printed, size - printed,
2700                                    ", Thread: %s(%d)",
2701                                     (thread->comm_set ? thread__comm_str(thread) : ""),
2702                                    thread->tid);
2703                } else {
2704                        printed += scnprintf(bf + printed, size - printed,
2705                                    ", Thread: %s",
2706                                     (thread->comm_set ? thread__comm_str(thread) : ""));
2707                }
2708        }
2709        if (dso)
2710                printed += scnprintf(bf + printed, size - printed,
2711                                    ", DSO: %s", dso->short_name);
2712        if (socket_id > -1)
2713                printed += scnprintf(bf + printed, size - printed,
2714                                    ", Processor Socket: %d", socket_id);
2715
2716        return printed;
2717}
2718
2719int parse_filter_percentage(const struct option *opt __maybe_unused,
2720                            const char *arg, int unset __maybe_unused)
2721{
2722        if (!strcmp(arg, "relative"))
2723                symbol_conf.filter_relative = true;
2724        else if (!strcmp(arg, "absolute"))
2725                symbol_conf.filter_relative = false;
2726        else {
2727                pr_debug("Invalid percentage: %s\n", arg);
2728                return -1;
2729        }
2730
2731        return 0;
2732}
2733
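/*
 * Handle the 'hist.percentage' perfconfig variable, e.g. this
 * ~/.perfconfig snippet (a sketch):
 *
 *	[hist]
 *		percentage = relative
 */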
2734int perf_hist_config(const char *var, const char *value)
2735{
2736        if (!strcmp(var, "hist.percentage"))
2737                return parse_filter_percentage(NULL, value, 0);
2738
2739        return 0;
2740}
2741
2742int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2743{
2744        memset(hists, 0, sizeof(*hists));
2745        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
2746        hists->entries_in = &hists->entries_in_array[0];
2747        hists->entries_collapsed = RB_ROOT_CACHED;
2748        hists->entries = RB_ROOT_CACHED;
2749        pthread_mutex_init(&hists->lock, NULL);
2750        hists->socket_filter = -1;
2751        hists->hpp_list = hpp_list;
2752        INIT_LIST_HEAD(&hists->hpp_formats);
2753        return 0;
2754}
2755
2756static void hists__delete_remaining_entries(struct rb_root_cached *root)
2757{
2758        struct rb_node *node;
2759        struct hist_entry *he;
2760
2761        while (!RB_EMPTY_ROOT(&root->rb_root)) {
2762                node = rb_first_cached(root);
2763                rb_erase_cached(node, root);
2764
2765                he = rb_entry(node, struct hist_entry, rb_node_in);
2766                hist_entry__delete(he);
2767        }
2768}
2769
2770static void hists__delete_all_entries(struct hists *hists)
2771{
2772        hists__delete_entries(hists);
2773        hists__delete_remaining_entries(&hists->entries_in_array[0]);
2774        hists__delete_remaining_entries(&hists->entries_in_array[1]);
2775        hists__delete_remaining_entries(&hists->entries_collapsed);
2776}
2777
2778static void hists_evsel__exit(struct evsel *evsel)
2779{
2780        struct hists *hists = evsel__hists(evsel);
2781        struct perf_hpp_fmt *fmt, *pos;
2782        struct perf_hpp_list_node *node, *tmp;
2783
2784        hists__delete_all_entries(hists);
2785
2786        list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2787                perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2788                        list_del_init(&fmt->list);
2789                        free(fmt);
2790                }
2791                list_del_init(&node->list);
2792                free(node);
2793        }
2794}
2795
2796static int hists_evsel__init(struct evsel *evsel)
2797{
2798        struct hists *hists = evsel__hists(evsel);
2799
2800        __hists__init(hists, &perf_hpp_list);
2801        return 0;
2802}
2803
2804/*
2805 * hists_evsel__exit() above frees the hist_entries stored in the
2806 * rbtrees when an evsel goes away.
2807 */
2808
2809int hists__init(void)
2810{
2811        int err = perf_evsel__object_config(sizeof(struct hists_evsel),
2812                                            hists_evsel__init,
2813                                            hists_evsel__exit);
2814        if (err)
2815                fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2816
2817        return err;
2818}
2819
2820void perf_hpp_list__init(struct perf_hpp_list *list)
2821{
2822        INIT_LIST_HEAD(&list->fields);
2823        INIT_LIST_HEAD(&list->sorts);
2824}
2825