/* linux/tools/perf/util/hist.c */
   1#include "util.h"
   2#include "build-id.h"
   3#include "hist.h"
   4#include "session.h"
   5#include "sort.h"
   6#include "evlist.h"
   7#include "evsel.h"
   8#include "annotate.h"
   9#include "ui/progress.h"
  10#include <math.h>
  11
  12static bool hists__filter_entry_by_dso(struct hists *hists,
  13                                       struct hist_entry *he);
  14static bool hists__filter_entry_by_thread(struct hists *hists,
  15                                          struct hist_entry *he);
  16static bool hists__filter_entry_by_symbol(struct hists *hists,
  17                                          struct hist_entry *he);
  18static bool hists__filter_entry_by_socket(struct hists *hists,
  19                                          struct hist_entry *he);
  20
  21u16 hists__col_len(struct hists *hists, enum hist_column col)
  22{
  23        return hists->col_len[col];
  24}
  25
  26void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
  27{
  28        hists->col_len[col] = len;
  29}
  30
  31bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
  32{
  33        if (len > hists__col_len(hists, col)) {
  34                hists__set_col_len(hists, col, len);
  35                return true;
  36        }
  37        return false;
  38}
  39
  40void hists__reset_col_len(struct hists *hists)
  41{
  42        enum hist_column col;
  43
  44        for (col = 0; col < HISTC_NR_COLS; ++col)
  45                hists__set_col_len(hists, col, 0);
  46}
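
/*
 * A minimal usage sketch (the helper below is hypothetical, added for
 * illustration): the three accessors above implement a high-water mark
 * per column -- hists__new_col_len() only ever widens a column, and
 * hists__reset_col_len() starts a fresh measurement pass.
 */
#if 0
static void hists__fit_col_to_names(struct hists *hists, enum hist_column col,
                                    const char **names, int nr)
{
        int i;

        hists__reset_col_len(hists);            /* every column back to 0 */
        for (i = 0; i < nr; i++)                /* width grows, never shrinks */
                hists__new_col_len(hists, col, strlen(names[i]));
}
#endif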
  47
  48static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
  49{
  50        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
  51
  52        if (hists__col_len(hists, dso) < unresolved_col_width &&
  53            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
  54            !symbol_conf.dso_list)
  55                hists__set_col_len(hists, dso, unresolved_col_width);
  56}
  57
  58void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
  59{
  60        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
  61        int symlen;
  62        u16 len;
  63
  64        /*
  65         * +4 accounts for '[x] ' priv level info
  66         * +2 accounts for 0x prefix on raw addresses
  67         * +3 accounts for ' y ' symtab origin info
  68         */
  69        if (h->ms.sym) {
  70                symlen = h->ms.sym->namelen + 4;
  71                if (verbose)
  72                        symlen += BITS_PER_LONG / 4 + 2 + 3;
  73                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
  74        } else {
  75                symlen = unresolved_col_width + 4 + 2;
  76                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
  77                hists__set_unres_dso_col_len(hists, HISTC_DSO);
  78        }
  79
  80        len = thread__comm_len(h->thread);
  81        if (hists__new_col_len(hists, HISTC_COMM, len))
  82                hists__set_col_len(hists, HISTC_THREAD, len + 8);
  83
  84        if (h->ms.map) {
  85                len = dso__name_len(h->ms.map->dso);
  86                hists__new_col_len(hists, HISTC_DSO, len);
  87        }
  88
  89        if (h->parent)
  90                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
  91
  92        if (h->branch_info) {
  93                if (h->branch_info->from.sym) {
  94                        symlen = (int)h->branch_info->from.sym->namelen + 4;
  95                        if (verbose)
  96                                symlen += BITS_PER_LONG / 4 + 2 + 3;
  97                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
  98
  99                        symlen = dso__name_len(h->branch_info->from.map->dso);
 100                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
 101                } else {
 102                        symlen = unresolved_col_width + 4 + 2;
 103                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
 104                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
 105                }
 106
 107                if (h->branch_info->to.sym) {
 108                        symlen = (int)h->branch_info->to.sym->namelen + 4;
 109                        if (verbose)
 110                                symlen += BITS_PER_LONG / 4 + 2 + 3;
 111                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
 112
 113                        symlen = dso__name_len(h->branch_info->to.map->dso);
 114                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
 115                } else {
 116                        symlen = unresolved_col_width + 4 + 2;
 117                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
 118                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
 119                }
 120
 121                if (h->branch_info->srcline_from)
 122                        hists__new_col_len(hists, HISTC_SRCLINE_FROM,
 123                                        strlen(h->branch_info->srcline_from));
 124                if (h->branch_info->srcline_to)
 125                        hists__new_col_len(hists, HISTC_SRCLINE_TO,
 126                                        strlen(h->branch_info->srcline_to));
 127        }
 128
 129        if (h->mem_info) {
 130                if (h->mem_info->daddr.sym) {
 131                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
 132                               + unresolved_col_width + 2;
 133                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
 134                                           symlen);
 135                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
 136                                           symlen + 1);
 137                } else {
 138                        symlen = unresolved_col_width + 4 + 2;
 139                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
 140                                           symlen);
 141                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
 142                                           symlen);
 143                }
 144
 145                if (h->mem_info->iaddr.sym) {
 146                        symlen = (int)h->mem_info->iaddr.sym->namelen + 4
 147                               + unresolved_col_width + 2;
 148                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
 149                                           symlen);
 150                } else {
 151                        symlen = unresolved_col_width + 4 + 2;
 152                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
 153                                           symlen);
 154                }
 155
 156                if (h->mem_info->daddr.map) {
 157                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
 158                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
 159                                           symlen);
 160                } else {
 161                        symlen = unresolved_col_width + 4 + 2;
 162                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
 163                }
 164        } else {
 165                symlen = unresolved_col_width + 4 + 2;
 166                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
 167                hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
 168                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
 169        }
 170
 171        hists__new_col_len(hists, HISTC_CPU, 3);
 172        hists__new_col_len(hists, HISTC_SOCKET, 6);
 173        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
 174        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
 175        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
 176        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
 177        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
 178        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
 179
 180        if (h->srcline) {
 181                len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
 182                hists__new_col_len(hists, HISTC_SRCLINE, len);
 183        }
 184
 185        if (h->srcfile)
 186                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
 187
 188        if (h->transaction)
 189                hists__new_col_len(hists, HISTC_TRANSACTION,
 190                                   hist_entry__transaction_len());
 191
 192        if (h->trace_output)
 193                hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
 194}
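
/*
 * A worked example of the sizing above, assuming a 64-bit host: a raw
 * address needs BITS_PER_LONG / 4 = 16 hex digits, so a resolved symbol
 * printed in verbose mode takes namelen + 4 ('[k] ') + 16 (address)
 * + 2 ("0x") + 3 (' y ' symtab origin) columns.
 */
#if 0
        /* e.g. "schedule", namelen = 8, on x86_64: */
        symlen = 8 + 4 + 16 + 2 + 3;    /* = 33 columns in verbose mode */
#endif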
 195
 196void hists__output_recalc_col_len(struct hists *hists, int max_rows)
 197{
 198        struct rb_node *next = rb_first(&hists->entries);
 199        struct hist_entry *n;
 200        int row = 0;
 201
 202        hists__reset_col_len(hists);
 203
 204        while (next && row++ < max_rows) {
 205                n = rb_entry(next, struct hist_entry, rb_node);
 206                if (!n->filtered)
 207                        hists__calc_col_len(hists, n);
 208                next = rb_next(&n->rb_node);
 209        }
 210}
 211
 212static void he_stat__add_cpumode_period(struct he_stat *he_stat,
 213                                        unsigned int cpumode, u64 period)
 214{
 215        switch (cpumode) {
 216        case PERF_RECORD_MISC_KERNEL:
 217                he_stat->period_sys += period;
 218                break;
 219        case PERF_RECORD_MISC_USER:
 220                he_stat->period_us += period;
 221                break;
 222        case PERF_RECORD_MISC_GUEST_KERNEL:
 223                he_stat->period_guest_sys += period;
 224                break;
 225        case PERF_RECORD_MISC_GUEST_USER:
 226                he_stat->period_guest_us += period;
 227                break;
 228        default:
 229                break;
 230        }
 231}
 232
 233static void he_stat__add_period(struct he_stat *he_stat, u64 period,
 234                                u64 weight)
 235{
 236
 237        he_stat->period         += period;
 238        he_stat->weight         += weight;
 239        he_stat->nr_events      += 1;
 240}
 241
 242static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
 243{
 244        dest->period            += src->period;
 245        dest->period_sys        += src->period_sys;
 246        dest->period_us         += src->period_us;
 247        dest->period_guest_sys  += src->period_guest_sys;
 248        dest->period_guest_us   += src->period_guest_us;
 249        dest->nr_events         += src->nr_events;
 250        dest->weight            += src->weight;
 251}
 252
 253static void he_stat__decay(struct he_stat *he_stat)
 254{
 255        he_stat->period = (he_stat->period * 7) / 8;
 256        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
 257        /* XXX need decay for weight too? */
 258}
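
/*
 * An illustration of the decay rate (the helper below is hypothetical):
 * each pass keeps 7/8 of the period, so roughly (7/8)^n survives after
 * n passes, and integer division eventually drives an idle entry to
 * zero, at which point hists__decay_entry() reports it ready for
 * deletion.
 */
#if 0
static u64 period_after_decay(u64 period, int passes)
{
        while (passes-- > 0)
                period = (period * 7) / 8;      /* same math as above */

        return period;  /* e.g. 1000 -> 875 -> 765 -> 669 -> ... -> 0 */
}
#endif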
 259
 260static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
 261
 262static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
 263{
 264        u64 prev_period = he->stat.period;
 265        u64 diff;
 266
 267        if (prev_period == 0)
 268                return true;
 269
 270        he_stat__decay(&he->stat);
 271        if (symbol_conf.cumulate_callchain)
 272                he_stat__decay(he->stat_acc);
 273        decay_callchain(he->callchain);
 274
 275        diff = prev_period - he->stat.period;
 276
 277        if (!he->depth) {
 278                hists->stats.total_period -= diff;
 279                if (!he->filtered)
 280                        hists->stats.total_non_filtered_period -= diff;
 281        }
 282
 283        if (!he->leaf) {
 284                struct hist_entry *child;
 285                struct rb_node *node = rb_first(&he->hroot_out);
 286                while (node) {
 287                        child = rb_entry(node, struct hist_entry, rb_node);
 288                        node = rb_next(node);
 289
 290                        if (hists__decay_entry(hists, child))
 291                                hists__delete_entry(hists, child);
 292                }
 293        }
 294
 295        return he->stat.period == 0;
 296}
 297
 298static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
 299{
 300        struct rb_root *root_in;
 301        struct rb_root *root_out;
 302
 303        if (he->parent_he) {
 304                root_in  = &he->parent_he->hroot_in;
 305                root_out = &he->parent_he->hroot_out;
 306        } else {
 307                if (hists__has(hists, need_collapse))
 308                        root_in = &hists->entries_collapsed;
 309                else
 310                        root_in = hists->entries_in;
 311                root_out = &hists->entries;
 312        }
 313
 314        rb_erase(&he->rb_node_in, root_in);
 315        rb_erase(&he->rb_node, root_out);
 316
 317        --hists->nr_entries;
 318        if (!he->filtered)
 319                --hists->nr_non_filtered_entries;
 320
 321        hist_entry__delete(he);
 322}
 323
 324void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
 325{
 326        struct rb_node *next = rb_first(&hists->entries);
 327        struct hist_entry *n;
 328
 329        while (next) {
 330                n = rb_entry(next, struct hist_entry, rb_node);
 331                next = rb_next(&n->rb_node);
 332                if (((zap_user && n->level == '.') ||
 333                     (zap_kernel && n->level != '.') ||
 334                     hists__decay_entry(hists, n))) {
 335                        hists__delete_entry(hists, n);
 336                }
 337        }
 338}
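
/*
 * A usage sketch, assuming a perf-top style refresh loop (the caller
 * name and row count are hypothetical): zap_user/zap_kernel test
 * n->level, where '.' marks a userspace sample; entries that are not
 * zapped fade out gradually through repeated decay instead.
 */
#if 0
static void hypothetical_top_refresh(struct hists *hists)
{
        hists__decay_entries(hists, false, false);      /* decay, zap nothing */
        hists__output_recalc_col_len(hists, 40);        /* re-fit 40 visible rows */
}
#endif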
 339
 340void hists__delete_entries(struct hists *hists)
 341{
 342        struct rb_node *next = rb_first(&hists->entries);
 343        struct hist_entry *n;
 344
 345        while (next) {
 346                n = rb_entry(next, struct hist_entry, rb_node);
 347                next = rb_next(&n->rb_node);
 348
 349                hists__delete_entry(hists, n);
 350        }
 351}
 352
 353/*
 354 * histogram, sorted on item, collects periods
 355 */
 356
 357static int hist_entry__init(struct hist_entry *he,
 358                            struct hist_entry *template,
 359                            bool sample_self)
 360{
 361        *he = *template;
 362
 363        if (symbol_conf.cumulate_callchain) {
 364                he->stat_acc = malloc(sizeof(he->stat));
 365                if (he->stat_acc == NULL)
 366                        return -ENOMEM;
 367                memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
 368                if (!sample_self)
 369                        memset(&he->stat, 0, sizeof(he->stat));
 370        }
 371
 372        map__get(he->ms.map);
 373
 374        if (he->branch_info) {
 375                /*
  376                 * This branch info is (part of) an array allocated by
  377                 * sample__resolve_bstack() and will be freed after the
  378                 * new entries are added, so we need to save a copy.
 379                 */
 380                he->branch_info = malloc(sizeof(*he->branch_info));
 381                if (he->branch_info == NULL) {
 382                        map__zput(he->ms.map);
 383                        free(he->stat_acc);
 384                        return -ENOMEM;
 385                }
 386
 387                memcpy(he->branch_info, template->branch_info,
 388                       sizeof(*he->branch_info));
 389
 390                map__get(he->branch_info->from.map);
 391                map__get(he->branch_info->to.map);
 392        }
 393
 394        if (he->mem_info) {
 395                map__get(he->mem_info->iaddr.map);
 396                map__get(he->mem_info->daddr.map);
 397        }
 398
 399        if (symbol_conf.use_callchain)
 400                callchain_init(he->callchain);
 401
 402        if (he->raw_data) {
 403                he->raw_data = memdup(he->raw_data, he->raw_size);
 404
 405                if (he->raw_data == NULL) {
 406                        map__put(he->ms.map);
 407                        if (he->branch_info) {
 408                                map__put(he->branch_info->from.map);
 409                                map__put(he->branch_info->to.map);
 410                                free(he->branch_info);
 411                        }
 412                        if (he->mem_info) {
 413                                map__put(he->mem_info->iaddr.map);
 414                                map__put(he->mem_info->daddr.map);
 415                        }
 416                        free(he->stat_acc);
 417                        return -ENOMEM;
 418                }
 419        }
 420        INIT_LIST_HEAD(&he->pairs.node);
 421        thread__get(he->thread);
 422        he->hroot_in  = RB_ROOT;
 423        he->hroot_out = RB_ROOT;
 424
 425        if (!symbol_conf.report_hierarchy)
 426                he->leaf = true;
 427
 428        return 0;
 429}
 430
 431static void *hist_entry__zalloc(size_t size)
 432{
 433        return zalloc(size + sizeof(struct hist_entry));
 434}
 435
 436static void hist_entry__free(void *ptr)
 437{
 438        free(ptr);
 439}
 440
 441static struct hist_entry_ops default_ops = {
 442        .new    = hist_entry__zalloc,
 443        .free   = hist_entry__free,
 444};
 445
 446static struct hist_entry *hist_entry__new(struct hist_entry *template,
 447                                          bool sample_self)
 448{
 449        struct hist_entry_ops *ops = template->ops;
 450        size_t callchain_size = 0;
 451        struct hist_entry *he;
 452        int err = 0;
 453
 454        if (!ops)
 455                ops = template->ops = &default_ops;
 456
 457        if (symbol_conf.use_callchain)
 458                callchain_size = sizeof(struct callchain_root);
 459
 460        he = ops->new(callchain_size);
 461        if (he) {
 462                err = hist_entry__init(he, template, sample_self);
 463                if (err) {
 464                        ops->free(he);
 465                        he = NULL;
 466                }
 467        }
 468
 469        return he;
 470}
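
/*
 * A layout sketch, assuming the zero-length 'callchain[0]' tail member
 * declared at the end of struct hist_entry in sort.h: allocating the
 * entry and its callchain root as one block is what makes he->callchain
 * point at valid storage, and only when callchains are in use.
 */
#if 0
        he = zalloc(sizeof(*he) + sizeof(struct callchain_root));
        callchain_init(he->callchain);  /* lands in the tail allocation */
#endif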
 471
 472static u8 symbol__parent_filter(const struct symbol *parent)
 473{
 474        if (symbol_conf.exclude_other && parent == NULL)
 475                return 1 << HIST_FILTER__PARENT;
 476        return 0;
 477}
 478
 479static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
 480{
 481        if (!symbol_conf.use_callchain)
 482                return;
 483
 484        he->hists->callchain_period += period;
 485        if (!he->filtered)
 486                he->hists->callchain_non_filtered_period += period;
 487}
 488
 489static struct hist_entry *hists__findnew_entry(struct hists *hists,
 490                                               struct hist_entry *entry,
 491                                               struct addr_location *al,
 492                                               bool sample_self)
 493{
 494        struct rb_node **p;
 495        struct rb_node *parent = NULL;
 496        struct hist_entry *he;
 497        int64_t cmp;
 498        u64 period = entry->stat.period;
 499        u64 weight = entry->stat.weight;
 500
 501        p = &hists->entries_in->rb_node;
 502
 503        while (*p != NULL) {
 504                parent = *p;
 505                he = rb_entry(parent, struct hist_entry, rb_node_in);
 506
 507                /*
  508                 * Make sure that it receives arguments in the same order as
  509                 * hist_entry__collapse() so that we can use an appropriate
  510                 * function when searching an entry regardless of which sort
  511                 * keys were used.
 512                 */
 513                cmp = hist_entry__cmp(he, entry);
 514
 515                if (!cmp) {
 516                        if (sample_self) {
 517                                he_stat__add_period(&he->stat, period, weight);
 518                                hist_entry__add_callchain_period(he, period);
 519                        }
 520                        if (symbol_conf.cumulate_callchain)
 521                                he_stat__add_period(he->stat_acc, period, weight);
 522
 523                        /*
  524                 * This mem info was allocated by sample__resolve_mem()
  525                 * and will not be used anymore.
 526                         */
 527                        zfree(&entry->mem_info);
 528
 529                        /* If the map of an existing hist_entry has
 530                         * become out-of-date due to an exec() or
 531                         * similar, update it.  Otherwise we will
 532                         * mis-adjust symbol addresses when computing
 533                         * the history counter to increment.
 534                         */
 535                        if (he->ms.map != entry->ms.map) {
 536                                map__put(he->ms.map);
 537                                he->ms.map = map__get(entry->ms.map);
 538                        }
 539                        goto out;
 540                }
 541
 542                if (cmp < 0)
 543                        p = &(*p)->rb_left;
 544                else
 545                        p = &(*p)->rb_right;
 546        }
 547
 548        he = hist_entry__new(entry, sample_self);
 549        if (!he)
 550                return NULL;
 551
 552        if (sample_self)
 553                hist_entry__add_callchain_period(he, period);
 554        hists->nr_entries++;
 555
 556        rb_link_node(&he->rb_node_in, parent, p);
 557        rb_insert_color(&he->rb_node_in, hists->entries_in);
 558out:
 559        if (sample_self)
 560                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
 561        if (symbol_conf.cumulate_callchain)
 562                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
 563        return he;
 564}
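
/*
 * The walk above is the standard rbtree find-or-insert shape; a
 * stripped-down sketch of the pattern (the names here are generic,
 * not taken from this file):
 */
#if 0
        while (*p != NULL) {
                parent = *p;
                cmp = compare(rb_entry(parent, struct node, rb_node), key);
                if (!cmp)
                        return parent;  /* merge into the matching node */
                p = cmp < 0 ? &(*p)->rb_left : &(*p)->rb_right;
        }
        rb_link_node(&new->rb_node, parent, p); /* attach as leaf */
        rb_insert_color(&new->rb_node, root);   /* rebalance */
#endif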
 565
 566static struct hist_entry*
 567__hists__add_entry(struct hists *hists,
 568                   struct addr_location *al,
 569                   struct symbol *sym_parent,
 570                   struct branch_info *bi,
 571                   struct mem_info *mi,
 572                   struct perf_sample *sample,
 573                   bool sample_self,
 574                   struct hist_entry_ops *ops)
 575{
 576        struct hist_entry entry = {
 577                .thread = al->thread,
 578                .comm = thread__comm(al->thread),
 579                .ms = {
 580                        .map    = al->map,
 581                        .sym    = al->sym,
 582                },
 583                .socket  = al->socket,
 584                .cpu     = al->cpu,
 585                .cpumode = al->cpumode,
 586                .ip      = al->addr,
 587                .level   = al->level,
 588                .stat = {
 589                        .nr_events = 1,
 590                        .period = sample->period,
 591                        .weight = sample->weight,
 592                },
 593                .parent = sym_parent,
 594                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
 595                .hists  = hists,
 596                .branch_info = bi,
 597                .mem_info = mi,
 598                .transaction = sample->transaction,
 599                .raw_data = sample->raw_data,
 600                .raw_size = sample->raw_size,
 601                .ops = ops,
 602        };
 603
 604        return hists__findnew_entry(hists, &entry, al, sample_self);
 605}
 606
 607struct hist_entry *hists__add_entry(struct hists *hists,
 608                                    struct addr_location *al,
 609                                    struct symbol *sym_parent,
 610                                    struct branch_info *bi,
 611                                    struct mem_info *mi,
 612                                    struct perf_sample *sample,
 613                                    bool sample_self)
 614{
 615        return __hists__add_entry(hists, al, sym_parent, bi, mi,
 616                                  sample, sample_self, NULL);
 617}
 618
 619struct hist_entry *hists__add_entry_ops(struct hists *hists,
 620                                        struct hist_entry_ops *ops,
 621                                        struct addr_location *al,
 622                                        struct symbol *sym_parent,
 623                                        struct branch_info *bi,
 624                                        struct mem_info *mi,
 625                                        struct perf_sample *sample,
 626                                        bool sample_self)
 627{
 628        return __hists__add_entry(hists, al, sym_parent, bi, mi,
 629                                  sample, sample_self, ops);
 630}
 631
 632static int
 633iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
 634                    struct addr_location *al __maybe_unused)
 635{
 636        return 0;
 637}
 638
 639static int
 640iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
 641                        struct addr_location *al __maybe_unused)
 642{
 643        return 0;
 644}
 645
 646static int
 647iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
 648{
 649        struct perf_sample *sample = iter->sample;
 650        struct mem_info *mi;
 651
 652        mi = sample__resolve_mem(sample, al);
 653        if (mi == NULL)
 654                return -ENOMEM;
 655
 656        iter->priv = mi;
 657        return 0;
 658}
 659
 660static int
 661iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
 662{
 663        u64 cost;
 664        struct mem_info *mi = iter->priv;
 665        struct hists *hists = evsel__hists(iter->evsel);
 666        struct perf_sample *sample = iter->sample;
 667        struct hist_entry *he;
 668
 669        if (mi == NULL)
 670                return -EINVAL;
 671
 672        cost = sample->weight;
 673        if (!cost)
 674                cost = 1;
 675
 676        /*
 677         * must pass period=weight in order to get the correct
 678         * sorting from hists__collapse_resort() which is solely
  679         * based on periods. We want sorting to be done on nr_events * weight
  680         * and this is indirectly achieved by passing period=weight here
  681         * and in the he_stat__add_period() function.
 682         */
 683        sample->period = cost;
 684
 685        he = hists__add_entry(hists, al, iter->parent, NULL, mi,
 686                              sample, true);
 687        if (!he)
 688                return -ENOMEM;
 689
 690        iter->he = he;
 691        return 0;
 692}
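
/*
 * A worked example of the period=weight trick above (the numbers are
 * made up): two samples on the same data address with weights 30 and
 * 50 accumulate to
 *
 *      stat = { .period = 80, .weight = 80, .nr_events = 2 }
 *
 * so the period-based collapse/sort orders entries by total access
 * cost rather than by bare sample count.
 */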
 693
 694static int
 695iter_finish_mem_entry(struct hist_entry_iter *iter,
 696                      struct addr_location *al __maybe_unused)
 697{
 698        struct perf_evsel *evsel = iter->evsel;
 699        struct hists *hists = evsel__hists(evsel);
 700        struct hist_entry *he = iter->he;
 701        int err = -EINVAL;
 702
 703        if (he == NULL)
 704                goto out;
 705
 706        hists__inc_nr_samples(hists, he->filtered);
 707
 708        err = hist_entry__append_callchain(he, iter->sample);
 709
 710out:
 711        /*
 712         * We don't need to free iter->priv (mem_info) here since the mem info
 713         * was either already freed in hists__findnew_entry() or passed to a
 714         * new hist entry by hist_entry__new().
 715         */
 716        iter->priv = NULL;
 717
 718        iter->he = NULL;
 719        return err;
 720}
 721
 722static int
 723iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
 724{
 725        struct branch_info *bi;
 726        struct perf_sample *sample = iter->sample;
 727
 728        bi = sample__resolve_bstack(sample, al);
 729        if (!bi)
 730                return -ENOMEM;
 731
 732        iter->curr = 0;
 733        iter->total = sample->branch_stack->nr;
 734
 735        iter->priv = bi;
 736        return 0;
 737}
 738
 739static int
 740iter_add_single_branch_entry(struct hist_entry_iter *iter,
 741                             struct addr_location *al __maybe_unused)
 742{
  743        /* to avoid calling the callback function */
 744        iter->he = NULL;
 745
 746        return 0;
 747}
 748
 749static int
 750iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
 751{
 752        struct branch_info *bi = iter->priv;
 753        int i = iter->curr;
 754
 755        if (bi == NULL)
 756                return 0;
 757
 758        if (iter->curr >= iter->total)
 759                return 0;
 760
 761        al->map = bi[i].to.map;
 762        al->sym = bi[i].to.sym;
 763        al->addr = bi[i].to.addr;
 764        return 1;
 765}
 766
 767static int
 768iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
 769{
 770        struct branch_info *bi;
 771        struct perf_evsel *evsel = iter->evsel;
 772        struct hists *hists = evsel__hists(evsel);
 773        struct perf_sample *sample = iter->sample;
 774        struct hist_entry *he = NULL;
 775        int i = iter->curr;
 776        int err = 0;
 777
 778        bi = iter->priv;
 779
 780        if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
 781                goto out;
 782
 783        /*
 784         * The report shows the percentage of total branches captured
 785         * and not events sampled. Thus we use a pseudo period of 1.
 786         */
 787        sample->period = 1;
  788        sample->weight = bi[i].flags.cycles ? bi[i].flags.cycles : 1;
 789
 790        he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
 791                              sample, true);
 792        if (he == NULL)
 793                return -ENOMEM;
 794
 795        hists__inc_nr_samples(hists, he->filtered);
 796
 797out:
 798        iter->he = he;
 799        iter->curr++;
 800        return err;
 801}
 802
 803static int
 804iter_finish_branch_entry(struct hist_entry_iter *iter,
 805                         struct addr_location *al __maybe_unused)
 806{
 807        zfree(&iter->priv);
 808        iter->he = NULL;
 809
 810        return iter->curr >= iter->total ? 0 : -1;
 811}
 812
 813static int
 814iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
 815                          struct addr_location *al __maybe_unused)
 816{
 817        return 0;
 818}
 819
 820static int
 821iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
 822{
 823        struct perf_evsel *evsel = iter->evsel;
 824        struct perf_sample *sample = iter->sample;
 825        struct hist_entry *he;
 826
 827        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
 828                              sample, true);
 829        if (he == NULL)
 830                return -ENOMEM;
 831
 832        iter->he = he;
 833        return 0;
 834}
 835
 836static int
 837iter_finish_normal_entry(struct hist_entry_iter *iter,
 838                         struct addr_location *al __maybe_unused)
 839{
 840        struct hist_entry *he = iter->he;
 841        struct perf_evsel *evsel = iter->evsel;
 842        struct perf_sample *sample = iter->sample;
 843
 844        if (he == NULL)
 845                return 0;
 846
 847        iter->he = NULL;
 848
 849        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
 850
 851        return hist_entry__append_callchain(he, sample);
 852}
 853
 854static int
 855iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
 856                              struct addr_location *al __maybe_unused)
 857{
 858        struct hist_entry **he_cache;
 859
 860        callchain_cursor_commit(&callchain_cursor);
 861
 862        /*
  863         * This is for detecting cycles or recursion so that entries are
  864         * accumulated only once, preventing any entry from exceeding
  865         * 100% overhead.
 866         */
 867        he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
 868        if (he_cache == NULL)
 869                return -ENOMEM;
 870
 871        iter->priv = he_cache;
 872        iter->curr = 0;
 873
 874        return 0;
 875}
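
/*
 * A sketch of the dedup that he_cache enables, mirroring the scan done
 * in iter_add_next_cumulative_entry() below: for a recursive chain
 * like a -> b -> a, the second 'a' matches he_cache[0] and is skipped,
 * so no entry is credited twice for a single sample.
 */
#if 0
        for (i = 0; i < iter->curr; i++)
                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0)
                        return 0;       /* already credited for this sample */
#endif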
 876
 877static int
 878iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
 879                                 struct addr_location *al)
 880{
 881        struct perf_evsel *evsel = iter->evsel;
 882        struct hists *hists = evsel__hists(evsel);
 883        struct perf_sample *sample = iter->sample;
 884        struct hist_entry **he_cache = iter->priv;
 885        struct hist_entry *he;
 886        int err = 0;
 887
 888        he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
 889                              sample, true);
 890        if (he == NULL)
 891                return -ENOMEM;
 892
 893        iter->he = he;
 894        he_cache[iter->curr++] = he;
 895
 896        hist_entry__append_callchain(he, sample);
 897
 898        /*
 899         * We need to re-initialize the cursor since callchain_append()
 900         * advanced the cursor to the end.
 901         */
 902        callchain_cursor_commit(&callchain_cursor);
 903
 904        hists__inc_nr_samples(hists, he->filtered);
 905
 906        return err;
 907}
 908
 909static int
 910iter_next_cumulative_entry(struct hist_entry_iter *iter,
 911                           struct addr_location *al)
 912{
 913        struct callchain_cursor_node *node;
 914
 915        node = callchain_cursor_current(&callchain_cursor);
 916        if (node == NULL)
 917                return 0;
 918
 919        return fill_callchain_info(al, node, iter->hide_unresolved);
 920}
 921
 922static int
 923iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
 924                               struct addr_location *al)
 925{
 926        struct perf_evsel *evsel = iter->evsel;
 927        struct perf_sample *sample = iter->sample;
 928        struct hist_entry **he_cache = iter->priv;
 929        struct hist_entry *he;
 930        struct hist_entry he_tmp = {
 931                .hists = evsel__hists(evsel),
 932                .cpu = al->cpu,
 933                .thread = al->thread,
 934                .comm = thread__comm(al->thread),
 935                .ip = al->addr,
 936                .ms = {
 937                        .map = al->map,
 938                        .sym = al->sym,
 939                },
 940                .parent = iter->parent,
 941                .raw_data = sample->raw_data,
 942                .raw_size = sample->raw_size,
 943        };
 944        int i;
 945        struct callchain_cursor cursor;
 946
 947        callchain_cursor_snapshot(&cursor, &callchain_cursor);
 948
 949        callchain_cursor_advance(&callchain_cursor);
 950
 951        /*
  952         * Check if there are duplicate entries in the callchain.
  953         * The chain may contain cycles or recursive calls.
 954         */
 955        for (i = 0; i < iter->curr; i++) {
 956                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
  957                        /* to avoid calling the callback function */
 958                        iter->he = NULL;
 959                        return 0;
 960                }
 961        }
 962
 963        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
 964                              sample, false);
 965        if (he == NULL)
 966                return -ENOMEM;
 967
 968        iter->he = he;
 969        he_cache[iter->curr++] = he;
 970
 971        if (symbol_conf.use_callchain)
 972                callchain_append(he->callchain, &cursor, sample->period);
 973        return 0;
 974}
 975
 976static int
 977iter_finish_cumulative_entry(struct hist_entry_iter *iter,
 978                             struct addr_location *al __maybe_unused)
 979{
 980        zfree(&iter->priv);
 981        iter->he = NULL;
 982
 983        return 0;
 984}
 985
 986const struct hist_iter_ops hist_iter_mem = {
 987        .prepare_entry          = iter_prepare_mem_entry,
 988        .add_single_entry       = iter_add_single_mem_entry,
 989        .next_entry             = iter_next_nop_entry,
 990        .add_next_entry         = iter_add_next_nop_entry,
 991        .finish_entry           = iter_finish_mem_entry,
 992};
 993
 994const struct hist_iter_ops hist_iter_branch = {
 995        .prepare_entry          = iter_prepare_branch_entry,
 996        .add_single_entry       = iter_add_single_branch_entry,
 997        .next_entry             = iter_next_branch_entry,
 998        .add_next_entry         = iter_add_next_branch_entry,
 999        .finish_entry           = iter_finish_branch_entry,
1000};
1001
1002const struct hist_iter_ops hist_iter_normal = {
1003        .prepare_entry          = iter_prepare_normal_entry,
1004        .add_single_entry       = iter_add_single_normal_entry,
1005        .next_entry             = iter_next_nop_entry,
1006        .add_next_entry         = iter_add_next_nop_entry,
1007        .finish_entry           = iter_finish_normal_entry,
1008};
1009
1010const struct hist_iter_ops hist_iter_cumulative = {
1011        .prepare_entry          = iter_prepare_cumulative_entry,
1012        .add_single_entry       = iter_add_single_cumulative_entry,
1013        .next_entry             = iter_next_cumulative_entry,
1014        .add_next_entry         = iter_add_next_cumulative_entry,
1015        .finish_entry           = iter_finish_cumulative_entry,
1016};
1017
1018int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1019                         int max_stack_depth, void *arg)
1020{
1021        int err, err2;
1022
1023        err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
1024                                        iter->evsel, al, max_stack_depth);
1025        if (err)
1026                return err;
1027
1028        iter->max_stack = max_stack_depth;
1029
1030        err = iter->ops->prepare_entry(iter, al);
1031        if (err)
1032                goto out;
1033
1034        err = iter->ops->add_single_entry(iter, al);
1035        if (err)
1036                goto out;
1037
1038        if (iter->he && iter->add_entry_cb) {
1039                err = iter->add_entry_cb(iter, al, true, arg);
1040                if (err)
1041                        goto out;
1042        }
1043
1044        while (iter->ops->next_entry(iter, al)) {
1045                err = iter->ops->add_next_entry(iter, al);
1046                if (err)
1047                        break;
1048
1049                if (iter->he && iter->add_entry_cb) {
1050                        err = iter->add_entry_cb(iter, al, false, arg);
1051                        if (err)
1052                                goto out;
1053                }
1054        }
1055
1056out:
1057        err2 = iter->ops->finish_entry(iter, al);
1058        if (!err)
1059                err = err2;
1060
1061        return err;
1062}
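
/*
 * A minimal driving sketch (the locals are hypothetical): every ops
 * table above is consumed by the same protocol -- prepare_entry,
 * add_single_entry, then next_entry/add_next_entry pairs until
 * next_entry returns 0, and a final finish_entry for cleanup.
 */
#if 0
        struct hist_entry_iter iter = {
                .evsel  = evsel,
                .sample = sample,
                .ops    = &hist_iter_cumulative, /* or _normal/_mem/_branch */
        };

        err = hist_entry_iter__add(&iter, &al, max_stack_depth, NULL);
#endif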
1063
1064int64_t
1065hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1066{
1067        struct hists *hists = left->hists;
1068        struct perf_hpp_fmt *fmt;
1069        int64_t cmp = 0;
1070
1071        hists__for_each_sort_list(hists, fmt) {
1072                if (perf_hpp__is_dynamic_entry(fmt) &&
1073                    !perf_hpp__defined_dynamic_entry(fmt, hists))
1074                        continue;
1075
1076                cmp = fmt->cmp(fmt, left, right);
1077                if (cmp)
1078                        break;
1079        }
1080
1081        return cmp;
1082}
1083
1084int64_t
1085hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1086{
1087        struct hists *hists = left->hists;
1088        struct perf_hpp_fmt *fmt;
1089        int64_t cmp = 0;
1090
1091        hists__for_each_sort_list(hists, fmt) {
1092                if (perf_hpp__is_dynamic_entry(fmt) &&
1093                    !perf_hpp__defined_dynamic_entry(fmt, hists))
1094                        continue;
1095
1096                cmp = fmt->collapse(fmt, left, right);
1097                if (cmp)
1098                        break;
1099        }
1100
1101        return cmp;
1102}
1103
1104void hist_entry__delete(struct hist_entry *he)
1105{
1106        struct hist_entry_ops *ops = he->ops;
1107
1108        thread__zput(he->thread);
1109        map__zput(he->ms.map);
1110
1111        if (he->branch_info) {
1112                map__zput(he->branch_info->from.map);
1113                map__zput(he->branch_info->to.map);
1114                free_srcline(he->branch_info->srcline_from);
1115                free_srcline(he->branch_info->srcline_to);
1116                zfree(&he->branch_info);
1117        }
1118
1119        if (he->mem_info) {
1120                map__zput(he->mem_info->iaddr.map);
1121                map__zput(he->mem_info->daddr.map);
1122                zfree(&he->mem_info);
1123        }
1124
1125        zfree(&he->stat_acc);
1126        free_srcline(he->srcline);
1127        if (he->srcfile && he->srcfile[0])
1128                free(he->srcfile);
1129        free_callchain(he->callchain);
1130        free(he->trace_output);
1131        free(he->raw_data);
1132        ops->free(he);
1133}
1134
1135/*
1136 * If this is not the last column, then we need to pad it according to the
 1137 * pre-calculated max length for this column, otherwise don't bother adding
 1138 * spaces because that would break viewing this with, for instance, 'less',
 1139 * which would show tons of trailing spaces when a long C++ demangled method
 1140 * name is sampled.
 1141 */
1142int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1143                                   struct perf_hpp_fmt *fmt, int printed)
1144{
1145        if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1146                const int width = fmt->width(fmt, hpp, he->hists);
1147                if (printed < width) {
1148                        advance_hpp(hpp, printed);
1149                        printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1150                }
1151        }
1152
1153        return printed;
1154}
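
/*
 * A worked example of the padding above (the widths are made up): with
 * a pre-calculated column width of 10 and printed == 6, the hpp buffer
 * is advanced past the 6 written bytes and "%-*s" emits 4 spaces of
 * padding; for the last column the list_is_last() check leaves
 * 'printed' untouched, so no trailing spaces reach the pager.
 */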
1155
1156/*
1157 * collapse the histogram
1158 */
1159
1160static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1161static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1162                                       enum hist_filter type);
1163
1164typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1165
1166static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1167{
1168        return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1169}
1170
1171static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1172                                                enum hist_filter type,
1173                                                fmt_chk_fn check)
1174{
1175        struct perf_hpp_fmt *fmt;
1176        bool type_match = false;
1177        struct hist_entry *parent = he->parent_he;
1178
1179        switch (type) {
1180        case HIST_FILTER__THREAD:
1181                if (symbol_conf.comm_list == NULL &&
1182                    symbol_conf.pid_list == NULL &&
1183                    symbol_conf.tid_list == NULL)
1184                        return;
1185                break;
1186        case HIST_FILTER__DSO:
1187                if (symbol_conf.dso_list == NULL)
1188                        return;
1189                break;
1190        case HIST_FILTER__SYMBOL:
1191                if (symbol_conf.sym_list == NULL)
1192                        return;
1193                break;
1194        case HIST_FILTER__PARENT:
1195        case HIST_FILTER__GUEST:
1196        case HIST_FILTER__HOST:
1197        case HIST_FILTER__SOCKET:
1198        default:
1199                return;
1200        }
1201
 1202        /* if it's filtered by its own fmt, it has to have filter bits */
1203        perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1204                if (check(fmt)) {
1205                        type_match = true;
1206                        break;
1207                }
1208        }
1209
1210        if (type_match) {
1211                /*
 1212                 * If the filter is for the current level entry, propagate
1213                 * filter marker to parents.  The marker bit was
1214                 * already set by default so it only needs to clear
1215                 * non-filtered entries.
1216                 */
1217                if (!(he->filtered & (1 << type))) {
1218                        while (parent) {
1219                                parent->filtered &= ~(1 << type);
1220                                parent = parent->parent_he;
1221                        }
1222                }
1223        } else {
1224                /*
 1225                 * If the current entry doesn't have matching formats, set
 1226                 * the filter marker for upper level entries.  It will be
 1227                 * cleared if its lower level entries are not filtered.
1228                 *
 1229                 * For lower-level entries, each inherits its parent's
1230                 * filter bit so that lower level entries of a
1231                 * non-filtered entry won't set the filter marker.
1232                 */
1233                if (parent == NULL)
1234                        he->filtered |= (1 << type);
1235                else
1236                        he->filtered |= (parent->filtered & (1 << type));
1237        }
1238}
1239
1240static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1241{
1242        hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1243                                            check_thread_entry);
1244
1245        hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1246                                            perf_hpp__is_dso_entry);
1247
1248        hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1249                                            perf_hpp__is_sym_entry);
1250
1251        hists__apply_filters(he->hists, he);
1252}
1253
1254static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1255                                                 struct rb_root *root,
1256                                                 struct hist_entry *he,
1257                                                 struct hist_entry *parent_he,
1258                                                 struct perf_hpp_list *hpp_list)
1259{
1260        struct rb_node **p = &root->rb_node;
1261        struct rb_node *parent = NULL;
1262        struct hist_entry *iter, *new;
1263        struct perf_hpp_fmt *fmt;
1264        int64_t cmp;
1265
1266        while (*p != NULL) {
1267                parent = *p;
1268                iter = rb_entry(parent, struct hist_entry, rb_node_in);
1269
1270                cmp = 0;
1271                perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1272                        cmp = fmt->collapse(fmt, iter, he);
1273                        if (cmp)
1274                                break;
1275                }
1276
1277                if (!cmp) {
1278                        he_stat__add_stat(&iter->stat, &he->stat);
1279                        return iter;
1280                }
1281
1282                if (cmp < 0)
1283                        p = &parent->rb_left;
1284                else
1285                        p = &parent->rb_right;
1286        }
1287
1288        new = hist_entry__new(he, true);
1289        if (new == NULL)
1290                return NULL;
1291
1292        hists->nr_entries++;
1293
1294        /* save related format list for output */
1295        new->hpp_list = hpp_list;
1296        new->parent_he = parent_he;
1297
1298        hist_entry__apply_hierarchy_filters(new);
1299
1300        /* some fields are now passed to 'new' */
1301        perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1302                if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1303                        he->trace_output = NULL;
1304                else
1305                        new->trace_output = NULL;
1306
1307                if (perf_hpp__is_srcline_entry(fmt))
1308                        he->srcline = NULL;
1309                else
1310                        new->srcline = NULL;
1311
1312                if (perf_hpp__is_srcfile_entry(fmt))
1313                        he->srcfile = NULL;
1314                else
1315                        new->srcfile = NULL;
1316        }
1317
1318        rb_link_node(&new->rb_node_in, parent, p);
1319        rb_insert_color(&new->rb_node_in, root);
1320        return new;
1321}
1322
1323static int hists__hierarchy_insert_entry(struct hists *hists,
1324                                         struct rb_root *root,
1325                                         struct hist_entry *he)
1326{
1327        struct perf_hpp_list_node *node;
1328        struct hist_entry *new_he = NULL;
1329        struct hist_entry *parent = NULL;
1330        int depth = 0;
1331        int ret = 0;
1332
1333        list_for_each_entry(node, &hists->hpp_formats, list) {
1334                /* skip period (overhead) and elided columns */
1335                if (node->level == 0 || node->skip)
1336                        continue;
1337
 1338                /* insert a copy of 'he' for each fmt into the hierarchy */
1339                new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1340                if (new_he == NULL) {
1341                        ret = -1;
1342                        break;
1343                }
1344
1345                root = &new_he->hroot_in;
1346                new_he->depth = depth++;
1347                parent = new_he;
1348        }
1349
1350        if (new_he) {
1351                new_he->leaf = true;
1352
1353                if (symbol_conf.use_callchain) {
1354                        callchain_cursor_reset(&callchain_cursor);
1355                        if (callchain_merge(&callchain_cursor,
1356                                            new_he->callchain,
1357                                            he->callchain) < 0)
1358                                ret = -1;
1359                }
1360        }
1361
1362        /* 'he' is no longer used */
1363        hist_entry__delete(he);
1364
1365        /* return 0 (or -1) since it already applied filters */
1366        return ret;
1367}
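
/*
 * An illustration of the hierarchy built above (the sort keys and
 * names are made up): with -s comm,dso,sym each input entry is cloned
 * once per level and linked through its parent's hroot_in, e.g.
 *
 *      comm "firefox"
 *        `- dso "libxul.so"
 *             `- sym "js::RunScript"   <- leaf, keeps the callchain
 *
 * and only the deepest clone gets ->leaf = true.
 */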
1368
1369static int hists__collapse_insert_entry(struct hists *hists,
1370                                        struct rb_root *root,
1371                                        struct hist_entry *he)
1372{
1373        struct rb_node **p = &root->rb_node;
1374        struct rb_node *parent = NULL;
1375        struct hist_entry *iter;
1376        int64_t cmp;
1377
1378        if (symbol_conf.report_hierarchy)
1379                return hists__hierarchy_insert_entry(hists, root, he);
1380
1381        while (*p != NULL) {
1382                parent = *p;
1383                iter = rb_entry(parent, struct hist_entry, rb_node_in);
1384
1385                cmp = hist_entry__collapse(iter, he);
1386
1387                if (!cmp) {
1388                        int ret = 0;
1389
1390                        he_stat__add_stat(&iter->stat, &he->stat);
1391                        if (symbol_conf.cumulate_callchain)
1392                                he_stat__add_stat(iter->stat_acc, he->stat_acc);
1393
1394                        if (symbol_conf.use_callchain) {
1395                                callchain_cursor_reset(&callchain_cursor);
1396                                if (callchain_merge(&callchain_cursor,
1397                                                    iter->callchain,
1398                                                    he->callchain) < 0)
1399                                        ret = -1;
1400                        }
1401                        hist_entry__delete(he);
1402                        return ret;
1403                }
1404
1405                if (cmp < 0)
1406                        p = &(*p)->rb_left;
1407                else
1408                        p = &(*p)->rb_right;
1409        }
1410        hists->nr_entries++;
1411
1412        rb_link_node(&he->rb_node_in, parent, p);
1413        rb_insert_color(&he->rb_node_in, root);
1414        return 1;
1415}
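
/*
 * The return convention above, spelled out as the caller in
 * hists__collapse_resort() consumes it:
 *
 *       1  'he' was linked as a new collapsed entry; the caller still
 *          has to apply filters to it,
 *       0  'he' was merged into an existing entry and freed,
 *      -1  merging the callchains failed.
 */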
1416
1417struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
1418{
1419        struct rb_root *root;
1420
1421        pthread_mutex_lock(&hists->lock);
1422
1423        root = hists->entries_in;
1424        if (++hists->entries_in > &hists->entries_in_array[1])
1425                hists->entries_in = &hists->entries_in_array[0];
1426
1427        pthread_mutex_unlock(&hists->lock);
1428
1429        return root;
1430}
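
/*
 * A sketch of the double buffering above (the index form below is an
 * equivalent rewrite, not code from this file): entries_in ping-pongs
 * between the two slots of entries_in_array, so new samples can keep
 * landing in one tree while the collapse pass drains the other.
 */
#if 0
        int cur = hists->entries_in - hists->entries_in_array;  /* 0 or 1 */

        hists->entries_in = &hists->entries_in_array[cur ^ 1];  /* swap slots */
#endif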
1431
1432static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1433{
1434        hists__filter_entry_by_dso(hists, he);
1435        hists__filter_entry_by_thread(hists, he);
1436        hists__filter_entry_by_symbol(hists, he);
1437        hists__filter_entry_by_socket(hists, he);
1438}
1439
1440int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1441{
1442        struct rb_root *root;
1443        struct rb_node *next;
1444        struct hist_entry *n;
1445        int ret;
1446
1447        if (!hists__has(hists, need_collapse))
1448                return 0;
1449
1450        hists->nr_entries = 0;
1451
1452        root = hists__get_rotate_entries_in(hists);
1453
1454        next = rb_first(root);
1455
1456        while (next) {
1457                if (session_done())
1458                        break;
1459                n = rb_entry(next, struct hist_entry, rb_node_in);
1460                next = rb_next(&n->rb_node_in);
1461
1462                rb_erase(&n->rb_node_in, root);
1463                ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1464                if (ret < 0)
1465                        return -1;
1466
1467                if (ret) {
1468                        /*
1469                         * If it wasn't combined with one of the entries already
1470                         * collapsed, we need to apply the filters that may have
1471                         * been set by, say, the hist_browser.
1472                         */
1473                        hists__apply_filters(hists, n);
1474                }
1475                if (prog)
1476                        ui_progress__update(prog, 1);
1477        }
1478        return 0;
1479}
1480
1481static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1482{
1483        struct hists *hists = a->hists;
1484        struct perf_hpp_fmt *fmt;
1485        int64_t cmp = 0;
1486
1487        hists__for_each_sort_list(hists, fmt) {
1488                if (perf_hpp__should_skip(fmt, a->hists))
1489                        continue;
1490
1491                cmp = fmt->sort(fmt, a, b);
1492                if (cmp)
1493                        break;
1494        }
1495
1496        return cmp;
1497}
1498
1499static void hists__reset_filter_stats(struct hists *hists)
1500{
1501        hists->nr_non_filtered_entries = 0;
1502        hists->stats.total_non_filtered_period = 0;
1503}
1504
1505void hists__reset_stats(struct hists *hists)
1506{
1507        hists->nr_entries = 0;
1508        hists->stats.total_period = 0;
1509
1510        hists__reset_filter_stats(hists);
1511}
1512
1513static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1514{
1515        hists->nr_non_filtered_entries++;
1516        hists->stats.total_non_filtered_period += h->stat.period;
1517}
1518
1519void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1520{
1521        if (!h->filtered)
1522                hists__inc_filter_stats(hists, h);
1523
1524        hists->nr_entries++;
1525        hists->stats.total_period += h->stat.period;
1526}
1527
1528static void hierarchy_recalc_total_periods(struct hists *hists)
1529{
1530        struct rb_node *node;
1531        struct hist_entry *he;
1532
1533        node = rb_first(&hists->entries);
1534
1535        hists->stats.total_period = 0;
1536        hists->stats.total_non_filtered_period = 0;
1537
1538        /*
 1539         * recalculate the total period using top-level entries only,
 1540         * since lower level entries only see non-filtered entries
 1541         * but upper level entries have the sum of both.
1542         */
        while (node) {
                he = rb_entry(node, struct hist_entry, rb_node);
                node = rb_next(node);

                hists->stats.total_period += he->stat.period;
                if (!he->filtered)
                        hists->stats.total_non_filtered_period += he->stat.period;
        }
}
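
/*
 * Worked example (editor's addition, illustrative numbers): with a
 * hierarchy like
 *
 *	comm A (period 100)
 *	  dso X (period 70)
 *	  dso Y (period 30, filtered)
 *
 * only the top-level "comm A" entry is visited, giving total_period = 100
 * (and, if "comm A" itself is unfiltered, total_non_filtered_period = 100);
 * summing the lower levels as well would double count the periods.
 */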

static void hierarchy_insert_output_entry(struct rb_root *root,
                                          struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        struct perf_hpp_fmt *fmt;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, root);

        /* update column width of dynamic entry */
        perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt))
                        fmt->sort(fmt, he, NULL);
        }
}

static void hists__hierarchy_output_resort(struct hists *hists,
                                           struct ui_progress *prog,
                                           struct rb_root *root_in,
                                           struct rb_root *root_out,
                                           u64 min_callchain_hits,
                                           bool use_callchain)
{
        struct rb_node *node;
        struct hist_entry *he;

        *root_out = RB_ROOT;
        node = rb_first(root_in);

        while (node) {
                he = rb_entry(node, struct hist_entry, rb_node_in);
                node = rb_next(node);

                hierarchy_insert_output_entry(root_out, he);

                if (prog)
                        ui_progress__update(prog, 1);

                hists->nr_entries++;
                if (!he->filtered) {
                        hists->nr_non_filtered_entries++;
                        hists__calc_col_len(hists, he);
                }

                if (!he->leaf) {
                        hists__hierarchy_output_resort(hists, prog,
                                                       &he->hroot_in,
                                                       &he->hroot_out,
                                                       min_callchain_hits,
                                                       use_callchain);
                        continue;
                }

                if (!use_callchain)
                        continue;

                if (callchain_param.mode == CHAIN_GRAPH_REL) {
                        u64 total = he->stat.period;

                        if (symbol_conf.cumulate_callchain)
                                total = he->stat_acc->period;

                        min_callchain_hits = total * (callchain_param.min_percent / 100);
                }

                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);
        }
}
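
/*
 * Worked example (editor's addition): in CHAIN_GRAPH_REL mode the callchain
 * threshold is relative to the entry itself. With min_percent = 0.5 and an
 * entry period of 20000, min_callchain_hits = 20000 * (0.5 / 100) = 100, so
 * chains accounting for fewer than 100 period units are pruned from the
 * output. The same computation is repeated in __hists__insert_output_entry()
 * below for the flat (non-hierarchy) case.
 */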

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits,
                                         bool use_callchain)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        struct perf_hpp_fmt *fmt;

        if (use_callchain) {
                if (callchain_param.mode == CHAIN_GRAPH_REL) {
                        u64 total = he->stat.period;

                        if (symbol_conf.cumulate_callchain)
                                total = he->stat_acc->period;

                        min_callchain_hits = total * (callchain_param.min_percent / 100);
                }
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);
        }

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);

        perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    perf_hpp__defined_dynamic_entry(fmt, he->hists))
                        fmt->sort(fmt, he, NULL);  /* update column width */
        }
}

static void output_resort(struct hists *hists, struct ui_progress *prog,
                          bool use_callchain, hists__resort_cb_t cb)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 callchain_total;
        u64 min_callchain_hits;

        callchain_total = hists->callchain_period;
        if (symbol_conf.filter_relative)
                callchain_total = hists->callchain_non_filtered_period;

        min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

        hists__reset_stats(hists);
        hists__reset_col_len(hists);

        if (symbol_conf.report_hierarchy) {
                hists__hierarchy_output_resort(hists, prog,
                                               &hists->entries_collapsed,
                                               &hists->entries,
                                               min_callchain_hits,
                                               use_callchain);
                hierarchy_recalc_total_periods(hists);
                return;
        }

        if (hists__has(hists, need_collapse))
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                if (cb && cb(n))
                        continue;

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
                hists__inc_stats(hists, n);

                if (!n->filtered)
                        hists__calc_col_len(hists, n);

                if (prog)
                        ui_progress__update(prog, 1);
        }
}

void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
{
        bool use_callchain;

        if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
                use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
        else
                use_callchain = symbol_conf.use_callchain;

        output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
        output_resort(hists, prog, symbol_conf.use_callchain, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
                             hists__resort_cb_t cb)
{
        output_resort(hists, prog, symbol_conf.use_callchain, cb);
}
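
/*
 * Example (editor's sketch): the callback variant lets a caller drop
 * entries from the output without deleting them; returning true from the
 * callback skips the entry. The predicate below is hypothetical and would
 * keep only entries with a resolved symbol:
 *
 *	static bool skip_unresolved(struct hist_entry *he)
 *	{
 *		return he->ms.sym == NULL;
 *	}
 *
 *	hists__output_resort_cb(hists, NULL, skip_unresolved);
 */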

static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
        if (he->leaf || hmd == HMD_FORCE_SIBLING)
                return false;

        if (he->unfolded || hmd == HMD_FORCE_CHILD)
                return true;

        return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

        while (can_goto_child(he, HMD_NORMAL)) {
                node = rb_last(&he->hroot_out);
                he = rb_entry(node, struct hist_entry, rb_node);
        }
        return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

        if (can_goto_child(he, hmd))
                node = rb_first(&he->hroot_out);
        else
                node = rb_next(node);

        while (node == NULL) {
                he = he->parent_he;
                if (he == NULL)
                        break;

                node = rb_next(&he->rb_node);
        }
        return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

        node = rb_prev(node);
        if (node)
                return rb_hierarchy_last(node);

        he = he->parent_he;
        if (he == NULL)
                return NULL;

        return &he->rb_node;
}
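
/*
 * Example (editor's sketch): these helpers implement a pre-order walk of
 * the hierarchy that honours folding. A browser-style visit of every
 * currently reachable entry would look like:
 *
 *	struct rb_node *nd;
 *
 *	for (nd = rb_first(&hists->entries); nd;
 *	     nd = __rb_hierarchy_next(nd, HMD_NORMAL)) {
 *		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
 *		...use 'he' here...
 *	}
 *
 * Children are entered only while their parent is unfolded; HMD_FORCE_CHILD
 * descends regardless and HMD_FORCE_SIBLING skips the whole subtree, which
 * is what hists__filter_hierarchy() below relies on.
 */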

bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
        struct rb_node *node;
        struct hist_entry *child;
        float percent;

        if (he->leaf)
                return false;

        node = rb_first(&he->hroot_out);
        child = rb_entry(node, struct hist_entry, rb_node);

        while (node && child->filtered) {
                node = rb_next(node);
                child = rb_entry(node, struct hist_entry, rb_node);
        }

        if (node)
                percent = hist_entry__get_percent_limit(child);
        else
                percent = 0;

        return node && percent >= limit;
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);

        if (symbol_conf.report_hierarchy) {
                struct hist_entry *parent = h->parent_he;

                while (parent) {
                        he_stat__add_stat(&parent->stat, &h->stat);

                        parent->filtered &= ~(1 << filter);

                        if (parent->filtered)
                                goto next;

                        /* force fold unfiltered entry for simplicity */
                        parent->unfolded = false;
                        parent->has_no_entry = false;
                        parent->row_offset = 0;
                        parent->nr_rows = 0;
next:
                        parent = parent->parent_he;
                }
        }

        if (h->filtered)
                return;

        /* force fold unfiltered entry for simplicity */
        h->unfolded = false;
        h->has_no_entry = false;
        h->row_offset = 0;
        h->nr_rows = 0;

        hists->stats.nr_non_filtered_samples += h->stat.nr_events;

        hists__inc_filter_stats(hists, h);
        hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he)
{
        if ((hists->socket_filter > -1) &&
            (he->socket != hists->socket_filter)) {
                he->filtered |= (1 << HIST_FILTER__SOCKET);
                return true;
        }

        return false;
}

typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
        struct rb_node *nd;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (filter(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, type);
        }
}

static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        struct rb_root new_root = RB_ROOT;
        struct rb_node *nd;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, root);

        if (he->leaf || he->filtered)
                return;

        nd = rb_first(&he->hroot_out);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                nd = rb_next(nd);
                rb_erase(&h->rb_node, &he->hroot_out);

                resort_filtered_entry(&new_root, h);
        }

        he->hroot_out = new_root;
}

static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
        struct rb_node *nd;
        struct rb_root new_root = RB_ROOT;

        hists->stats.nr_non_filtered_samples = 0;

        hists__reset_filter_stats(hists);
        hists__reset_col_len(hists);

        nd = rb_first(&hists->entries);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
                int ret;

                ret = hist_entry__filter(h, type, arg);

                /*
                 * case 1. non-matching type
                 * zero out the period, set filter marker and move to child
                 */
                if (ret < 0) {
                        memset(&h->stat, 0, sizeof(h->stat));
                        h->filtered |= (1 << type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
                }
                /*
                 * case 2. matched type (filter out)
                 * set filter marker and move to next
                 */
                else if (ret == 1) {
                        h->filtered |= (1 << type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
                }
                /*
                 * case 3. ok (not filtered)
                 * add period to hists and parents, erase the filter marker
                 * and move to next sibling
                 */
                else {
                        hists__remove_entry_filter(hists, h, type);

                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
                }
        }

        hierarchy_recalc_total_periods(hists);

        /*
         * Resort the output after applying a new filter, since a filter
         * at a lower level of the hierarchy can change the periods of
         * entries at an upper level.
         */
        nd = rb_first(&hists->entries);
        while (nd) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                nd = rb_next(nd);
                rb_erase(&h->rb_node, &hists->entries);

                resort_filtered_entry(&new_root, h);
        }

        hists->entries = new_root;
}
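
/*
 * Note (editor's addition): as used above, hist_entry__filter() follows a
 * three-way convention: a negative return means the entry is of a
 * non-matching type (descend into children), 1 means it matched and is
 * filtered out (skip the subtree), and 0 means it passes the filter.
 */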

void hists__filter_by_thread(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
                                        hists->thread_filter);
        else
                hists__filter_by_type(hists, HIST_FILTER__THREAD,
                                      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__DSO,
                                        hists->dso_filter);
        else
                hists__filter_by_type(hists, HIST_FILTER__DSO,
                                      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
                                        hists->symbol_filter_str);
        else
                hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
                                      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
        if (symbol_conf.report_hierarchy)
                hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
                                        &hists->socket_filter);
        else
                hists__filter_by_type(hists, HIST_FILTER__SOCKET,
                                      hists__filter_entry_by_socket);
}
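
/*
 * Example (editor's sketch): a UI sets the filter on the hists and then
 * calls the matching entry point; re-running with the filter cleared
 * undoes it. The 'thread' variable is hypothetical and reference counting
 * of it is elided here:
 *
 *	hists->thread_filter = thread;
 *	hists__filter_by_thread(hists);
 *	...
 *	hists->thread_filter = NULL;
 *	hists__filter_by_thread(hists);
 */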

void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
        events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
        if (!filtered)
                hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        if (hists__has(hists, need_collapse))
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair, true);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                if (symbol_conf.cumulate_callchain)
                        memset(he->stat_acc, 0, sizeof(*he->stat_acc));
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_stats(hists, he);
                he->dummy = true;
        }
out:
        return he;
}

static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
                                                    struct rb_root *root,
                                                    struct hist_entry *pair)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        struct perf_hpp_fmt *fmt;

        p = &root->rb_node;
        while (*p != NULL) {
                int64_t cmp = 0;

                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
                        cmp = fmt->collapse(fmt, he, pair);
                        if (cmp)
                                break;
                }
                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }

        he = hist_entry__new(pair, true);
        if (he) {
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);

                he->dummy = true;
                he->hists = hists;
                memset(&he->stat, 0, sizeof(he->stat));
                hists__inc_stats(hists, he);
        }
out:
        return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (hists__has(hists, need_collapse))
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
                                                      struct hist_entry *he)
{
        struct rb_node *n = root->rb_node;

        while (n) {
                struct hist_entry *iter;
                struct perf_hpp_fmt *fmt;
                int64_t cmp = 0;

                iter = rb_entry(n, struct hist_entry, rb_node_in);
                perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
                        cmp = fmt->collapse(fmt, iter, he);
                        if (cmp)
                                break;
                }

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

static void hists__match_hierarchy(struct rb_root *leader_root,
                                   struct rb_root *other_root)
{
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_hierarchy_entry(other_root, pos);

                if (pair) {
                        hist_entry__add_pair(pair, pos);
                        hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
                }
        }
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (symbol_conf.report_hierarchy) {
                /* hierarchy report always collapses entries */
                return hists__match_hierarchy(&leader->entries_collapsed,
                                              &other->entries_collapsed);
        }

        if (hists__has(leader, need_collapse))
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}

static int hists__link_hierarchy(struct hists *leader_hists,
                                 struct hist_entry *parent,
                                 struct rb_root *leader_root,
                                 struct rb_root *other_root)
{
        struct rb_node *nd;
        struct hist_entry *pos, *leader;

        for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (hist_entry__has_pairs(pos)) {
                        bool found = false;

                        list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
                                if (leader->hists == leader_hists) {
                                        found = true;
                                        break;
                                }
                        }
                        if (!found)
                                return -1;
                } else {
                        leader = add_dummy_hierarchy_entry(leader_hists,
                                                           leader_root, pos);
                        if (leader == NULL)
                                return -1;

                        /* the parent must come from the leader hists, not from 'pos' */
                        leader->parent_he = parent;

                        hist_entry__add_pair(pos, leader);
                }

                if (!pos->leaf) {
                        if (hists__link_hierarchy(leader_hists, leader,
                                                  &leader->hroot_in,
                                                  &pos->hroot_in) < 0)
                                return -1;
                }
        }
        return 0;
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find them, just add a dummy entry on the leader hists, with
 * period=0, nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (symbol_conf.report_hierarchy) {
                /* hierarchy report always collapses entries */
                return hists__link_hierarchy(leader, NULL,
                                             &leader->entries_collapsed,
                                             &other->entries_collapsed);
        }

        if (hists__has(other, need_collapse))
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}
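
/*
 * Example (editor's sketch): 'perf diff' style pairing of two hists, where
 * 'leader' ends up with a (possibly dummy) counterpart for every entry in
 * 'other'; the variables are hypothetical and error handling is minimal:
 *
 *	hists__match(leader, other);
 *	if (hists__link(leader, other) < 0)
 *		pr_err("failed to link hists\n");
 */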

void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
                          struct perf_sample *sample, bool nonany_branch_mode)
{
        struct branch_info *bi;

        /* If we have branch cycles always annotate them. */
        if (bs && bs->nr && bs->entries[0].flags.cycles) {
                int i;

                bi = sample__resolve_bstack(sample, al);
                if (bi) {
                        struct addr_map_symbol *prev = NULL;

                        /*
                         * Ignore errors, since we still want to process
                         * the other entries.
                         *
                         * For non-standard branch modes always
                         * force no IPC (prev == NULL).
                         *
                         * Note that perf stores branches reversed from
                         * program order!
                         */
                        for (i = bs->nr - 1; i >= 0; i--) {
                                addr_map_symbol__account_cycles(&bi[i].from,
                                        nonany_branch_mode ? NULL : prev,
                                        bi[i].flags.cycles);
                                prev = &bi[i].to;
                        }
                        free(bi);
                }
        }
}
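
/*
 * Note (editor's addition): because the branch stack is stored newest
 * first, the loop above walks it backwards so that 'prev' is the target
 * of the branch executed immediately before the current one in program
 * order, which is what the cycles/IPC accounting needs.
 */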

size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
        struct perf_evsel *pos;
        size_t ret = 0;

        evlist__for_each_entry(evlist, pos) {
                ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
                ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
        }

        return ret;
}

u64 hists__total_period(struct hists *hists)
{
        return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
                hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
                            const char *arg, int unset __maybe_unused)
{
        if (!strcmp(arg, "relative"))
                symbol_conf.filter_relative = true;
        else if (!strcmp(arg, "absolute"))
                symbol_conf.filter_relative = false;
        else
                return -1;

        return 0;
}

int perf_hist_config(const char *var, const char *value)
{
        if (!strcmp(var, "hist.percentage"))
                return parse_filter_percentage(NULL, value, 0);

        return 0;
}
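
/*
 * Example (editor's addition): these two entry points back the same knob,
 * from the command line and from perfconfig respectively; to my knowledge
 * the equivalent invocations are:
 *
 *	$ perf report --percentage relative
 *
 *	# ~/.perfconfig
 *	[hist]
 *		percentage = absolute
 */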

int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
        hists->entries_in = &hists->entries_in_array[0];
        hists->entries_collapsed = RB_ROOT;
        hists->entries = RB_ROOT;
        pthread_mutex_init(&hists->lock, NULL);
        hists->socket_filter = -1;
        hists->hpp_list = hpp_list;
        INIT_LIST_HEAD(&hists->hpp_formats);
        return 0;
}

static void hists__delete_remaining_entries(struct rb_root *root)
{
        struct rb_node *node;
        struct hist_entry *he;

        while (!RB_EMPTY_ROOT(root)) {
                node = rb_first(root);
                rb_erase(node, root);

                he = rb_entry(node, struct hist_entry, rb_node_in);
                hist_entry__delete(he);
        }
}

static void hists__delete_all_entries(struct hists *hists)
{
        hists__delete_entries(hists);
        hists__delete_remaining_entries(&hists->entries_in_array[0]);
        hists__delete_remaining_entries(&hists->entries_in_array[1]);
        hists__delete_remaining_entries(&hists->entries_collapsed);
}

static void hists_evsel__exit(struct perf_evsel *evsel)
{
        struct hists *hists = evsel__hists(evsel);
        struct perf_hpp_fmt *fmt, *pos;
        struct perf_hpp_list_node *node, *tmp;

        hists__delete_all_entries(hists);

        list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
                perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
                        list_del(&fmt->list);
                        free(fmt);
                }
                list_del(&node->list);
                free(node);
        }
}

static int hists_evsel__init(struct perf_evsel *evsel)
{
        struct hists *hists = evsel__hists(evsel);

        __hists__init(hists, &perf_hpp_list);
        return 0;
}

/*
 * The hist_entries stored in the rbtrees are freed by hists_evsel__exit()
 * above, which hists__init() below registers as the evsel destructor.
 */

int hists__init(void)
{
        int err = perf_evsel__object_config(sizeof(struct hists_evsel),
                                            hists_evsel__init,
                                            hists_evsel__exit);
        if (err)
                fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

        return err;
}
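
/*
 * Example (editor's sketch): hists__init() enlarges every evsel allocated
 * afterwards so that a struct hists rides along with it, which is why a
 * tool calls it before creating its evlist, roughly:
 *
 *	hists__init();
 *	...
 *	evlist = perf_evlist__new();
 *
 * After that, evsel__hists(evsel) returns the per-evsel hists.
 */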

void perf_hpp_list__init(struct perf_hpp_list *list)
{
        INIT_LIST_HEAD(&list->fields);
        INIT_LIST_HEAD(&list->sorts);
}
