linux/tools/perf/util/hist.c
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

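/*
 * Grow-only update: widen the column only when the new length exceeds
 * the current one, and report whether it did.  Callers such as
 * hists__calc_col_len() use the return value to resize dependent
 * columns (e.g. HISTC_THREAD) in lockstep.
 */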
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

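/*
 * Scan one entry and widen every output column it touches: symbol,
 * comm/thread, DSO, parent, and the branch/mem resolution columns,
 * plus the fixed-width columns at the end.  Typically called while
 * resorting, so the widths reflect the entries actually shown.
 */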
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen);
                }

                if (h->mem_info->iaddr.sym) {
                        symlen = (int)h->mem_info->iaddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                }

                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_CPU, 3);
        hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->srcline)
                hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

        if (h->srcfile)
                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());

        if (h->trace_output)
                hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

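/*
 * Age the counters by a factor of 7/8.  This is what makes entries
 * fade out over time in live (top-style) views: an entry that stops
 * getting new samples loses ~12.5% of its period on every decay pass
 * until hists__decay_entry() reports it as empty.
 */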
static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);
        decay_callchain(he->callchain);

        diff = prev_period - he->stat.period;

        if (!he->depth) {
                hists->stats.total_period -= diff;
                if (!he->filtered)
                        hists->stats.total_non_filtered_period -= diff;
        }

        if (!he->leaf) {
                struct hist_entry *child;
                struct rb_node *node = rb_first(&he->hroot_out);
                while (node) {
                        child = rb_entry(node, struct hist_entry, rb_node);
                        node = rb_next(node);

                        if (hists__decay_entry(hists, child))
                                hists__delete_entry(hists, child);
                }
        }

        return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        struct rb_root *root_in;
        struct rb_root *root_out;

        if (he->parent_he) {
                root_in  = &he->parent_he->hroot_in;
                root_out = &he->parent_he->hroot_out;
        } else {
                if (sort__need_collapse)
                        root_in = &hists->entries_collapsed;
                else
                        root_in = hists->entries_in;
                root_out = &hists->entries;
        }

        rb_erase(&he->rb_node_in, root_in);
        rb_erase(&he->rb_node, root_out);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}

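/*
 * Periodic aging pass over all output entries, e.g. from a live
 * top-style display.  'zap_user'/'zap_kernel' force-remove entries
 * from that privilege level ('.' denotes userspace); everything else
 * is decayed and deleted once its period reaches zero.
 */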
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n))) {
                        hists__delete_entry(hists, n);
                }
        }
}

void hists__delete_entries(struct hists *hists)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                hists__delete_entry(hists, n);
        }
}

/*
 * histogram, sorted on item, collects periods
 */

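/*
 * Allocate a new hist_entry as a copy of 'template', with the
 * callchain root tacked onto the same allocation when callchains are
 * in use.  Resources the template merely borrows (maps, thread,
 * branch_info, raw_data) are either reference-counted or duplicated
 * here so that the new entry owns everything it points to.
 */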
static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        size_t callchain_size = 0;
        struct hist_entry *he;

        if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (symbol_conf.cumulate_callchain) {
                        he->stat_acc = malloc(sizeof(he->stat));
                        if (he->stat_acc == NULL) {
                                free(he);
                                return NULL;
                        }
                        memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                        if (!sample_self)
                                memset(&he->stat, 0, sizeof(he->stat));
                }

                map__get(he->ms.map);

                if (he->branch_info) {
                        /*
                         * This branch info is (partially) allocated from
                         * sample__resolve_bstack() and will be freed after
                         * adding new entries, so we need to save a copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
                                map__zput(he->ms.map);
                                free(he->stat_acc);
                                free(he);
                                return NULL;
                        }

                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));

                        map__get(he->branch_info->from.map);
                        map__get(he->branch_info->to.map);
                }

                if (he->mem_info) {
                        map__get(he->mem_info->iaddr.map);
                        map__get(he->mem_info->daddr.map);
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                if (he->raw_data) {
                        he->raw_data = memdup(he->raw_data, he->raw_size);

                        if (he->raw_data == NULL) {
                                map__put(he->ms.map);
                                if (he->branch_info) {
                                        map__put(he->branch_info->from.map);
                                        map__put(he->branch_info->to.map);
                                        free(he->branch_info);
                                }
                                if (he->mem_info) {
                                        map__put(he->mem_info->iaddr.map);
                                        map__put(he->mem_info->daddr.map);
                                }
                                free(he->stat_acc);
                                free(he);
                                return NULL;
                        }
                }
                INIT_LIST_HEAD(&he->pairs.node);
                thread__get(he->thread);

                if (!symbol_conf.report_hierarchy)
                        he->leaf = true;
        }

        return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
        if (!symbol_conf.use_callchain)
                return;

        he->hists->callchain_period += period;
        if (!he->filtered)
                he->hists->callchain_non_filtered_period += period;
}

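/*
 * Look 'entry' up in the input rbtree keyed by hist_entry__cmp().  On
 * a match the periods are accumulated into the existing node; on a
 * miss a new entry is created with hist_entry__new() and linked in.
 */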
static struct hist_entry *hists__findnew_entry(struct hists *hists,
                                               struct hist_entry *entry,
                                               struct addr_location *al,
                                               bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching for an entry regardless of which
                 * sort keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        if (sample_self) {
                                he_stat__add_period(&he->stat, period, weight);
                                hist_entry__add_callchain_period(he, period);
                        }
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        zfree(&entry->mem_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                map__put(he->ms.map);
                                he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        if (sample_self)
                hist_entry__add_callchain_period(he, period);
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}

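/*
 * Build an on-stack template from the resolved sample/addr_location
 * and hand it to hists__findnew_entry().  Only if no existing entry
 * matches does the template get deep-copied into a real allocation.
 */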
struct hist_entry *__hists__add_entry(struct hists *hists,
                                      struct addr_location *al,
                                      struct symbol *sym_parent,
                                      struct branch_info *bi,
                                      struct mem_info *mi,
                                      struct perf_sample *sample,
                                      bool sample_self)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
                .level   = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = sample->period,
                        .weight = sample->weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = sample->transaction,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };

        return hists__findnew_entry(hists, &entry, al, sample_self);
}

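/*
 * The hist_entry_iter callbacks below come in four flavors (mem,
 * branch, normal, cumulative), bound together by the hist_iter_ops
 * tables further down.  prepare_entry/finish_entry bracket one
 * sample, add_single_entry handles the sample itself, and the
 * next_entry/add_next_entry pair walks any additional sub-entries
 * (branch stack slots, callchain nodes) the sample expands into.
 */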
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->priv = mi;
        return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->priv;
        struct hists *hists = evsel__hists(iter->evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = sample->weight;
        if (!cost)
                cost = 1;

        /*
         * must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort() which is solely
         * based on periods.  We want sorting to be done on
         * nr_events * weight, and this is indirectly achieved by
         * passing period=weight here and by the he_stat__add_period()
         * function.
         */
        sample->period = cost;

        he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
                                sample, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since the mem info
         * was either already freed in hists__findnew_entry() or passed to a
         * new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->priv = bi;
        return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        /* to avoid calling the callback function */
        iter->he = NULL;

        return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->priv;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        al->map = bi[i].to.map;
        al->sym = bi[i].to.sym;
        al->addr = bi[i].to.addr;
        return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->priv;

        if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled.  Thus we use a pseudo period of 1.
         */
        sample->period = 1;
        sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

        he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
                                sample, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                                sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

        return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;

        callchain_cursor_commit(&callchain_cursor);

        /*
         * This is for detecting cycles or recursion so that they're
         * cumulated only once, preventing entries from accounting for
         * more than 100% overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->priv = he_cache;
        iter->curr = 0;

        return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        int err = 0;

        he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
                                sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        hist_entry__append_callchain(he, sample);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(&callchain_cursor);

        hists__inc_nr_samples(hists, he->filtered);

        return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(&callchain_cursor);
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .hists = evsel__hists(evsel),
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .parent = iter->parent,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };
        int i;
        struct callchain_cursor cursor;

        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there are duplicate entries in the callchain;
         * it may contain cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling the callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                                sample, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}

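/*
 * vtables binding the callbacks above to the four iteration modes;
 * callers pick one and store it in iter->ops before calling
 * hist_entry_iter__add().
 */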
const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};

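/*
 * Drive one sample through the selected ops: resolve the callchain,
 * prepare, add the single entry, then loop over next/add_next until
 * exhausted, invoking add_entry_cb (if set) after each addition.
 *
 * A minimal caller sketch (field names are from this file; the exact
 * call sites live in the builtin tools and may differ in detail):
 *
 *      struct hist_entry_iter iter = {
 *              .evsel  = evsel,
 *              .sample = sample,
 *              .ops    = &hist_iter_normal,
 *      };
 *      err = hist_entry_iter__add(&iter, &al, max_stack, NULL);
 */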
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
{
        int err, err2;

        err = sample__resolve_callchain(iter->sample, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
        if (err)
                return err;

        iter->max_stack = max_stack_depth;

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        return err;
}

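/*
 * Three comparison flavors are used at different stages: 'cmp' when
 * entries are first inserted, 'collapse' when merging across the
 * input trees, and 'sort' when producing the final output order.
 * Each walks the configured sort list and stops at the first format
 * that distinguishes the two entries.
 */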
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
        thread__zput(he->thread);
        map__zput(he->ms.map);

        if (he->branch_info) {
                map__zput(he->branch_info->from.map);
                map__zput(he->branch_info->to.map);
                zfree(&he->branch_info);
        }

        if (he->mem_info) {
                map__zput(he->mem_info->iaddr.map);
                map__zput(he->mem_info->daddr.map);
                zfree(&he->mem_info);
        }

        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        if (he->srcfile && he->srcfile[0])
                free(he->srcfile);
        free_callchain(he->callchain);
        free(he->trace_output);
        free(he->raw_data);
        free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less':
 * it would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
                                   struct perf_hpp_fmt *fmt, int printed)
{
        if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
                const int width = fmt->width(fmt, hpp, hists_to_evsel(he->hists));
                if (printed < width) {
                        advance_hpp(hpp, printed);
                        printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
                }
        }

        return printed;
}

/*
 * collapse the histogram
 */

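/*
 * Collapsing merges entries that compare equal under the collapse
 * keys from the input tree into hists->entries_collapsed; the output
 * resort stage then moves them into hists->entries in display order.
 */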
static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
                                       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
        return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
                                                enum hist_filter type,
                                                fmt_chk_fn check)
{
        struct perf_hpp_fmt *fmt;
        bool type_match = false;
        struct hist_entry *parent = he->parent_he;

        switch (type) {
        case HIST_FILTER__THREAD:
                if (symbol_conf.comm_list == NULL &&
                    symbol_conf.pid_list == NULL &&
                    symbol_conf.tid_list == NULL)
                        return;
                break;
        case HIST_FILTER__DSO:
                if (symbol_conf.dso_list == NULL)
                        return;
                break;
        case HIST_FILTER__SYMBOL:
                if (symbol_conf.sym_list == NULL)
                        return;
                break;
        case HIST_FILTER__PARENT:
        case HIST_FILTER__GUEST:
        case HIST_FILTER__HOST:
        case HIST_FILTER__SOCKET:
        default:
                return;
        }

        /* if it's filtered by its own fmt, it has to have filter bits */
        perf_hpp_list__for_each_format(he->hpp_list, fmt) {
                if (check(fmt)) {
                        type_match = true;
                        break;
                }
        }

        if (type_match) {
                /*
                 * If the filter is for the current level's entry, propagate
                 * the filter marker to the parents.  The marker bit was
                 * already set by default, so it only needs to be cleared
                 * for non-filtered entries.
                 */
                if (!(he->filtered & (1 << type))) {
                        while (parent) {
                                parent->filtered &= ~(1 << type);
                                parent = parent->parent_he;
                        }
                }
        } else {
                /*
                 * If the current entry doesn't have any matching format,
                 * set the filter marker for upper-level entries.  It will
                 * be cleared if any of its lower-level entries are not
                 * filtered.
                 *
                 * For lower-level entries, it inherits the parent's filter
                 * bit so that lower-level entries of a non-filtered entry
                 * won't set the filter marker.
                 */
                if (parent == NULL)
                        he->filtered |= (1 << type);
                else
                        he->filtered |= (parent->filtered & (1 << type));
        }
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
        hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
                                            check_thread_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
                                            perf_hpp__is_dso_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
                                            perf_hpp__is_sym_entry);

        hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
                                                 struct rb_root *root,
                                                 struct hist_entry *he,
                                                 struct hist_entry *parent_he,
                                                 struct perf_hpp_list *hpp_list)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter, *new;
        struct perf_hpp_fmt *fmt;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = 0;
                perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                        cmp = fmt->collapse(fmt, iter, he);
                        if (cmp)
                                break;
                }

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);
                        return iter;
                }

                if (cmp < 0)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }

        new = hist_entry__new(he, true);
        if (new == NULL)
                return NULL;

        hists->nr_entries++;

        /* save related format list for output */
        new->hpp_list = hpp_list;
        new->parent_he = parent_he;

        hist_entry__apply_hierarchy_filters(new);

        /* some fields are now passed to 'new' */
        perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
                        he->trace_output = NULL;
                else
                        new->trace_output = NULL;

                if (perf_hpp__is_srcline_entry(fmt))
                        he->srcline = NULL;
                else
                        new->srcline = NULL;

                if (perf_hpp__is_srcfile_entry(fmt))
                        he->srcfile = NULL;
                else
                        new->srcfile = NULL;
        }

        rb_link_node(&new->rb_node_in, parent, p);
        rb_insert_color(&new->rb_node_in, root);
        return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct perf_hpp_list_node *node;
        struct hist_entry *new_he = NULL;
        struct hist_entry *parent = NULL;
        int depth = 0;
        int ret = 0;

        list_for_each_entry(node, &hists->hpp_formats, list) {
                /* skip period (overhead) and elided columns */
                if (node->level == 0 || node->skip)
                        continue;

                /* insert copy of 'he' for each fmt into the hierarchy */
                new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
                if (new_he == NULL) {
                        ret = -1;
                        break;
                }

                root = &new_he->hroot_in;
                new_he->depth = depth++;
                parent = new_he;
        }

        if (new_he) {
                new_he->leaf = true;

                if (symbol_conf.use_callchain) {
                        callchain_cursor_reset(&callchain_cursor);
                        if (callchain_merge(&callchain_cursor,
                                            new_he->callchain,
                                            he->callchain) < 0)
                                ret = -1;
                }
        }

        /* 'he' is no longer used */
        hist_entry__delete(he);

        /* return 0 (or -1) since filters have already been applied */
        return ret;
}

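/*
 * Returns 1 if 'he' was linked into 'root' as a new node, 0 if it was
 * merged into (and freed in favor of) an existing node, and -1 if
 * merging the callchains failed.
 */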
int hists__collapse_insert_entry(struct hists *hists, struct rb_root *root,
                                 struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        if (symbol_conf.report_hierarchy)
                return hists__hierarchy_insert_entry(hists, root, he);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        int ret = 0;

                        he_stat__add_stat(&iter->stat, &he->stat);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_stat(iter->stat_acc, he->stat_acc);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                if (callchain_merge(&callchain_cursor,
                                                    iter->callchain,
                                                    he->callchain) < 0)
                                        ret = -1;
                        }
                        hist_entry__delete(he);
                        return ret;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return 1;
}

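/*
 * hists->entries_in double-buffers between entries_in_array[0] and
 * [1]: grab the current input tree for collapsing and flip new
 * additions over to the other one, so samples can keep streaming in
 * while this batch is processed.
 */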
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
        hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        int ret;

        if (!sort__need_collapse)
                return 0;

        hists->nr_entries = 0;

        root = hists__get_rotate_entries_in(hists);

        next = rb_first(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
                if (ret < 0)
                        return -1;

                if (ret) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
        return 0;
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
        struct hists *hists = a->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__should_skip(fmt, a->hists))
                        continue;

                cmp = fmt->sort(fmt, a, b);
                if (cmp)
                        break;
        }

        return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
        hists->nr_non_filtered_entries = 0;
        hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
        hists->nr_entries = 0;
        hists->stats.total_period = 0;

        hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
        hists->nr_non_filtered_entries++;
        hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered)
                hists__inc_filter_stats(hists, h);

        hists->nr_entries++;
        hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
        struct rb_node *node;
        struct hist_entry *he;

        node = rb_first(&hists->entries);

        hists->stats.total_period = 0;
        hists->stats.total_non_filtered_period = 0;

        /*
         * Recalculate the total period using top-level entries only,
         * since lower-level entries only see non-filtered entries
         * while upper-level entries have the sum of both.
         */
1471        while (node) {
1472                he = rb_entry(node, struct hist_entry, rb_node);
1473                node = rb_next(node);
1474
1475                hists->stats.total_period += he->stat.period;
1476                if (!he->filtered)
1477                        hists->stats.total_non_filtered_period += he->stat.period;
1478        }
1479}
1480
1481static void hierarchy_insert_output_entry(struct rb_root *root,
1482                                          struct hist_entry *he)
1483{
1484        struct rb_node **p = &root->rb_node;
1485        struct rb_node *parent = NULL;
1486        struct hist_entry *iter;
1487        struct perf_hpp_fmt *fmt;
1488
1489        while (*p != NULL) {
1490                parent = *p;
1491                iter = rb_entry(parent, struct hist_entry, rb_node);
1492
1493                if (hist_entry__sort(he, iter) > 0)
1494                        p = &parent->rb_left;
1495                else
1496                        p = &parent->rb_right;
1497        }
1498
1499        rb_link_node(&he->rb_node, parent, p);
1500        rb_insert_color(&he->rb_node, root);
1501
1502        /* update column width of dynamic entry */
1503        perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1504                if (perf_hpp__is_dynamic_entry(fmt))
1505                        fmt->sort(fmt, he, NULL);
1506        }
1507}
1508
1509static void hists__hierarchy_output_resort(struct hists *hists,
1510                                           struct ui_progress *prog,
1511                                           struct rb_root *root_in,
1512                                           struct rb_root *root_out,
1513                                           u64 min_callchain_hits,
1514                                           bool use_callchain)
1515{
1516        struct rb_node *node;
1517        struct hist_entry *he;
1518
1519        *root_out = RB_ROOT;
1520        node = rb_first(root_in);
1521
1522        while (node) {
1523                he = rb_entry(node, struct hist_entry, rb_node_in);
1524                node = rb_next(node);
1525
1526                hierarchy_insert_output_entry(root_out, he);
1527
1528                if (prog)
1529                        ui_progress__update(prog, 1);
1530
1531                if (!he->leaf) {
1532                        hists__hierarchy_output_resort(hists, prog,
1533                                                       &he->hroot_in,
1534                                                       &he->hroot_out,
1535                                                       min_callchain_hits,
1536                                                       use_callchain);
1537                        hists->nr_entries++;
1538                        if (!he->filtered) {
1539                                hists->nr_non_filtered_entries++;
1540                                hists__calc_col_len(hists, he);
1541                        }
1542
1543                        continue;
1544                }
1545
1546                if (!use_callchain)
1547                        continue;
1548
1549                if (callchain_param.mode == CHAIN_GRAPH_REL) {
1550                        u64 total = he->stat.period;
1551
1552                        if (symbol_conf.cumulate_callchain)
1553                                total = he->stat_acc->period;
1554
1555                        min_callchain_hits = total * (callchain_param.min_percent / 100);
1556                }
1557
1558                callchain_param.sort(&he->sorted_chain, he->callchain,
1559                                     min_callchain_hits, &callchain_param);
1560        }
1561}
1562
1563static void __hists__insert_output_entry(struct rb_root *entries,
1564                                         struct hist_entry *he,
1565                                         u64 min_callchain_hits,
1566                                         bool use_callchain)
1567{
1568        struct rb_node **p = &entries->rb_node;
1569        struct rb_node *parent = NULL;
1570        struct hist_entry *iter;
1571        struct perf_hpp_fmt *fmt;
1572
1573        if (use_callchain) {
1574                if (callchain_param.mode == CHAIN_GRAPH_REL) {
1575                        u64 total = he->stat.period;
1576
1577                        if (symbol_conf.cumulate_callchain)
1578                                total = he->stat_acc->period;
1579
1580                        min_callchain_hits = total * (callchain_param.min_percent / 100);
1581                }
1582                callchain_param.sort(&he->sorted_chain, he->callchain,
1583                                      min_callchain_hits, &callchain_param);
1584        }
1585
1586        while (*p != NULL) {
1587                parent = *p;
1588                iter = rb_entry(parent, struct hist_entry, rb_node);
1589
1590                if (hist_entry__sort(he, iter) > 0)
1591                        p = &(*p)->rb_left;
1592                else
1593                        p = &(*p)->rb_right;
1594        }
1595
1596        rb_link_node(&he->rb_node, parent, p);
1597        rb_insert_color(&he->rb_node, entries);
1598
1599        perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1600                if (perf_hpp__is_dynamic_entry(fmt) &&
1601                    perf_hpp__defined_dynamic_entry(fmt, he->hists))
1602                        fmt->sort(fmt, he, NULL);  /* update column width */
1603        }
1604}
1605
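/*
 * Rebuild hists->entries for display: move entries from the collapsed
 * (or input) tree into output sort order, recomputing the stats and
 * column widths and ticking @prog for each entry processed.
 */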
1606static void output_resort(struct hists *hists, struct ui_progress *prog,
1607                          bool use_callchain)
1608{
1609        struct rb_root *root;
1610        struct rb_node *next;
1611        struct hist_entry *n;
1612        u64 callchain_total;
1613        u64 min_callchain_hits;
1614
1615        callchain_total = hists->callchain_period;
1616        if (symbol_conf.filter_relative)
1617                callchain_total = hists->callchain_non_filtered_period;
1618
1619        min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1620
1621        hists__reset_stats(hists);
1622        hists__reset_col_len(hists);
1623
1624        if (symbol_conf.report_hierarchy) {
1625                hists__hierarchy_output_resort(hists, prog,
1626                                               &hists->entries_collapsed,
1627                                               &hists->entries,
1628                                               min_callchain_hits,
1629                                               use_callchain);
1630                hierarchy_recalc_total_periods(hists);
1631                return;
1632        }
1633
1634        if (sort__need_collapse)
1635                root = &hists->entries_collapsed;
1636        else
1637                root = hists->entries_in;
1638
1639        next = rb_first(root);
1640        hists->entries = RB_ROOT;
1641
1642        while (next) {
1643                n = rb_entry(next, struct hist_entry, rb_node_in);
1644                next = rb_next(&n->rb_node_in);
1645
1646                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1647                hists__inc_stats(hists, n);
1648
1649                if (!n->filtered)
1650                        hists__calc_col_len(hists, n);
1651
1652                if (prog)
1653                        ui_progress__update(prog, 1);
1654        }
1655}
1656
1657void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
1658{
1659        bool use_callchain;
1660
1661        if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1662                use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
1663        else
1664                use_callchain = symbol_conf.use_callchain;
1665
1666        output_resort(evsel__hists(evsel), prog, use_callchain);
1667}
1668
1669void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1670{
1671        output_resort(hists, prog, symbol_conf.use_callchain);
1672}
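
/*
 * Minimal usage sketch (hypothetical caller; the real ones live in
 * e.g. builtin-report.c):
 *
 *	struct ui_progress prog;
 *
 *	ui_progress__init(&prog, hists->nr_entries, "Sorting events...");
 *	hists__collapse_resort(hists, &prog);
 *	perf_evsel__output_resort(evsel, &prog);
 *	ui_progress__finish();
 */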
1673
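/*
 * Can the hierarchy traversal descend into @he's children?  Only for
 * unfolded non-leaf entries, unless @hmd forces a direction.
 */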
1674static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1675{
1676        if (he->leaf || hmd == HMD_FORCE_SIBLING)
1677                return false;
1678
1679        if (he->unfolded || hmd == HMD_FORCE_CHILD)
1680                return true;
1681
1682        return false;
1683}
1684
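/*
 * The rb_hierarchy_*() helpers below walk the nested hroot_out trees
 * as if they were a single flat tree, in display order.
 */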
1685struct rb_node *rb_hierarchy_last(struct rb_node *node)
1686{
1687        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1688
1689        while (can_goto_child(he, HMD_NORMAL)) {
1690                node = rb_last(&he->hroot_out);
1691                he = rb_entry(node, struct hist_entry, rb_node);
1692        }
1693        return node;
1694}
1695
1696struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
1697{
1698        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1699
1700        if (can_goto_child(he, hmd))
1701                node = rb_first(&he->hroot_out);
1702        else
1703                node = rb_next(node);
1704
1705        while (node == NULL) {
1706                he = he->parent_he;
1707                if (he == NULL)
1708                        break;
1709
1710                node = rb_next(&he->rb_node);
1711        }
1712        return node;
1713}
1714
1715struct rb_node *rb_hierarchy_prev(struct rb_node *node)
1716{
1717        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1718
1719        node = rb_prev(node);
1720        if (node)
1721                return rb_hierarchy_last(node);
1722
1723        he = he->parent_he;
1724        if (he == NULL)
1725                return NULL;
1726
1727        return &he->rb_node;
1728}
1729
1730bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
1731{
1732        struct rb_node *node;
1733        struct hist_entry *child;
1734        float percent;
1735
1736        if (he->leaf)
1737                return false;
1738
1739        node = rb_first(&he->hroot_out);
1740        child = rb_entry(node, struct hist_entry, rb_node);
1741
1742        while (node && child->filtered) {
1743                node = rb_next(node);
1744                child = rb_entry(node, struct hist_entry, rb_node);
1745        }
1746
1747        if (node)
1748                percent = hist_entry__get_percent_limit(child);
1749        else
1750                percent = 0;
1751
1752        return node && percent >= limit;
1753}
1754
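/*
 * Clear @filter on @h; in hierarchy mode also add its period back to
 * the parents and clear their filter bit.  Once an entry is completely
 * unfiltered, fold it and account it in the non-filtered stats and
 * column widths.
 */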
1755static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
1756                                       enum hist_filter filter)
1757{
1758        h->filtered &= ~(1 << filter);
1759
1760        if (symbol_conf.report_hierarchy) {
1761                struct hist_entry *parent = h->parent_he;
1762
1763                while (parent) {
1764                        he_stat__add_stat(&parent->stat, &h->stat);
1765
1766                        parent->filtered &= ~(1 << filter);
1767
1768                        if (parent->filtered)
1769                                goto next;
1770
1771                        /* force fold unfiltered entry for simplicity */
1772                        parent->unfolded = false;
1773                        parent->has_no_entry = false;
1774                        parent->row_offset = 0;
1775                        parent->nr_rows = 0;
1776next:
1777                        parent = parent->parent_he;
1778                }
1779        }
1780
1781        if (h->filtered)
1782                return;
1783
1784        /* force fold unfiltered entry for simplicity */
1785        h->unfolded = false;
1786        h->has_no_entry = false;
1787        h->row_offset = 0;
1788        h->nr_rows = 0;
1789
1790        hists->stats.nr_non_filtered_samples += h->stat.nr_events;
1791
1792        hists__inc_filter_stats(hists, h);
1793        hists__calc_col_len(hists, h);
1794}
1795
1797static bool hists__filter_entry_by_dso(struct hists *hists,
1798                                       struct hist_entry *he)
1799{
1800        if (hists->dso_filter != NULL &&
1801            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
1802                he->filtered |= (1 << HIST_FILTER__DSO);
1803                return true;
1804        }
1805
1806        return false;
1807}
1808
1809static bool hists__filter_entry_by_thread(struct hists *hists,
1810                                          struct hist_entry *he)
1811{
1812        if (hists->thread_filter != NULL &&
1813            he->thread != hists->thread_filter) {
1814                he->filtered |= (1 << HIST_FILTER__THREAD);
1815                return true;
1816        }
1817
1818        return false;
1819}
1820
1821static bool hists__filter_entry_by_symbol(struct hists *hists,
1822                                          struct hist_entry *he)
1823{
1824        if (hists->symbol_filter_str != NULL &&
1825            (!he->ms.sym || strstr(he->ms.sym->name,
1826                                   hists->symbol_filter_str) == NULL)) {
1827                he->filtered |= (1 << HIST_FILTER__SYMBOL);
1828                return true;
1829        }
1830
1831        return false;
1832}
1833
1834static bool hists__filter_entry_by_socket(struct hists *hists,
1835                                          struct hist_entry *he)
1836{
1837        if ((hists->socket_filter > -1) &&
1838            (he->socket != hists->socket_filter)) {
1839                he->filtered |= (1 << HIST_FILTER__SOCKET);
1840                return true;
1841        }
1842
1843        return false;
1844}
1845
1846typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
1847
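/*
 * Re-run @filter over all output entries after a filter change,
 * rebuilding the non-filtered stats and column widths from scratch.
 */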
1848static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
1849{
1850        struct rb_node *nd;
1851
1852        hists->stats.nr_non_filtered_samples = 0;
1853
1854        hists__reset_filter_stats(hists);
1855        hists__reset_col_len(hists);
1856
1857        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1858                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1859
1860                if (filter(hists, h))
1861                        continue;
1862
1863                hists__remove_entry_filter(hists, h, type);
1864        }
1865}
1866
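/*
 * Re-insert @he into @root in output sort order, recursing into the
 * children of unfiltered non-leaf entries; filtering may have changed
 * the periods that the sort keys depend on.
 */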
1867static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
1868{
1869        struct rb_node **p = &root->rb_node;
1870        struct rb_node *parent = NULL;
1871        struct hist_entry *iter;
1872        struct rb_root new_root = RB_ROOT;
1873        struct rb_node *nd;
1874
1875        while (*p != NULL) {
1876                parent = *p;
1877                iter = rb_entry(parent, struct hist_entry, rb_node);
1878
1879                if (hist_entry__sort(he, iter) > 0)
1880                        p = &(*p)->rb_left;
1881                else
1882                        p = &(*p)->rb_right;
1883        }
1884
1885        rb_link_node(&he->rb_node, parent, p);
1886        rb_insert_color(&he->rb_node, root);
1887
1888        if (he->leaf || he->filtered)
1889                return;
1890
1891        nd = rb_first(&he->hroot_out);
1892        while (nd) {
1893                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1894
1895                nd = rb_next(nd);
1896                rb_erase(&h->rb_node, &he->hroot_out);
1897
1898                resort_filtered_entry(&new_root, h);
1899        }
1900
1901        he->hroot_out = new_root;
1902}
1903
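/*
 * Hierarchy counterpart of hists__filter_by_type(): walk the whole
 * hierarchy applying hist_entry__filter() (the three cases below),
 * then recalculate the totals and resort the output tree, as the
 * periods may have changed.
 */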
1904static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
1905{
1906        struct rb_node *nd;
1907        struct rb_root new_root = RB_ROOT;
1908
1909        hists->stats.nr_non_filtered_samples = 0;
1910
1911        hists__reset_filter_stats(hists);
1912        hists__reset_col_len(hists);
1913
1914        nd = rb_first(&hists->entries);
1915        while (nd) {
1916                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1917                int ret;
1918
1919                ret = hist_entry__filter(h, type, arg);
1920
1921                /*
1922                 * case 1. non-matching type
1923                 * zero out the period, set filter marker and move to child
1924                 */
1925                if (ret < 0) {
1926                        memset(&h->stat, 0, sizeof(h->stat));
1927                        h->filtered |= (1 << type);
1928
1929                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
1930                }
1931                /*
1932                 * case 2. matched type (filter out)
1933                 * set filter marker and move to next
1934                 */
1935                else if (ret == 1) {
1936                        h->filtered |= (1 << type);
1937
1938                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
1939                }
1940                /*
1941                 * case 3. ok (not filtered)
1942                 * add period to hists and parents, erase the filter marker
1943                 * and move to next sibling
1944                 */
1945                else {
1946                        hists__remove_entry_filter(hists, h, type);
1947
1948                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
1949                }
1950        }
1951
1952        hierarchy_recalc_total_periods(hists);
1953
1954        /*
1955         * Resort the output after applying a new filter, since a filter
1956         * in a lower hierarchy level can change periods in an upper one.
1957         */
1958        nd = rb_first(&hists->entries);
1959        while (nd) {
1960                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1961
1962                nd = rb_next(nd);
1963                rb_erase(&h->rb_node, &hists->entries);
1964
1965                resort_filtered_entry(&new_root, h);
1966        }
1967
1968        hists->entries = new_root;
1969}
1970
1971void hists__filter_by_thread(struct hists *hists)
1972{
1973        if (symbol_conf.report_hierarchy)
1974                hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
1975                                        hists->thread_filter);
1976        else
1977                hists__filter_by_type(hists, HIST_FILTER__THREAD,
1978                                      hists__filter_entry_by_thread);
1979}
1980
1981void hists__filter_by_dso(struct hists *hists)
1982{
1983        if (symbol_conf.report_hierarchy)
1984                hists__filter_hierarchy(hists, HIST_FILTER__DSO,
1985                                        hists->dso_filter);
1986        else
1987                hists__filter_by_type(hists, HIST_FILTER__DSO,
1988                                      hists__filter_entry_by_dso);
1989}
1990
1991void hists__filter_by_symbol(struct hists *hists)
1992{
1993        if (symbol_conf.report_hierarchy)
1994                hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
1995                                        hists->symbol_filter_str);
1996        else
1997                hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
1998                                      hists__filter_entry_by_symbol);
1999}
2000
2001void hists__filter_by_socket(struct hists *hists)
2002{
2003        if (symbol_conf.report_hierarchy)
2004                hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2005                                        &hists->socket_filter);
2006        else
2007                hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2008                                      hists__filter_entry_by_socket);
2009}
2010
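/* nr_events[0] doubles as the running total across all event types */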
2011void events_stats__inc(struct events_stats *stats, u32 type)
2012{
2013        ++stats->nr_events[0];
2014        ++stats->nr_events[type];
2015}
2016
2017void hists__inc_nr_events(struct hists *hists, u32 type)
2018{
2019        events_stats__inc(&hists->stats, type);
2020}
2021
2022void hists__inc_nr_samples(struct hists *hists, bool filtered)
2023{
2024        events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
2025        if (!filtered)
2026                hists->stats.nr_non_filtered_samples++;
2027}
2028
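/*
 * Insert a zero-period clone of @pair into @hists so that an entry
 * missing from the leader still shows up there; used by hists__link().
 */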
2029static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2030                                                 struct hist_entry *pair)
2031{
2032        struct rb_root *root;
2033        struct rb_node **p;
2034        struct rb_node *parent = NULL;
2035        struct hist_entry *he;
2036        int64_t cmp;
2037
2038        if (sort__need_collapse)
2039                root = &hists->entries_collapsed;
2040        else
2041                root = hists->entries_in;
2042
2043        p = &root->rb_node;
2044
2045        while (*p != NULL) {
2046                parent = *p;
2047                he = rb_entry(parent, struct hist_entry, rb_node_in);
2048
2049                cmp = hist_entry__collapse(he, pair);
2050
2051                if (!cmp)
2052                        goto out;
2053
2054                if (cmp < 0)
2055                        p = &(*p)->rb_left;
2056                else
2057                        p = &(*p)->rb_right;
2058        }
2059
2060        he = hist_entry__new(pair, true);
2061        if (he) {
2062                memset(&he->stat, 0, sizeof(he->stat));
2063                he->hists = hists;
2064                rb_link_node(&he->rb_node_in, parent, p);
2065                rb_insert_color(&he->rb_node_in, root);
2066                hists__inc_stats(hists, he);
2067                he->dummy = true;
2068        }
2069out:
2070        return he;
2071}
2072
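/* Binary search the (collapsed) input tree for an entry matching @he */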
2073static struct hist_entry *hists__find_entry(struct hists *hists,
2074                                            struct hist_entry *he)
2075{
2076        struct rb_node *n;
2077
2078        if (sort__need_collapse)
2079                n = hists->entries_collapsed.rb_node;
2080        else
2081                n = hists->entries_in->rb_node;
2082
2083        while (n) {
2084                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2085                int64_t cmp = hist_entry__collapse(iter, he);
2086
2087                if (cmp < 0)
2088                        n = n->rb_left;
2089                else if (cmp > 0)
2090                        n = n->rb_right;
2091                else
2092                        return iter;
2093        }
2094
2095        return NULL;
2096}
2097
2098/*
2099 * Look for pairs to link to the leader buckets (hist_entries):
2100 */
2101void hists__match(struct hists *leader, struct hists *other)
2102{
2103        struct rb_root *root;
2104        struct rb_node *nd;
2105        struct hist_entry *pos, *pair;
2106
2107        if (sort__need_collapse)
2108                root = &leader->entries_collapsed;
2109        else
2110                root = leader->entries_in;
2111
2112        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
2113                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2114                pair = hists__find_entry(other, pos);
2115
2116                if (pair)
2117                        hist_entry__add_pair(pair, pos);
2118        }
2119}
2120
2121/*
2122 * Look for entries in the other hists that are not present in the leader;
2123 * for each one found, add a dummy entry to the leader hists, with period=0
2124 * and nr_events=0, to serve as the list header.
2125 */
2126int hists__link(struct hists *leader, struct hists *other)
2127{
2128        struct rb_root *root;
2129        struct rb_node *nd;
2130        struct hist_entry *pos, *pair;
2131
2132        if (sort__need_collapse)
2133                root = &other->entries_collapsed;
2134        else
2135                root = other->entries_in;
2136
2137        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
2138                pos = rb_entry(nd, struct hist_entry, rb_node_in);
2139
2140                if (!hist_entry__has_pairs(pos)) {
2141                        pair = hists__add_dummy_entry(leader, pos);
2142                        if (pair == NULL)
2143                                return -1;
2144                        hist_entry__add_pair(pos, pair);
2145                }
2146        }
2147
2148        return 0;
2149}
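
/*
 * Sketch of how a diff-style tool pairs two histograms (hypothetical
 * caller; see builtin-diff.c for the real thing):
 *
 *	hists__match(leader, other);
 *	if (hists__link(leader, other) < 0)
 *		pr_err("Failed to link histograms\n");
 */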
2150
2151void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2152                          struct perf_sample *sample, bool nonany_branch_mode)
2153{
2154        struct branch_info *bi;
2155
2156        /* If we have branch cycles, always annotate them. */
2157        if (bs && bs->nr && bs->entries[0].flags.cycles) {
2158                int i;
2159
2160                bi = sample__resolve_bstack(sample, al);
2161                if (bi) {
2162                        struct addr_map_symbol *prev = NULL;
2163
2164                        /*
2165                         * Ignore errors; we still want to process
2166                         * the other entries.
2167                         *
2168                         * For non-standard branch modes always
2169                         * force no IPC (prev == NULL).
2170                         *
2171                         * Note that perf stores branches reversed
2172                         * from program order!
2173                         */
2174                        for (i = bs->nr - 1; i >= 0; i--) {
2175                                addr_map_symbol__account_cycles(&bi[i].from,
2176                                        nonany_branch_mode ? NULL : prev,
2177                                        bi[i].flags.cycles);
2178                                prev = &bi[i].to;
2179                        }
2180                        free(bi);
2181                }
2182        }
2183}
2184
2185size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
2186{
2187        struct perf_evsel *pos;
2188        size_t ret = 0;
2189
2190        evlist__for_each(evlist, pos) {
2191                ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
2192                ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
2193        }
2194
2195        return ret;
2196}
2197
2199u64 hists__total_period(struct hists *hists)
2200{
2201        return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2202                hists->stats.total_period;
2203}
2204
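/*
 * Parse the --percentage <relative|absolute> option: "relative" computes
 * percentages against the filtered total only, "absolute" against the
 * total of all entries.
 */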
2205int parse_filter_percentage(const struct option *opt __maybe_unused,
2206                            const char *arg, int unset __maybe_unused)
2207{
2208        if (!strcmp(arg, "relative"))
2209                symbol_conf.filter_relative = true;
2210        else if (!strcmp(arg, "absolute"))
2211                symbol_conf.filter_relative = false;
2212        else
2213                return -1;
2214
2215        return 0;
2216}
2217
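/*
 * Handle the hist.* section of perfconfig, e.g.:
 *
 *	[hist]
 *		percentage = relative
 *
 * which is equivalent to passing --percentage relative.
 */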
2218int perf_hist_config(const char *var, const char *value)
2219{
2220        if (!strcmp(var, "hist.percentage"))
2221                return parse_filter_percentage(NULL, value, 0);
2222
2223        return 0;
2224}
2225
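/*
 * Initialize @hists: two input trees (entries_in_array[], with
 * entries_in pointing at the active one), the collapsed tree, the
 * output tree and the lock; the socket filter starts disabled (-1).
 */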
2226int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2227{
2228        memset(hists, 0, sizeof(*hists));
2229        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
2230        hists->entries_in = &hists->entries_in_array[0];
2231        hists->entries_collapsed = RB_ROOT;
2232        hists->entries = RB_ROOT;
2233        pthread_mutex_init(&hists->lock, NULL);
2234        hists->socket_filter = -1;
2235        hists->hpp_list = hpp_list;
2236        INIT_LIST_HEAD(&hists->hpp_formats);
2237        return 0;
2238}
2239
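/* Free any entries still linked in an input or collapsed tree */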
2240static void hists__delete_remaining_entries(struct rb_root *root)
2241{
2242        struct rb_node *node;
2243        struct hist_entry *he;
2244
2245        while (!RB_EMPTY_ROOT(root)) {
2246                node = rb_first(root);
2247                rb_erase(node, root);
2248
2249                he = rb_entry(node, struct hist_entry, rb_node_in);
2250                hist_entry__delete(he);
2251        }
2252}
2253
2254static void hists__delete_all_entries(struct hists *hists)
2255{
2256        hists__delete_entries(hists);
2257        hists__delete_remaining_entries(&hists->entries_in_array[0]);
2258        hists__delete_remaining_entries(&hists->entries_in_array[1]);
2259        hists__delete_remaining_entries(&hists->entries_collapsed);
2260}
2261
2262static void hists_evsel__exit(struct perf_evsel *evsel)
2263{
2264        struct hists *hists = evsel__hists(evsel);
2265        struct perf_hpp_fmt *fmt, *pos;
2266        struct perf_hpp_list_node *node, *tmp;
2267
2268        hists__delete_all_entries(hists);
2269
2270        list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2271                perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2272                        list_del(&fmt->list);
2273                        free(fmt);
2274                }
2275                list_del(&node->list);
2276                free(node);
2277        }
2278}
2279
2280static int hists_evsel__init(struct perf_evsel *evsel)
2281{
2282        struct hists *hists = evsel__hists(evsel);
2283
2284        __hists__init(hists, &perf_hpp_list);
2285        return 0;
2286}
2287
2288/*
2289 * The init/exit callbacks registered below make each new perf_evsel
2290 * carry a struct hists; hists_evsel__exit() above frees its entries.
2291 */
2292
2293int hists__init(void)
2294{
2295        int err = perf_evsel__object_config(sizeof(struct hists_evsel),
2296                                            hists_evsel__init,
2297                                            hists_evsel__exit);
2298        if (err)
2299                fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2300
2301        return err;
2302}
2303
2304void perf_hpp_list__init(struct perf_hpp_list *list)
2305{
2306        INIT_LIST_HEAD(&list->fields);
2307        INIT_LIST_HEAD(&list->sorts);
2308}
2309