linux/tools/perf/util/hist.c
// SPDX-License-Identifier: GPL-2.0
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "session.h"
#include "namespaces.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "thread.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <sys/param.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

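/*
 * Grow the column widths needed to display this entry: symbol,
 * comm/thread, dso, branch/mem targets, srcline, etc.  Widths only
 * ever increase (via hists__new_col_len()), so the maximum over all
 * entries is what ends up on screen.
 */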
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose > 0)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 8);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }

                if (h->branch_info->srcline_from)
                        hists__new_col_len(hists, HISTC_SRCLINE_FROM,
                                        strlen(h->branch_info->srcline_from));
                if (h->branch_info->srcline_to)
                        hists__new_col_len(hists, HISTC_SRCLINE_TO,
                                        strlen(h->branch_info->srcline_to));
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen);
                }

                if (h->mem_info->iaddr.sym) {
                        symlen = (int)h->mem_info->iaddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                }

                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }

                hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
                                   unresolved_col_width + 4 + 2);

        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
        hists__new_col_len(hists, HISTC_CPU, 3);
        hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->srcline) {
                len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
                hists__new_col_len(hists, HISTC_SRCLINE, len);
        }

        if (h->srcfile)
                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());

        if (h->trace_output)
                hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

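/*
 * Exponential decay used when aging entries (e.g. by 'perf top'):
 * each pass keeps 7/8 of the period, so entries that stop getting
 * samples fade out and, once their period reaches zero, get removed
 * by hists__decay_entries().
 */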
static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);
        decay_callchain(he->callchain);

        diff = prev_period - he->stat.period;

        if (!he->depth) {
                hists->stats.total_period -= diff;
                if (!he->filtered)
                        hists->stats.total_non_filtered_period -= diff;
        }

        if (!he->leaf) {
                struct hist_entry *child;
                struct rb_node *node = rb_first(&he->hroot_out);
                while (node) {
                        child = rb_entry(node, struct hist_entry, rb_node);
                        node = rb_next(node);

                        if (hists__decay_entry(hists, child))
                                hists__delete_entry(hists, child);
                }
        }

        return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        struct rb_root *root_in;
        struct rb_root *root_out;

        if (he->parent_he) {
                root_in  = &he->parent_he->hroot_in;
                root_out = &he->parent_he->hroot_out;
        } else {
                if (hists__has(hists, need_collapse))
                        root_in = &hists->entries_collapsed;
                else
                        root_in = hists->entries_in;
                root_out = &hists->entries;
        }

        rb_erase(&he->rb_node_in, root_in);
        rb_erase(&he->rb_node, root_out);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n))) {
                        hists__delete_entry(hists, n);
                }
        }
}

void hists__delete_entries(struct hists *hists)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                hists__delete_entry(hists, n);
        }
}

/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
                            struct hist_entry *template,
                            bool sample_self)
{
        *he = *template;

        if (symbol_conf.cumulate_callchain) {
                he->stat_acc = malloc(sizeof(he->stat));
                if (he->stat_acc == NULL)
                        return -ENOMEM;
                memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                if (!sample_self)
                        memset(&he->stat, 0, sizeof(he->stat));
        }

        map__get(he->ms.map);

        if (he->branch_info) {
                /*
                 * This branch info is (part of) an allocation made by
                 * sample__resolve_bstack() and will be freed after
                 * adding new entries.  So we need to save a copy.
                 */
                he->branch_info = malloc(sizeof(*he->branch_info));
                if (he->branch_info == NULL) {
                        map__zput(he->ms.map);
                        free(he->stat_acc);
                        return -ENOMEM;
                }

                memcpy(he->branch_info, template->branch_info,
                       sizeof(*he->branch_info));

                map__get(he->branch_info->from.map);
                map__get(he->branch_info->to.map);
        }

        if (he->mem_info) {
                map__get(he->mem_info->iaddr.map);
                map__get(he->mem_info->daddr.map);
        }

        if (symbol_conf.use_callchain)
                callchain_init(he->callchain);

        if (he->raw_data) {
                he->raw_data = memdup(he->raw_data, he->raw_size);

                if (he->raw_data == NULL) {
                        map__put(he->ms.map);
                        if (he->branch_info) {
                                map__put(he->branch_info->from.map);
                                map__put(he->branch_info->to.map);
                                free(he->branch_info);
                        }
                        if (he->mem_info) {
                                map__put(he->mem_info->iaddr.map);
                                map__put(he->mem_info->daddr.map);
                        }
                        free(he->stat_acc);
                        return -ENOMEM;
                }
        }
        INIT_LIST_HEAD(&he->pairs.node);
        thread__get(he->thread);
        he->hroot_in  = RB_ROOT;
        he->hroot_out = RB_ROOT;

        if (!symbol_conf.report_hierarchy)
                he->leaf = true;

        return 0;
}

static void *hist_entry__zalloc(size_t size)
{
        return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
        free(ptr);
}

static struct hist_entry_ops default_ops = {
        .new    = hist_entry__zalloc,
        .free   = hist_entry__free,
};

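/*
 * Allocate a new hist_entry initialized from @template.  The ->new
 * callback receives the callchain size so that, when callchains are
 * in use, the callchain root lives in the same allocation, right
 * past the hist_entry (see hist_entry__zalloc() above).
 */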
static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        struct hist_entry_ops *ops = template->ops;
        size_t callchain_size = 0;
        struct hist_entry *he;
        int err = 0;

        if (!ops)
                ops = template->ops = &default_ops;

        if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = ops->new(callchain_size);
        if (he) {
                err = hist_entry__init(he, template, sample_self);
                if (err) {
                        ops->free(he);
                        he = NULL;
                }
        }

        return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
        if (!symbol_conf.use_callchain)
                return;

        he->hists->callchain_period += period;
        if (!he->filtered)
                he->hists->callchain_non_filtered_period += period;
}

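/*
 * Look up @entry in the current input rbtree (hists->entries_in),
 * keyed by hist_entry__cmp().  On a match, accumulate the period and
 * weight into the existing entry; otherwise create a new entry with
 * hist_entry__new() and link it into the tree.
 */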
static struct hist_entry *hists__findnew_entry(struct hists *hists,
                                               struct hist_entry *entry,
                                               struct addr_location *al,
                                               bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        if (sample_self) {
                                he_stat__add_period(&he->stat, period, weight);
                                hist_entry__add_callchain_period(he, period);
                        }
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        zfree(&entry->mem_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                map__put(he->ms.map);
                                he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        if (sample_self)
                hist_entry__add_callchain_period(he, period);
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}

static struct hist_entry*
__hists__add_entry(struct hists *hists,
                   struct addr_location *al,
                   struct symbol *sym_parent,
                   struct branch_info *bi,
                   struct mem_info *mi,
                   struct perf_sample *sample,
                   bool sample_self,
                   struct hist_entry_ops *ops)
{
        struct namespaces *ns = thread__namespaces(al->thread);
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .cgroup_id = {
                        .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
                        .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
                },
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .srcline = al->srcline ? strdup(al->srcline) : NULL,
                .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
                .level   = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = sample->period,
                        .weight = sample->weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = sample->transaction,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
        };

        return hists__findnew_entry(hists, &entry, al, sample_self);
}

struct hist_entry *hists__add_entry(struct hists *hists,
                                    struct addr_location *al,
                                    struct symbol *sym_parent,
                                    struct branch_info *bi,
                                    struct mem_info *mi,
                                    struct perf_sample *sample,
                                    bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi,
                                  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
                                        struct hist_entry_ops *ops,
                                        struct addr_location *al,
                                        struct symbol *sym_parent,
                                        struct branch_info *bi,
                                        struct mem_info *mi,
                                        struct perf_sample *sample,
                                        bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi,
                                  sample, sample_self, ops);
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->priv = mi;
        return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->priv;
        struct hists *hists = evsel__hists(iter->evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = sample->weight;
        if (!cost)
                cost = 1;

        /*
         * We must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort(), which is solely
         * based on periods.  We want sorting to be done on
         * nr_events * weight, and this is indirectly achieved by
         * passing period=weight here and in the
         * he_stat__add_period() function.
         */
        sample->period = cost;

        he = hists__add_entry(hists, al, iter->parent, NULL, mi,
                              sample, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since the mem info
         * was either already freed in hists__findnew_entry() or passed to a
         * new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->priv = bi;
        return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
                             struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->priv;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        al->map = bi[i].to.map;
        al->sym = bi[i].to.sym;
        al->addr = bi[i].to.addr;
        return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->priv;

        if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled. Thus we use a pseudo period of 1.
         */
        sample->period = 1;
        sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

        he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

        return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;

        callchain_cursor_commit(&callchain_cursor);

        /*
         * This is for detecting cycles or recursion so that they're
         * accumulated only once and entries don't end up with more
         * than 100% overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->priv = he_cache;
        iter->curr = 0;

        return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        int err = 0;

        he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        hist_entry__append_callchain(he, sample);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(&callchain_cursor);

        hists__inc_nr_samples(hists, he->filtered);

        return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(&callchain_cursor);
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .hists = evsel__hists(evsel),
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .srcline = al->srcline ? strdup(al->srcline) : NULL,
                .parent = iter->parent,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };
        int i;
        struct callchain_cursor cursor;

        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there are duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}

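/*
 * Per-sample-type iteration strategies: each ops table implements
 * the prepare/add_single/next/add_next/finish protocol driven by
 * hist_entry_iter__add() below.
 */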
const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};

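/*
 * Driver for the hist_iter_ops protocol: resolve the callchain,
 * prepare the iterator, add the single (top-level) entry, then walk
 * next_entry()/add_next_entry() until exhausted, invoking the
 * optional add_entry_cb for each entry that was added.
 */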
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
{
        int err, err2;
        struct map *alm = NULL;

        if (al && al->map)
                alm = map__get(al->map);

        err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
        if (err)
                return err;

        iter->max_stack = max_stack_depth;

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        map__put(alm);

        return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
        struct hist_entry_ops *ops = he->ops;

        thread__zput(he->thread);
        map__zput(he->ms.map);

        if (he->branch_info) {
                map__zput(he->branch_info->from.map);
                map__zput(he->branch_info->to.map);
                free_srcline(he->branch_info->srcline_from);
                free_srcline(he->branch_info->srcline_to);
                zfree(&he->branch_info);
        }

        if (he->mem_info) {
                map__zput(he->mem_info->iaddr.map);
                map__zput(he->mem_info->daddr.map);
                zfree(&he->mem_info);
        }

        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        if (he->srcfile && he->srcfile[0])
                free(he->srcfile);
        free_callchain(he->callchain);
        free(he->trace_output);
        free(he->raw_data);
        ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
                                   struct perf_hpp_fmt *fmt, int printed)
{
        if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
                const int width = fmt->width(fmt, hpp, he->hists);
                if (printed < width) {
                        advance_hpp(hpp, printed);
                        printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
                }
        }

        return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
                                       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
        return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
                                                enum hist_filter type,
                                                fmt_chk_fn check)
{
        struct perf_hpp_fmt *fmt;
        bool type_match = false;
        struct hist_entry *parent = he->parent_he;

        switch (type) {
        case HIST_FILTER__THREAD:
                if (symbol_conf.comm_list == NULL &&
                    symbol_conf.pid_list == NULL &&
                    symbol_conf.tid_list == NULL)
                        return;
                break;
        case HIST_FILTER__DSO:
                if (symbol_conf.dso_list == NULL)
                        return;
                break;
        case HIST_FILTER__SYMBOL:
                if (symbol_conf.sym_list == NULL)
                        return;
                break;
        case HIST_FILTER__PARENT:
        case HIST_FILTER__GUEST:
        case HIST_FILTER__HOST:
        case HIST_FILTER__SOCKET:
        case HIST_FILTER__C2C:
        default:
                return;
        }

        /* if it's filtered by its own fmt, it has to have filter bits */
        perf_hpp_list__for_each_format(he->hpp_list, fmt) {
                if (check(fmt)) {
                        type_match = true;
                        break;
                }
        }

        if (type_match) {
                /*
                 * If the filter is for the current level entry, propagate
                 * the filter marker to parents.  The marker bit was
                 * already set by default so it only needs to be cleared
                 * for non-filtered entries.
                 */
                if (!(he->filtered & (1 << type))) {
                        while (parent) {
                                parent->filtered &= ~(1 << type);
                                parent = parent->parent_he;
                        }
                }
        } else {
                /*
                 * If the current entry doesn't have matching formats, set
                 * the filter marker for upper level entries.  It will be
                 * cleared if its lower level entries are not filtered.
                 *
                 * For lower-level entries, it inherits the parent's
                 * filter bit so that lower level entries of a
                 * non-filtered entry won't set the filter marker.
                 */
                if (parent == NULL)
                        he->filtered |= (1 << type);
                else
                        he->filtered |= (parent->filtered & (1 << type));
        }
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
        hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
                                            check_thread_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
                                            perf_hpp__is_dso_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
                                            perf_hpp__is_sym_entry);

        hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
                                                 struct rb_root *root,
                                                 struct hist_entry *he,
                                                 struct hist_entry *parent_he,
                                                 struct perf_hpp_list *hpp_list)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter, *new;
        struct perf_hpp_fmt *fmt;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = 0;
                perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                        cmp = fmt->collapse(fmt, iter, he);
                        if (cmp)
                                break;
                }

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);
                        return iter;
                }

                if (cmp < 0)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }

        new = hist_entry__new(he, true);
        if (new == NULL)
                return NULL;

        hists->nr_entries++;

        /* save related format list for output */
        new->hpp_list = hpp_list;
        new->parent_he = parent_he;

        hist_entry__apply_hierarchy_filters(new);

        /* some fields are now passed to 'new' */
        perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
                        he->trace_output = NULL;
                else
                        new->trace_output = NULL;

                if (perf_hpp__is_srcline_entry(fmt))
                        he->srcline = NULL;
                else
                        new->srcline = NULL;

                if (perf_hpp__is_srcfile_entry(fmt))
                        he->srcfile = NULL;
                else
                        new->srcfile = NULL;
        }

        rb_link_node(&new->rb_node_in, parent, p);
        rb_insert_color(&new->rb_node_in, root);
        return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct perf_hpp_list_node *node;
        struct hist_entry *new_he = NULL;
        struct hist_entry *parent = NULL;
        int depth = 0;
        int ret = 0;

        list_for_each_entry(node, &hists->hpp_formats, list) {
                /* skip period (overhead) and elided columns */
                if (node->level == 0 || node->skip)
                        continue;

                /* insert copy of 'he' for each fmt into the hierarchy */
                new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
                if (new_he == NULL) {
                        ret = -1;
                        break;
                }

                root = &new_he->hroot_in;
                new_he->depth = depth++;
                parent = new_he;
        }

        if (new_he) {
                new_he->leaf = true;

                if (symbol_conf.use_callchain) {
                        callchain_cursor_reset(&callchain_cursor);
                        if (callchain_merge(&callchain_cursor,
                                            new_he->callchain,
                                            he->callchain) < 0)
                                ret = -1;
                }
        }

        /* 'he' is no longer used */
        hist_entry__delete(he);

        /* return 0 (or -1) since it already applied filters */
        return ret;
}

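/*
 * Insert @he into the collapsed tree, merging it into an existing
 * entry when hist_entry__collapse() finds a match (in which case @he
 * is freed).  Returns 1 if a new entry was linked, 0 if it was
 * merged, or -1 on callchain merge failure.
 */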
static int hists__collapse_insert_entry(struct hists *hists,
                                        struct rb_root *root,
                                        struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        if (symbol_conf.report_hierarchy)
                return hists__hierarchy_insert_entry(hists, root, he);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        int ret = 0;

                        he_stat__add_stat(&iter->stat, &he->stat);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_stat(iter->stat_acc, he->stat_acc);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                if (callchain_merge(&callchain_cursor,
                                                    iter->callchain,
                                                    he->callchain) < 0)
                                        ret = -1;
                        }
                        hist_entry__delete(he);
                        return ret;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return 1;
}

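/*
 * hists->entries_in is double buffered (entries_in_array[2]) so that
 * new samples can keep flowing into one tree while the other is
 * being consumed; this swaps the two under hists->lock and returns
 * the tree to drain (see hists__collapse_resort() below).
 */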
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
        hists__filter_entry_by_socket(hists, he);
}

int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        int ret;

        if (!hists__has(hists, need_collapse))
                return 0;

        hists->nr_entries = 0;

        root = hists__get_rotate_entries_in(hists);

        next = rb_first(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
                if (ret < 0)
                        return -1;

                if (ret) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
        return 0;
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
        struct hists *hists = a->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__should_skip(fmt, a->hists))
                        continue;

                cmp = fmt->sort(fmt, a, b);
                if (cmp)
                        break;
        }

        return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
        hists->nr_non_filtered_entries = 0;
        hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
        hists->nr_entries = 0;
        hists->stats.total_period = 0;

        hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
        hists->nr_non_filtered_entries++;
        hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered)
                hists__inc_filter_stats(hists, h);

        hists->nr_entries++;
        hists->stats.total_period += h->stat.period;
}

static void hierarchy_recalc_total_periods(struct hists *hists)
{
        struct rb_node *node;
        struct hist_entry *he;

        node = rb_first(&hists->entries);

        hists->stats.total_period = 0;
        hists->stats.total_non_filtered_period = 0;

1561        /*
1562         * Recalculate the total period using top-level entries only,
1563         * since lower-level entries see only non-filtered entries
1564         * while upper-level entries hold the sum of both.
1565         */
1566        while (node) {
1567                he = rb_entry(node, struct hist_entry, rb_node);
1568                node = rb_next(node);
1569
1570                hists->stats.total_period += he->stat.period;
1571                if (!he->filtered)
1572                        hists->stats.total_non_filtered_period += he->stat.period;
1573        }
1574}
1575
1576static void hierarchy_insert_output_entry(struct rb_root *root,
1577                                          struct hist_entry *he)
1578{
1579        struct rb_node **p = &root->rb_node;
1580        struct rb_node *parent = NULL;
1581        struct hist_entry *iter;
1582        struct perf_hpp_fmt *fmt;
1583
1584        while (*p != NULL) {
1585                parent = *p;
1586                iter = rb_entry(parent, struct hist_entry, rb_node);
1587
1588                if (hist_entry__sort(he, iter) > 0)
1589                        p = &parent->rb_left;
1590                else
1591                        p = &parent->rb_right;
1592        }
1593
1594        rb_link_node(&he->rb_node, parent, p);
1595        rb_insert_color(&he->rb_node, root);
1596
1597        /* update column width of dynamic entry */
1598        perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1599                if (perf_hpp__is_dynamic_entry(fmt))
1600                        fmt->sort(fmt, he, NULL);
1601        }
1602}
1603
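    /*
     * Recursively move entries from the collapsed hierarchy (hroot_in) into
     * the sorted output hierarchy (hroot_out), resorting callchains on leaf
     * entries when callchains are in use.
     */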
1604static void hists__hierarchy_output_resort(struct hists *hists,
1605                                           struct ui_progress *prog,
1606                                           struct rb_root *root_in,
1607                                           struct rb_root *root_out,
1608                                           u64 min_callchain_hits,
1609                                           bool use_callchain)
1610{
1611        struct rb_node *node;
1612        struct hist_entry *he;
1613
1614        *root_out = RB_ROOT;
1615        node = rb_first(root_in);
1616
1617        while (node) {
1618                he = rb_entry(node, struct hist_entry, rb_node_in);
1619                node = rb_next(node);
1620
1621                hierarchy_insert_output_entry(root_out, he);
1622
1623                if (prog)
1624                        ui_progress__update(prog, 1);
1625
1626                hists->nr_entries++;
1627                if (!he->filtered) {
1628                        hists->nr_non_filtered_entries++;
1629                        hists__calc_col_len(hists, he);
1630                }
1631
1632                if (!he->leaf) {
1633                        hists__hierarchy_output_resort(hists, prog,
1634                                                       &he->hroot_in,
1635                                                       &he->hroot_out,
1636                                                       min_callchain_hits,
1637                                                       use_callchain);
1638                        continue;
1639                }
1640
1641                if (!use_callchain)
1642                        continue;
1643
1644                if (callchain_param.mode == CHAIN_GRAPH_REL) {
1645                        u64 total = he->stat.period;
1646
1647                        if (symbol_conf.cumulate_callchain)
1648                                total = he->stat_acc->period;
1649
1650                        min_callchain_hits = total * (callchain_param.min_percent / 100);
1651                }
1652
1653                callchain_param.sort(&he->sorted_chain, he->callchain,
1654                                     min_callchain_hits, &callchain_param);
1655        }
1656}
1657
1658static void __hists__insert_output_entry(struct rb_root *entries,
1659                                         struct hist_entry *he,
1660                                         u64 min_callchain_hits,
1661                                         bool use_callchain)
1662{
1663        struct rb_node **p = &entries->rb_node;
1664        struct rb_node *parent = NULL;
1665        struct hist_entry *iter;
1666        struct perf_hpp_fmt *fmt;
1667
1668        if (use_callchain) {
1669                if (callchain_param.mode == CHAIN_GRAPH_REL) {
1670                        u64 total = he->stat.period;
1671
1672                        if (symbol_conf.cumulate_callchain)
1673                                total = he->stat_acc->period;
1674
1675                        min_callchain_hits = total * (callchain_param.min_percent / 100);
1676                }
1677                callchain_param.sort(&he->sorted_chain, he->callchain,
1678                                      min_callchain_hits, &callchain_param);
1679        }
1680
1681        while (*p != NULL) {
1682                parent = *p;
1683                iter = rb_entry(parent, struct hist_entry, rb_node);
1684
1685                if (hist_entry__sort(he, iter) > 0)
1686                        p = &(*p)->rb_left;
1687                else
1688                        p = &(*p)->rb_right;
1689        }
1690
1691        rb_link_node(&he->rb_node, parent, p);
1692        rb_insert_color(&he->rb_node, entries);
1693
1694        perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1695                if (perf_hpp__is_dynamic_entry(fmt) &&
1696                    perf_hpp__defined_dynamic_entry(fmt, he->hists))
1697                        fmt->sort(fmt, he, NULL);  /* update column width */
1698        }
1699}
1700
1701static void output_resort(struct hists *hists, struct ui_progress *prog,
1702                          bool use_callchain, hists__resort_cb_t cb)
1703{
1704        struct rb_root *root;
1705        struct rb_node *next;
1706        struct hist_entry *n;
1707        u64 callchain_total;
1708        u64 min_callchain_hits;
1709
1710        callchain_total = hists->callchain_period;
1711        if (symbol_conf.filter_relative)
1712                callchain_total = hists->callchain_non_filtered_period;
1713
1714        min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1715
1716        hists__reset_stats(hists);
1717        hists__reset_col_len(hists);
1718
1719        if (symbol_conf.report_hierarchy) {
1720                hists__hierarchy_output_resort(hists, prog,
1721                                               &hists->entries_collapsed,
1722                                               &hists->entries,
1723                                               min_callchain_hits,
1724                                               use_callchain);
1725                hierarchy_recalc_total_periods(hists);
1726                return;
1727        }
1728
1729        if (hists__has(hists, need_collapse))
1730                root = &hists->entries_collapsed;
1731        else
1732                root = hists->entries_in;
1733
1734        next = rb_first(root);
1735        hists->entries = RB_ROOT;
1736
1737        while (next) {
1738                n = rb_entry(next, struct hist_entry, rb_node_in);
1739                next = rb_next(&n->rb_node_in);
1740
1741                if (cb && cb(n))
1742                        continue;
1743
1744                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1745                hists__inc_stats(hists, n);
1746
1747                if (!n->filtered)
1748                        hists__calc_col_len(hists, n);
1749
1750                if (prog)
1751                        ui_progress__update(prog, 1);
1752        }
1753}
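
    /*
     * A hedged sketch of the usual resort sequence as seen from a caller
     * such as perf report (illustrative only; progress/error handling
     * omitted):
     *
     *        hists__collapse_resort(hists, NULL);       // merge equal entries
     *        perf_evsel__output_resort(evsel, NULL);    // order them for display
     */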
1754
1755void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
1756{
1757        bool use_callchain;
1758
1759        if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1760                use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
1761        else
1762                use_callchain = symbol_conf.use_callchain;
1763
1764        use_callchain |= symbol_conf.show_branchflag_count;
1765
1766        output_resort(evsel__hists(evsel), prog, use_callchain, NULL);
1767}
1768
1769void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1770{
1771        output_resort(hists, prog, symbol_conf.use_callchain, NULL);
1772}
1773
1774void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1775                             hists__resort_cb_t cb)
1776{
1777        output_resort(hists, prog, symbol_conf.use_callchain, cb);
1778}
1779
1780static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1781{
1782        if (he->leaf || hmd == HMD_FORCE_SIBLING)
1783                return false;
1784
1785        if (he->unfolded || hmd == HMD_FORCE_CHILD)
1786                return true;
1787
1788        return false;
1789}
1790
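    /*
     * Tree-walk helpers for the hierarchy view: descend into unfolded
     * children where allowed and fall back to the parent's sibling once a
     * subtree is exhausted.
     */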
1791struct rb_node *rb_hierarchy_last(struct rb_node *node)
1792{
1793        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1794
1795        while (can_goto_child(he, HMD_NORMAL)) {
1796                node = rb_last(&he->hroot_out);
1797                he = rb_entry(node, struct hist_entry, rb_node);
1798        }
1799        return node;
1800}
1801
1802struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
1803{
1804        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1805
1806        if (can_goto_child(he, hmd))
1807                node = rb_first(&he->hroot_out);
1808        else
1809                node = rb_next(node);
1810
1811        while (node == NULL) {
1812                he = he->parent_he;
1813                if (he == NULL)
1814                        break;
1815
1816                node = rb_next(&he->rb_node);
1817        }
1818        return node;
1819}
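
    /*
     * A minimal forward-traversal sketch over the hierarchy (illustrative
     * only; visit() is a hypothetical callback, not part of this file):
     *
     *        struct rb_node *nd = rb_first(&hists->entries);
     *
     *        while (nd) {
     *                visit(rb_entry(nd, struct hist_entry, rb_node));
     *                nd = __rb_hierarchy_next(nd, HMD_NORMAL);
     *        }
     */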
1820
1821struct rb_node *rb_hierarchy_prev(struct rb_node *node)
1822{
1823        struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1824
1825        node = rb_prev(node);
1826        if (node)
1827                return rb_hierarchy_last(node);
1828
1829        he = he->parent_he;
1830        if (he == NULL)
1831                return NULL;
1832
1833        return &he->rb_node;
1834}
1835
1836bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
1837{
1838        struct rb_node *node;
1839        struct hist_entry *child;
1840        float percent;
1841
1842        if (he->leaf)
1843                return false;
1844
1845        node = rb_first(&he->hroot_out);
1846        child = rb_entry(node, struct hist_entry, rb_node);
1847
1848        while (node && child->filtered) {
1849                node = rb_next(node);
1850                child = rb_entry(node, struct hist_entry, rb_node);
1851        }
1852
1853        if (node)
1854                percent = hist_entry__get_percent_limit(child);
1855        else
1856                percent = 0;
1857
1858        return node && percent >= limit;
1859}
1860
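    /*
     * Clear one filter bit on an entry (and, in hierarchy mode, on its
     * parents) and fold the now-unfiltered entry back into the stats.
     */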
1861static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
1862                                       enum hist_filter filter)
1863{
1864        h->filtered &= ~(1 << filter);
1865
1866        if (symbol_conf.report_hierarchy) {
1867                struct hist_entry *parent = h->parent_he;
1868
1869                while (parent) {
1870                        he_stat__add_stat(&parent->stat, &h->stat);
1871
1872                        parent->filtered &= ~(1 << filter);
1873
1874                        if (parent->filtered)
1875                                goto next;
1876
1877                        /* force fold unfiltered entry for simplicity */
1878                        parent->unfolded = false;
1879                        parent->has_no_entry = false;
1880                        parent->row_offset = 0;
1881                        parent->nr_rows = 0;
1882next:
1883                        parent = parent->parent_he;
1884                }
1885        }
1886
1887        if (h->filtered)
1888                return;
1889
1890        /* force fold unfiltered entry for simplicity */
1891        h->unfolded = false;
1892        h->has_no_entry = false;
1893        h->row_offset = 0;
1894        h->nr_rows = 0;
1895
1896        hists->stats.nr_non_filtered_samples += h->stat.nr_events;
1897
1898        hists__inc_filter_stats(hists, h);
1899        hists__calc_col_len(hists, h);
1900}
1901
1903static bool hists__filter_entry_by_dso(struct hists *hists,
1904                                       struct hist_entry *he)
1905{
1906        if (hists->dso_filter != NULL &&
1907            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
1908                he->filtered |= (1 << HIST_FILTER__DSO);
1909                return true;
1910        }
1911
1912        return false;
1913}
1914
1915static bool hists__filter_entry_by_thread(struct hists *hists,
1916                                          struct hist_entry *he)
1917{
1918        if (hists->thread_filter != NULL &&
1919            he->thread != hists->thread_filter) {
1920                he->filtered |= (1 << HIST_FILTER__THREAD);
1921                return true;
1922        }
1923
1924        return false;
1925}
1926
1927static bool hists__filter_entry_by_symbol(struct hists *hists,
1928                                          struct hist_entry *he)
1929{
1930        if (hists->symbol_filter_str != NULL &&
1931            (!he->ms.sym || strstr(he->ms.sym->name,
1932                                   hists->symbol_filter_str) == NULL)) {
1933                he->filtered |= (1 << HIST_FILTER__SYMBOL);
1934                return true;
1935        }
1936
1937        return false;
1938}
1939
1940static bool hists__filter_entry_by_socket(struct hists *hists,
1941                                          struct hist_entry *he)
1942{
1943        if ((hists->socket_filter > -1) &&
1944            (he->socket != hists->socket_filter)) {
1945                he->filtered |= (1 << HIST_FILTER__SOCKET);
1946                return true;
1947        }
1948
1949        return false;
1950}
1951
1952typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
1953
1954static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
1955{
1956        struct rb_node *nd;
1957
1958        hists->stats.nr_non_filtered_samples = 0;
1959
1960        hists__reset_filter_stats(hists);
1961        hists__reset_col_len(hists);
1962
1963        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1964                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1965
1966                if (filter(hists, h))
1967                        continue;
1968
1969                hists__remove_entry_filter(hists, h, type);
1970        }
1971}
1972
1973static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
1974{
1975        struct rb_node **p = &root->rb_node;
1976        struct rb_node *parent = NULL;
1977        struct hist_entry *iter;
1978        struct rb_root new_root = RB_ROOT;
1979        struct rb_node *nd;
1980
1981        while (*p != NULL) {
1982                parent = *p;
1983                iter = rb_entry(parent, struct hist_entry, rb_node);
1984
1985                if (hist_entry__sort(he, iter) > 0)
1986                        p = &(*p)->rb_left;
1987                else
1988                        p = &(*p)->rb_right;
1989        }
1990
1991        rb_link_node(&he->rb_node, parent, p);
1992        rb_insert_color(&he->rb_node, root);
1993
1994        if (he->leaf || he->filtered)
1995                return;
1996
1997        nd = rb_first(&he->hroot_out);
1998        while (nd) {
1999                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2000
2001                nd = rb_next(nd);
2002                rb_erase(&h->rb_node, &he->hroot_out);
2003
2004                resort_filtered_entry(&new_root, h);
2005        }
2006
2007        he->hroot_out = new_root;
2008}
2009
2010static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2011{
2012        struct rb_node *nd;
2013        struct rb_root new_root = RB_ROOT;
2014
2015        hists->stats.nr_non_filtered_samples = 0;
2016
2017        hists__reset_filter_stats(hists);
2018        hists__reset_col_len(hists);
2019
2020        nd = rb_first(&hists->entries);
2021        while (nd) {
2022                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2023                int ret;
2024
2025                ret = hist_entry__filter(h, type, arg);
2026
2027                /*
2028                 * case 1. non-matching type
2029                 * zero out the period, set filter marker and move to child
2030                 */
2031                if (ret < 0) {
2032                        memset(&h->stat, 0, sizeof(h->stat));
2033                        h->filtered |= (1 << type);
2034
2035                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2036                }
2037                /*
2038                 * case 2. matched type (filter out)
2039                 * set filter marker and move to next
2040                 */
2041                else if (ret == 1) {
2042                        h->filtered |= (1 << type);
2043
2044                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2045                }
2046                /*
2047                 * case 3. ok (not filtered)
2048                 * add period to hists and parents, erase the filter marker
2049                 * and move to next sibling
2050                 */
2051                else {
2052                        hists__remove_entry_filter(hists, h, type);
2053
2054                        nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2055                }
2056        }
2057
2058        hierarchy_recalc_total_periods(hists);
2059
2060        /*
2061         * Resort the output after applying a new filter, since a filter in
2062         * a lower hierarchy level can change periods in an upper level.
2063         */
2064        nd = rb_first(&hists->entries);
2065        while (nd) {
2066                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2067
2068                nd = rb_next(nd);
2069                rb_erase(&h->rb_node, &hists->entries);
2070
2071                resort_filtered_entry(&new_root, h);
2072        }
2073
2074        hists->entries = new_root;
2075}
2076
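    /*
     * Typical use from the UI, as a hedged sketch (illustrative only): set
     * the filter on the hists, then re-run the matching filter pass:
     *
     *        hists->thread_filter = thread;
     *        hists__filter_by_thread(hists);
     */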
2077void hists__filter_by_thread(struct hists *hists)
2078{
2079        if (symbol_conf.report_hierarchy)
2080                hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2081                                        hists->thread_filter);
2082        else
2083                hists__filter_by_type(hists, HIST_FILTER__THREAD,
2084                                      hists__filter_entry_by_thread);
2085}
2086
2087void hists__filter_by_dso(struct hists *hists)
2088{
2089        if (symbol_conf.report_hierarchy)
2090                hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2091                                        hists->dso_filter);
2092        else
2093                hists__filter_by_type(hists, HIST_FILTER__DSO,
2094                                      hists__filter_entry_by_dso);
2095}
2096
2097void hists__filter_by_symbol(struct hists *hists)
2098{
2099        if (symbol_conf.report_hierarchy)
2100                hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2101                                        hists->symbol_filter_str);
2102        else
2103                hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2104                                      hists__filter_entry_by_symbol);
2105}
2106
2107void hists__filter_by_socket(struct hists *hists)
2108{
2109        if (symbol_conf.report_hierarchy)
2110                hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2111                                        &hists->socket_filter);
2112        else
2113                hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2114                                      hists__filter_entry_by_socket);
2115}
2116
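    /*
     * nr_events[0] accumulates the total across all record types, while
     * nr_events[type] counts each PERF_RECORD_* type separately.
     */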
2117void events_stats__inc(struct events_stats *stats, u32 type)
2118{
2119        ++stats->nr_events[0];
2120        ++stats->nr_events[type];
2121}
2122
2123void hists__inc_nr_events(struct hists *hists, u32 type)
2124{
2125        events_stats__inc(&hists->stats, type);
2126}
2127
2128void hists__inc_nr_samples(struct hists *hists, bool filtered)
2129{
2130        events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
2131        if (!filtered)
2132                hists->stats.nr_non_filtered_samples++;
2133}
2134
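    /*
     * Insert a zero-period copy of 'pair' into the leader hists so that the
     * pair has a bucket to link against when comparing hists.
     */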
2135static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2136                                                 struct hist_entry *pair)
2137{
2138        struct rb_root *root;
2139        struct rb_node **p;
2140        struct rb_node *parent = NULL;
2141        struct hist_entry *he;
2142        int64_t cmp;
2143
2144        if (hists__has(hists, need_collapse))
2145                root = &hists->entries_collapsed;
2146        else
2147                root = hists->entries_in;
2148
2149        p = &root->rb_node;
2150
2151        while (*p != NULL) {
2152                parent = *p;
2153                he = rb_entry(parent, struct hist_entry, rb_node_in);
2154
2155                cmp = hist_entry__collapse(he, pair);
2156
2157                if (!cmp)
2158                        goto out;
2159
2160                if (cmp < 0)
2161                        p = &(*p)->rb_left;
2162                else
2163                        p = &(*p)->rb_right;
2164        }
2165
2166        he = hist_entry__new(pair, true);
2167        if (he) {
2168                memset(&he->stat, 0, sizeof(he->stat));
2169                he->hists = hists;
2170                if (symbol_conf.cumulate_callchain)
2171                        memset(he->stat_acc, 0, sizeof(*he->stat_acc));
2172                rb_link_node(&he->rb_node_in, parent, p);
2173                rb_insert_color(&he->rb_node_in, root);
2174                hists__inc_stats(hists, he);
2175                he->dummy = true;
2176        }
2177out:
2178        return he;
2179}
2180
2181static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2182                                                    struct rb_root *root,
2183                                                    struct hist_entry *pair)
2184{
2185        struct rb_node **p;
2186        struct rb_node *parent = NULL;
2187        struct hist_entry *he;
2188        struct perf_hpp_fmt *fmt;
2189
2190        p = &root->rb_node;
2191        while (*p != NULL) {
2192                int64_t cmp = 0;
2193
2194                parent = *p;
2195                he = rb_entry(parent, struct hist_entry, rb_node_in);
2196
2197                perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2198                        cmp = fmt->collapse(fmt, he, pair);
2199                        if (cmp)
2200                                break;
2201                }
2202                if (!cmp)
2203                        goto out;
2204
2205                if (cmp < 0)
2206                        p = &parent->rb_left;
2207                else
2208                        p = &parent->rb_right;
2209        }
2210
2211        he = hist_entry__new(pair, true);
2212        if (he) {
2213                rb_link_node(&he->rb_node_in, parent, p);
2214                rb_insert_color(&he->rb_node_in, root);
2215
2216                he->dummy = true;
2217                he->hists = hists;
2218                memset(&he->stat, 0, sizeof(he->stat));
2219                hists__inc_stats(hists, he);
2220        }
2221out:
2222        return he;
2223}
2224
2225static struct hist_entry *hists__find_entry(struct hists *hists,
2226                                            struct hist_entry *he)
2227{
2228        struct rb_node *n;
2229
2230        if (hists__has(hists, need_collapse))
2231                n = hists->entries_collapsed.rb_node;
2232        else
2233                n = hists->entries_in->rb_node;
2234
2235        while (n) {
2236                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2237                int64_t cmp = hist_entry__collapse(iter, he);
2238
2239                if (cmp < 0)
2240                        n = n->rb_left;
2241                else if (cmp > 0)
2242                        n = n->rb_right;
2243                else
2244                        return iter;
2245        }
2246
2247        return NULL;
2248}
2249
2250static struct hist_entry *hists__find_hierarchy_entry(struct rb_root *root,
2251                                                      struct hist_entry *he)
2252{
2253        struct rb_node *n = root->rb_node;
2254
2255        while (n) {
2256                struct hist_entry *iter;
2257                struct perf_hpp_fmt *fmt;
2258                int64_t cmp = 0;
2259
2260                iter = rb_entry(n, struct hist_entry, rb_node_in);
2261                perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2262                        cmp = fmt->collapse(fmt, iter, he);
2263                        if (cmp)
2264                                break;
2265                }
2266
2267                if (cmp < 0)
2268                        n = n->rb_left;
2269                else if (cmp > 0)
2270                        n = n->rb_right;
2271                else
2272                        return iter;
2273        }
2274
2275        return NULL;
2276}
2277
2278static void hists__match_hierarchy(struct rb_root *leader_root,
2279                                   struct rb_root *other_root)
2280{
2281        struct rb_node *nd;
2282        struct hist_entry *pos, *pair;
2283
2284        for (nd = rb_first(leader_root); nd; nd = rb_next(nd)) {
2285                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2286                pair = hists__find_hierarchy_entry(other_root, pos);
2287
2288                if (pair) {
2289                        hist_entry__add_pair(pair, pos);
2290                        hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2291                }
2292        }
2293}
2294
2295/*
2296 * Look for pairs to link to the leader buckets (hist_entries):
2297 */
2298void hists__match(struct hists *leader, struct hists *other)
2299{
2300        struct rb_root *root;
2301        struct rb_node *nd;
2302        struct hist_entry *pos, *pair;
2303
2304        if (symbol_conf.report_hierarchy) {
2305                /* hierarchy report always collapses entries */
2306                return hists__match_hierarchy(&leader->entries_collapsed,
2307                                              &other->entries_collapsed);
2308        }
2309
2310        if (hists__has(leader, need_collapse))
2311                root = &leader->entries_collapsed;
2312        else
2313                root = leader->entries_in;
2314
2315        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
2316                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2317                pair = hists__find_entry(other, pos);
2318
2319                if (pair)
2320                        hist_entry__add_pair(pair, pos);
2321        }
2322}
2323
2324static int hists__link_hierarchy(struct hists *leader_hists,
2325                                 struct hist_entry *parent,
2326                                 struct rb_root *leader_root,
2327                                 struct rb_root *other_root)
2328{
2329        struct rb_node *nd;
2330        struct hist_entry *pos, *leader;
2331
2332        for (nd = rb_first(other_root); nd; nd = rb_next(nd)) {
2333                pos = rb_entry(nd, struct hist_entry, rb_node_in);
2334
2335                if (hist_entry__has_pairs(pos)) {
2336                        bool found = false;
2337
2338                        list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2339                                if (leader->hists == leader_hists) {
2340                                        found = true;
2341                                        break;
2342                                }
2343                        }
2344                        if (!found)
2345                                return -1;
2346                } else {
2347                        leader = add_dummy_hierarchy_entry(leader_hists,
2348                                                           leader_root, pos);
2349                        if (leader == NULL)
2350                                return -1;
2351
2352                        /* point parent_he at the leader-side parent, not at pos's parent */
2353                        leader->parent_he = parent;
2354
2355                        hist_entry__add_pair(pos, leader);
2356                }
2357
2358                if (!pos->leaf) {
2359                        if (hists__link_hierarchy(leader_hists, leader,
2360                                                  &leader->hroot_in,
2361                                                  &pos->hroot_in) < 0)
2362                                return -1;
2363                }
2364        }
2365        return 0;
2366}
2367
2368/*
2369 * Look for entries in the other hists that are not present in the leader;
2370 * if we find them, add a dummy entry on the leader hists, with period=0,
2371 * nr_events=0, to serve as the list header.
2372 */
2373int hists__link(struct hists *leader, struct hists *other)
2374{
2375        struct rb_root *root;
2376        struct rb_node *nd;
2377        struct hist_entry *pos, *pair;
2378
2379        if (symbol_conf.report_hierarchy) {
2380                /* hierarchy report always collapses entries */
2381                return hists__link_hierarchy(leader, NULL,
2382                                             &leader->entries_collapsed,
2383                                             &other->entries_collapsed);
2384        }
2385
2386        if (hists__has(other, need_collapse))
2387                root = &other->entries_collapsed;
2388        else
2389                root = other->entries_in;
2390
2391        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
2392                pos = rb_entry(nd, struct hist_entry, rb_node_in);
2393
2394                if (!hist_entry__has_pairs(pos)) {
2395                        pair = hists__add_dummy_entry(leader, pos);
2396                        if (pair == NULL)
2397                                return -1;
2398                        hist_entry__add_pair(pos, pair);
2399                }
2400        }
2401
2402        return 0;
2403}
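
    /*
     * A hedged sketch of how matching and linking combine when comparing
     * two hists, e.g. in perf diff (illustrative only):
     *
     *        hists__match(leader, other);   // pair up entries present in both
     *        hists__link(leader, other);    // add dummies for other-only entries
     */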
2404
2405void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2406                          struct perf_sample *sample, bool nonany_branch_mode)
2407{
2408        struct branch_info *bi;
2409
2410        /* If we have branch cycles, always annotate them. */
2411        if (bs && bs->nr && bs->entries[0].flags.cycles) {
2412                int i;
2413
2414                bi = sample__resolve_bstack(sample, al);
2415                if (bi) {
2416                        struct addr_map_symbol *prev = NULL;
2417
2418                        /*
2419                         * Ignore errors; we still want to process the
2420                         * other entries.
2421                         *
2422                         * For non-standard branch modes, always
2423                         * force no IPC (prev == NULL).
2424                         *
2425                         * Note that perf stores branches reversed from
2426                         * program order!
2427                         */
2428                        for (i = bs->nr - 1; i >= 0; i--) {
2429                                addr_map_symbol__account_cycles(&bi[i].from,
2430                                        nonany_branch_mode ? NULL : prev,
2431                                        bi[i].flags.cycles);
2432                                prev = &bi[i].to;
2433                        }
2434                        free(bi);
2435                }
2436        }
2437}
2438
2439size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
2440{
2441        struct perf_evsel *pos;
2442        size_t ret = 0;
2443
2444        evlist__for_each_entry(evlist, pos) {
2445                ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
2446                ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
2447        }
2448
2449        return ret;
2450}
2451
2453u64 hists__total_period(struct hists *hists)
2454{
2455        return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2456                hists->stats.total_period;
2457}
2458
2459int parse_filter_percentage(const struct option *opt __maybe_unused,
2460                            const char *arg, int unset __maybe_unused)
2461{
2462        if (!strcmp(arg, "relative"))
2463                symbol_conf.filter_relative = true;
2464        else if (!strcmp(arg, "absolute"))
2465                symbol_conf.filter_relative = false;
2466        else {
2467                pr_debug("Invalid percentage: %s\n", arg);
2468                return -1;
2469        }
2470
2471        return 0;
2472}
2473
2474int perf_hist_config(const char *var, const char *value)
2475{
2476        if (!strcmp(var, "hist.percentage"))
2477                return parse_filter_percentage(NULL, value, 0);
2478
2479        return 0;
2480}
2481
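    /*
     * Initialize a hists instance; hpp_list supplies the output field/sort
     * format list (usually the global perf_hpp_list).
     */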
2482int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2483{
2484        memset(hists, 0, sizeof(*hists));
2485        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
2486        hists->entries_in = &hists->entries_in_array[0];
2487        hists->entries_collapsed = RB_ROOT;
2488        hists->entries = RB_ROOT;
2489        pthread_mutex_init(&hists->lock, NULL);
2490        hists->socket_filter = -1;
2491        hists->hpp_list = hpp_list;
2492        INIT_LIST_HEAD(&hists->hpp_formats);
2493        return 0;
2494}
2495
2496static void hists__delete_remaining_entries(struct rb_root *root)
2497{
2498        struct rb_node *node;
2499        struct hist_entry *he;
2500
2501        while (!RB_EMPTY_ROOT(root)) {
2502                node = rb_first(root);
2503                rb_erase(node, root);
2504
2505                he = rb_entry(node, struct hist_entry, rb_node_in);
2506                hist_entry__delete(he);
2507        }
2508}
2509
2510static void hists__delete_all_entries(struct hists *hists)
2511{
2512        hists__delete_entries(hists);
2513        hists__delete_remaining_entries(&hists->entries_in_array[0]);
2514        hists__delete_remaining_entries(&hists->entries_in_array[1]);
2515        hists__delete_remaining_entries(&hists->entries_collapsed);
2516}
2517
2518static void hists_evsel__exit(struct perf_evsel *evsel)
2519{
2520        struct hists *hists = evsel__hists(evsel);
2521        struct perf_hpp_fmt *fmt, *pos;
2522        struct perf_hpp_list_node *node, *tmp;
2523
2524        hists__delete_all_entries(hists);
2525
2526        list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2527                perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2528                        list_del(&fmt->list);
2529                        free(fmt);
2530                }
2531                list_del(&node->list);
2532                free(node);
2533        }
2534}
2535
2536static int hists_evsel__init(struct perf_evsel *evsel)
2537{
2538        struct hists *hists = evsel__hists(evsel);
2539
2540        __hists__init(hists, &perf_hpp_list);
2541        return 0;
2542}
2543
2544/*
2545 * The hist_entries stored in the rbtrees are freed by hists_evsel__exit()
2546 * above, which hists__init() below hooks up via perf_evsel__object_config().
2547 */
2548
2549int hists__init(void)
2550{
2551        int err = perf_evsel__object_config(sizeof(struct hists_evsel),
2552                                            hists_evsel__init,
2553                                            hists_evsel__exit);
2554        if (err)
2555                fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2556
2557        return err;
2558}
2559
2560void perf_hpp_list__init(struct perf_hpp_list *list)
2561{
2562        INIT_LIST_HEAD(&list->fields);
2563        INIT_LIST_HEAD(&list->sorts);
2564}
2565