linux/tools/perf/util/stat-shadow.c
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "pmu.h"
#include "rblist.h"
#include "evlist.h"
#include "expr.h"
#include "metricgroup.h"
#include "cgroup.h"
#include "units.h"
#include <linux/zalloc.h>
#include "iostat.h"

/*
 * AGGR_GLOBAL: Use CPU 0
 * AGGR_SOCKET: Use first CPU of socket
 * AGGR_DIE: Use first CPU of die
 * AGGR_CORE: Use first CPU of core
 * AGGR_NONE: Use matching CPU
 * AGGR_THREAD: Not supported?
 */

struct runtime_stat rt_stat;
struct stats walltime_nsecs_stats;

struct saved_value {
        struct rb_node rb_node;
        struct evsel *evsel;
        enum stat_type type;
        int ctx;
        int cpu;
        struct cgroup *cgrp;
        struct runtime_stat *stat;
        struct stats stats;
        u64 metric_total;
        int metric_other;
};

static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
{
        struct saved_value *a = container_of(rb_node,
                                             struct saved_value,
                                             rb_node);
        const struct saved_value *b = entry;

        if (a->cpu != b->cpu)
                return a->cpu - b->cpu;

        /*
         * Previously the rbtree was used to link generic metrics.
         * The keys were evsel/cpu. Now the rbtree is extended to support
         * per-thread shadow stats. For shadow stats case, the keys
         * are cpu/type/ctx/stat (evsel is NULL). For generic metrics
         * case, the keys are still evsel/cpu (type/ctx/stat are 0 or NULL).
         */
        if (a->type != b->type)
                return a->type - b->type;

        if (a->ctx != b->ctx)
                return a->ctx - b->ctx;

        if (a->cgrp != b->cgrp)
                return (char *)a->cgrp < (char *)b->cgrp ? -1 : +1;

        if (a->evsel == NULL && b->evsel == NULL) {
                if (a->stat == b->stat)
                        return 0;

                if ((char *)a->stat < (char *)b->stat)
                        return -1;

                return 1;
        }

        if (a->evsel == b->evsel)
                return 0;
        if ((char *)a->evsel < (char *)b->evsel)
                return -1;
        return +1;
}
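
/*
 * For illustration (hypothetical values, not from the original source):
 * the two node shapes mentioned above differ only in which key fields are
 * populated, e.g.
 *
 *   shadow stat:    { .evsel = NULL,    .cpu = 2, .type = STAT_CYCLES,
 *                     .ctx  = ctx,      .stat = st }
 *   generic metric: { .evsel = counter, .cpu = 2, .type = STAT_NONE,
 *                     .ctx  = 0,        .stat = st }
 *
 * Both shapes are ordered consistently by the comparison above.
 */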

static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
                                       const void *entry)
{
        struct saved_value *nd = malloc(sizeof(struct saved_value));

        if (!nd)
                return NULL;
        memcpy(nd, entry, sizeof(struct saved_value));
        return &nd->rb_node;
}

static void saved_value_delete(struct rblist *rblist __maybe_unused,
                               struct rb_node *rb_node)
{
        struct saved_value *v;

        BUG_ON(!rb_node);
        v = container_of(rb_node, struct saved_value, rb_node);
        free(v);
}

static struct saved_value *saved_value_lookup(struct evsel *evsel,
                                              int cpu,
                                              bool create,
                                              enum stat_type type,
                                              int ctx,
                                              struct runtime_stat *st,
                                              struct cgroup *cgrp)
{
        struct rblist *rblist;
        struct rb_node *nd;
        struct saved_value dm = {
                .cpu = cpu,
                .evsel = evsel,
                .type = type,
                .ctx = ctx,
                .stat = st,
                .cgrp = cgrp,
        };

        rblist = &st->value_list;

        /* don't use context info for clock events */
        if (type == STAT_NSECS)
                dm.ctx = 0;

        nd = rblist__find(rblist, &dm);
        if (nd)
                return container_of(nd, struct saved_value, rb_node);
        if (create) {
                rblist__add_node(rblist, &dm);
                nd = rblist__find(rblist, &dm);
                if (nd)
                        return container_of(nd, struct saved_value, rb_node);
        }
        return NULL;
}
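
/*
 * rblist__add_node() copies the stack-local template via saved_value_new(),
 * so after a successful add the tree is searched again to return the
 * heap-allocated node that actually lives in the rblist.
 */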

void runtime_stat__init(struct runtime_stat *st)
{
        struct rblist *rblist = &st->value_list;

        rblist__init(rblist);
        rblist->node_cmp = saved_value_cmp;
        rblist->node_new = saved_value_new;
        rblist->node_delete = saved_value_delete;
}

void runtime_stat__exit(struct runtime_stat *st)
{
        rblist__exit(&st->value_list);
}

void perf_stat__init_shadow_stats(void)
{
        runtime_stat__init(&rt_stat);
}

static int evsel_context(struct evsel *evsel)
{
        int ctx = 0;

        if (evsel->core.attr.exclude_kernel)
                ctx |= CTX_BIT_KERNEL;
        if (evsel->core.attr.exclude_user)
                ctx |= CTX_BIT_USER;
        if (evsel->core.attr.exclude_hv)
                ctx |= CTX_BIT_HV;
        if (evsel->core.attr.exclude_host)
                ctx |= CTX_BIT_HOST;
        if (evsel->core.attr.exclude_idle)
                ctx |= CTX_BIT_IDLE;

        return ctx;
}
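
/*
 * Illustrative example: an event opened as "instructions:u" has
 * exclude_kernel and exclude_hv set, so evsel_context() returns
 * CTX_BIT_KERNEL | CTX_BIT_HV. Counts measured under different exclusion
 * masks land in separate shadow-stat buckets and are never mixed.
 */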

static void reset_stat(struct runtime_stat *st)
{
        struct rblist *rblist;
        struct rb_node *pos, *next;

        rblist = &st->value_list;
        next = rb_first_cached(&rblist->entries);
        while (next) {
                pos = next;
                next = rb_next(pos);
                memset(&container_of(pos, struct saved_value, rb_node)->stats,
                       0,
                       sizeof(struct stats));
        }
}

void perf_stat__reset_shadow_stats(void)
{
        reset_stat(&rt_stat);
        memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
}

void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
{
        reset_stat(st);
}

struct runtime_stat_data {
        int ctx;
        struct cgroup *cgrp;
};

static void update_runtime_stat(struct runtime_stat *st,
                                enum stat_type type,
                                int cpu, u64 count,
                                struct runtime_stat_data *rsd)
{
        struct saved_value *v = saved_value_lookup(NULL, cpu, true, type,
                                                   rsd->ctx, st, rsd->cgrp);

        if (v)
                update_stats(&v->stats, count);
}

/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
                                    int cpu, struct runtime_stat *st)
{
        u64 count_ns = count;
        struct saved_value *v;
        struct runtime_stat_data rsd = {
                .ctx = evsel_context(counter),
                .cgrp = counter->cgrp,
        };

        count *= counter->scale;

        if (evsel__is_clock(counter))
                update_runtime_stat(st, STAT_NSECS, cpu, count_ns, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
                update_runtime_stat(st, STAT_CYCLES, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
                update_runtime_stat(st, STAT_CYCLES_IN_TX, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TRANSACTION_START))
                update_runtime_stat(st, STAT_TRANSACTION, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, ELISION_START))
                update_runtime_stat(st, STAT_ELISION, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
                update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
                                    cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
                update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
                                    cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
                update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
                                    cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
                update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
                                    cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
                update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
                                    cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_RETIRING))
                update_runtime_stat(st, STAT_TOPDOWN_RETIRING,
                                    cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_BAD_SPEC))
                update_runtime_stat(st, STAT_TOPDOWN_BAD_SPEC,
                                    cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_FE_BOUND))
                update_runtime_stat(st, STAT_TOPDOWN_FE_BOUND,
                                    cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_BE_BOUND))
                update_runtime_stat(st, STAT_TOPDOWN_BE_BOUND,
                                    cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_HEAVY_OPS))
                update_runtime_stat(st, STAT_TOPDOWN_HEAVY_OPS,
                                    cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_BR_MISPREDICT))
                update_runtime_stat(st, STAT_TOPDOWN_BR_MISPREDICT,
                                    cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_LAT))
                update_runtime_stat(st, STAT_TOPDOWN_FETCH_LAT,
                                    cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, TOPDOWN_MEM_BOUND))
                update_runtime_stat(st, STAT_TOPDOWN_MEM_BOUND,
                                    cpu, count, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
                update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
                                    cpu, count, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
                update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
                                    cpu, count, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
                update_runtime_stat(st, STAT_BRANCHES, cpu, count, &rsd);
        else if (evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
                update_runtime_stat(st, STAT_CACHEREFS, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
                update_runtime_stat(st, STAT_L1_DCACHE, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
                update_runtime_stat(st, STAT_L1_ICACHE, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_LL))
                update_runtime_stat(st, STAT_LL_CACHE, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
                update_runtime_stat(st, STAT_DTLB_CACHE, cpu, count, &rsd);
        else if (evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
                update_runtime_stat(st, STAT_ITLB_CACHE, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, SMI_NUM))
                update_runtime_stat(st, STAT_SMI_NUM, cpu, count, &rsd);
        else if (perf_stat_evsel__is(counter, APERF))
                update_runtime_stat(st, STAT_APERF, cpu, count, &rsd);

        if (counter->collect_stat) {
                v = saved_value_lookup(counter, cpu, true, STAT_NONE, 0, st,
                                       rsd.cgrp);
                update_stats(&v->stats, count);
                if (counter->metric_leader)
                        v->metric_total += count;
        } else if (counter->metric_leader) {
                v = saved_value_lookup(counter->metric_leader,
                                       cpu, true, STAT_NONE, 0, st, rsd.cgrp);
                v->metric_total += count;
                v->metric_other++;
        }
}
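
/*
 * The tail of perf_stat__update_shadow_stats() handles merged metric
 * events: a counter that is its own metric_leader accumulates into its own
 * metric_total, while a non-leader duplicate (e.g. the same event on
 * another PMU instance) folds its counts into the leader's node and bumps
 * metric_other so prepare_metric() knows a merged total is available.
 */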

/* used for get_ratio_color() */
enum grc_type {
        GRC_STALLED_CYCLES_FE,
        GRC_STALLED_CYCLES_BE,
        GRC_CACHE_MISSES,
        GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
        static const double grc_table[GRC_MAX_NR][3] = {
                [GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
                [GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
                [GRC_CACHE_MISSES]      = { 20.0, 10.0, 5.0 },
        };
        const char *color = PERF_COLOR_NORMAL;

        if (ratio > grc_table[type][0])
                color = PERF_COLOR_RED;
        else if (ratio > grc_table[type][1])
                color = PERF_COLOR_MAGENTA;
        else if (ratio > grc_table[type][2])
                color = PERF_COLOR_YELLOW;

        return color;
}
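
/*
 * The table thresholds are percentages. For example, a cache-miss ratio of
 * 12.5 falls between 10.0 and 20.0, so
 * get_ratio_color(GRC_CACHE_MISSES, 12.5) returns PERF_COLOR_MAGENTA.
 */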

static struct evsel *perf_stat__find_event(struct evlist *evsel_list,
                                                const char *name)
{
        struct evsel *c2;

        evlist__for_each_entry (evsel_list, c2) {
                if (!strcasecmp(c2->name, name) && !c2->collect_stat)
                        return c2;
        }
        return NULL;
}

/* Mark MetricExpr target events and link events using them to them. */
void perf_stat__collect_metric_expr(struct evlist *evsel_list)
{
        struct evsel *counter, *leader, **metric_events, *oc;
        bool found;
        struct expr_parse_ctx ctx;
        struct hashmap_entry *cur;
        size_t bkt;
        int i;

        expr__ctx_init(&ctx);
        evlist__for_each_entry(evsel_list, counter) {
                bool invalid = false;

                leader = evsel__leader(counter);
                if (!counter->metric_expr)
                        continue;

                expr__ctx_clear(&ctx);
                metric_events = counter->metric_events;
                if (!metric_events) {
                        if (expr__find_other(counter->metric_expr,
                                             counter->name,
                                             &ctx, 1) < 0)
                                continue;

                        metric_events = calloc(hashmap__size(&ctx.ids) + 1,
                                               sizeof(struct evsel *));
                        if (!metric_events) {
                                expr__ctx_clear(&ctx);
                                return;
                        }
                        counter->metric_events = metric_events;
                }

                i = 0;
                hashmap__for_each_entry((&ctx.ids), cur, bkt) {
                        const char *metric_name = (const char *)cur->key;

                        found = false;
                        if (leader) {
                                /* Search in group */
                                for_each_group_member (oc, leader) {
                                        if (!strcasecmp(oc->name,
                                                        metric_name) &&
                                                !oc->collect_stat) {
                                                found = true;
                                                break;
                                        }
                                }
                        }
                        if (!found) {
                                /* Search ignoring groups */
                                oc = perf_stat__find_event(evsel_list,
                                                           metric_name);
                        }
                        if (!oc) {
                                /* Deduping one is good enough to handle duplicated PMUs. */
                                static char *printed;

                                /*
                                 * Adding events automatically would be difficult, because
                                 * it would risk creating groups that are not schedulable.
                                 * perf stat doesn't understand all the scheduling constraints
                                 * of events. So we ask the user instead to add the missing
                                 * events.
                                 */
                                if (!printed ||
                                    strcasecmp(printed, metric_name)) {
                                        fprintf(stderr,
                                                "Add %s event to groups to get metric expression for %s\n",
                                                metric_name,
                                                counter->name);
                                        printed = strdup(metric_name);
                                }
                                invalid = true;
                                continue;
                        }
                        metric_events[i++] = oc;
                        oc->collect_stat = true;
                }
                metric_events[i] = NULL;
                if (invalid) {
                        free(metric_events);
                        counter->metric_events = NULL;
                        counter->metric_expr = NULL;
                }
        }
        expr__ctx_clear(&ctx);
}
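
/*
 * After this pass, counter->metric_events is a NULL-terminated array of the
 * evsels referenced by counter->metric_expr, and each referenced evsel has
 * collect_stat set so perf_stat__update_shadow_stats() records its counts
 * under the STAT_NONE/evsel key for prepare_metric() to read back.
 */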

static double runtime_stat_avg(struct runtime_stat *st,
                               enum stat_type type, int cpu,
                               struct runtime_stat_data *rsd)
{
        struct saved_value *v;

        v = saved_value_lookup(NULL, cpu, false, type, rsd->ctx, st, rsd->cgrp);
        if (!v)
                return 0.0;

        return avg_stats(&v->stats);
}

static double runtime_stat_n(struct runtime_stat *st,
                             enum stat_type type, int cpu,
                             struct runtime_stat_data *rsd)
{
        struct saved_value *v;

        v = saved_value_lookup(NULL, cpu, false, type, rsd->ctx, st, rsd->cgrp);
        if (!v)
                return 0.0;

        return v->stats.n;
}

static void print_stalled_cycles_frontend(struct perf_stat_config *config,
                                          int cpu, double avg,
                                          struct perf_stat_output_ctx *out,
                                          struct runtime_stat *st,
                                          struct runtime_stat_data *rsd)
{
        double total, ratio = 0.0;
        const char *color;

        total = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

        if (ratio)
                out->print_metric(config, out->ctx, color, "%7.2f%%", "frontend cycles idle",
                                  ratio);
        else
                out->print_metric(config, out->ctx, NULL, NULL, "frontend cycles idle", 0);
}

static void print_stalled_cycles_backend(struct perf_stat_config *config,
                                         int cpu, double avg,
                                         struct perf_stat_output_ctx *out,
                                         struct runtime_stat *st,
                                         struct runtime_stat_data *rsd)
{
        double total, ratio = 0.0;
        const char *color;

        total = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

        out->print_metric(config, out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
}

static void print_branch_misses(struct perf_stat_config *config,
                                int cpu, double avg,
                                struct perf_stat_output_ctx *out,
                                struct runtime_stat *st,
                                struct runtime_stat_data *rsd)
{
        double total, ratio = 0.0;
        const char *color;

        total = runtime_stat_avg(st, STAT_BRANCHES, cpu, rsd);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_CACHE_MISSES, ratio);

        out->print_metric(config, out->ctx, color, "%7.2f%%", "of all branches", ratio);
}

static void print_l1_dcache_misses(struct perf_stat_config *config,
                                   int cpu, double avg,
                                   struct perf_stat_output_ctx *out,
                                   struct runtime_stat *st,
                                   struct runtime_stat_data *rsd)
{
        double total, ratio = 0.0;
        const char *color;

        total = runtime_stat_avg(st, STAT_L1_DCACHE, cpu, rsd);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_CACHE_MISSES, ratio);

        out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-dcache accesses", ratio);
}

static void print_l1_icache_misses(struct perf_stat_config *config,
                                   int cpu, double avg,
                                   struct perf_stat_output_ctx *out,
                                   struct runtime_stat *st,
                                   struct runtime_stat_data *rsd)
{
        double total, ratio = 0.0;
        const char *color;

        total = runtime_stat_avg(st, STAT_L1_ICACHE, cpu, rsd);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_CACHE_MISSES, ratio);
        out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-icache accesses", ratio);
}

static void print_dtlb_cache_misses(struct perf_stat_config *config,
                                    int cpu, double avg,
                                    struct perf_stat_output_ctx *out,
                                    struct runtime_stat *st,
                                    struct runtime_stat_data *rsd)
{
        double total, ratio = 0.0;
        const char *color;

        total = runtime_stat_avg(st, STAT_DTLB_CACHE, cpu, rsd);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_CACHE_MISSES, ratio);
        out->print_metric(config, out->ctx, color, "%7.2f%%", "of all dTLB cache accesses", ratio);
}

static void print_itlb_cache_misses(struct perf_stat_config *config,
                                    int cpu, double avg,
                                    struct perf_stat_output_ctx *out,
                                    struct runtime_stat *st,
                                    struct runtime_stat_data *rsd)
{
        double total, ratio = 0.0;
        const char *color;

        total = runtime_stat_avg(st, STAT_ITLB_CACHE, cpu, rsd);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_CACHE_MISSES, ratio);
        out->print_metric(config, out->ctx, color, "%7.2f%%", "of all iTLB cache accesses", ratio);
}

static void print_ll_cache_misses(struct perf_stat_config *config,
                                  int cpu, double avg,
                                  struct perf_stat_output_ctx *out,
                                  struct runtime_stat *st,
                                  struct runtime_stat_data *rsd)
{
        double total, ratio = 0.0;
        const char *color;

        total = runtime_stat_avg(st, STAT_LL_CACHE, cpu, rsd);

        if (total)
                ratio = avg / total * 100.0;

        color = get_ratio_color(GRC_CACHE_MISSES, ratio);
        out->print_metric(config, out->ctx, color, "%7.2f%%", "of all LL-cache accesses", ratio);
}

/*
 * High level "TopDown" CPU core pipeline bottleneck breakdown.
 *
 * Basic concept following
 * Yasin, A Top Down Method for Performance analysis and Counter architecture
 * ISPASS14
 *
 * The CPU pipeline is divided into 4 areas that can be bottlenecks:
 *
 * Frontend -> Backend -> Retiring
 * BadSpeculation in addition means out of order execution that is thrown away
 * (for example branch mispredictions)
 * Frontend is instruction decoding.
 * Backend is execution, like computation and accessing data in memory
 * Retiring is good execution that is not directly bottlenecked
 *
 * The formulas are computed in slots.
 * A slot is an entry in the pipeline each for the pipeline width
 * (for example a 4-wide pipeline has 4 slots for each cycle)
 *
 * Formulas:
 * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) /
 *                      TotalSlots
 * Retiring = SlotsRetired / TotalSlots
 * FrontendBound = FetchBubbles / TotalSlots
 * BackendBound = 1.0 - BadSpeculation - Retiring - FrontendBound
 *
 * The kernel provides the mapping to the low level CPU events and any scaling
 * needed for the CPU pipeline width, for example:
 *
 * TotalSlots = Cycles * 4
 *
 * The scaling factor is communicated in the sysfs unit.
 *
 * In some cases the CPU may not be able to measure all the formulas due to
 * missing events. In this case multiple formulas are combined, where
 * possible.
 *
 * Full TopDown supports more levels to sub-divide each area: for example
 * BackendBound into computing bound and memory bound. For now we only
 * support Level 1 TopDown.
 */
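
/*
 * Worked example (illustrative numbers only): on a 4-wide pipeline over
 * 1000 cycles, TotalSlots = 4000. With SlotsIssued = 2200,
 * SlotsRetired = 1800, RecoveryBubbles = 200 and FetchBubbles = 400:
 *
 *   BadSpeculation = ((2200 - 1800) + 200) / 4000 = 0.15
 *   Retiring       = 1800 / 4000                  = 0.45
 *   FrontendBound  = 400 / 4000                   = 0.10
 *   BackendBound   = 1.0 - 0.15 - 0.45 - 0.10     = 0.30
 */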

static double sanitize_val(double x)
{
        if (x < 0 && x >= -0.02)
                return 0.0;
        return x;
}
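
/*
 * Counter skew (e.g. from multiplexing) can push a derived fraction
 * slightly below zero; values in [-0.02, 0) are clamped to 0 rather than
 * reported as negative percentages.
 */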

static double td_total_slots(int cpu, struct runtime_stat *st,
                             struct runtime_stat_data *rsd)
{
        return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, cpu, rsd);
}

static double td_bad_spec(int cpu, struct runtime_stat *st,
                          struct runtime_stat_data *rsd)
{
        double bad_spec = 0;
        double total_slots;
        double total;

        total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, cpu, rsd) -
                runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, cpu, rsd) +
                runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, cpu, rsd);

        total_slots = td_total_slots(cpu, st, rsd);
        if (total_slots)
                bad_spec = total / total_slots;
        return sanitize_val(bad_spec);
}

static double td_retiring(int cpu, struct runtime_stat *st,
                          struct runtime_stat_data *rsd)
{
        double retiring = 0;
        double total_slots = td_total_slots(cpu, st, rsd);
        double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
                                            cpu, rsd);

        if (total_slots)
                retiring = ret_slots / total_slots;
        return retiring;
}

static double td_fe_bound(int cpu, struct runtime_stat *st,
                          struct runtime_stat_data *rsd)
{
        double fe_bound = 0;
        double total_slots = td_total_slots(cpu, st, rsd);
        double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
                                            cpu, rsd);

        if (total_slots)
                fe_bound = fetch_bub / total_slots;
        return fe_bound;
}

static double td_be_bound(int cpu, struct runtime_stat *st,
                          struct runtime_stat_data *rsd)
{
        double sum = (td_fe_bound(cpu, st, rsd) +
                      td_bad_spec(cpu, st, rsd) +
                      td_retiring(cpu, st, rsd));
        if (sum == 0)
                return 0;
        return sanitize_val(1.0 - sum);
}
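
/*
 * BackendBound is derived as the remainder of the level-1 budget. When none
 * of the other three fractions are available (sum == 0), 0 is returned so
 * a machine with no TopDown data is not reported as 100% backend bound.
 */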

/*
 * Kernel reports metrics multiplied by slots. To get back
 * the ratios we need to recreate the sum.
 */

static double td_metric_ratio(int cpu, enum stat_type type,
                              struct runtime_stat *stat,
                              struct runtime_stat_data *rsd)
{
        double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu, rsd) +
                runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu, rsd) +
                runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu, rsd) +
                runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu, rsd);
        double d = runtime_stat_avg(stat, type, cpu, rsd);

        if (sum)
                return d / sum;
        return 0;
}

/*
 * ... but only if most of the values are actually available.
 * We allow two missing.
 */

static bool full_td(int cpu, struct runtime_stat *stat,
                    struct runtime_stat_data *rsd)
{
        int c = 0;

        if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, cpu, rsd) > 0)
                c++;
        if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, cpu, rsd) > 0)
                c++;
        if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, cpu, rsd) > 0)
                c++;
        if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, cpu, rsd) > 0)
                c++;
        return c >= 2;
}
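
/*
 * There are four level-1 metrics, so with "two missing" allowed the check
 * is c >= 2: at least half of retiring/bad-spec/fe-bound/be-bound must have
 * been measured before ratios are printed.
 */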

static void print_smi_cost(struct perf_stat_config *config, int cpu,
                           struct perf_stat_output_ctx *out,
                           struct runtime_stat *st,
                           struct runtime_stat_data *rsd)
{
        double smi_num, aperf, cycles, cost = 0.0;
        const char *color = NULL;

        smi_num = runtime_stat_avg(st, STAT_SMI_NUM, cpu, rsd);
        aperf = runtime_stat_avg(st, STAT_APERF, cpu, rsd);
        cycles = runtime_stat_avg(st, STAT_CYCLES, cpu, rsd);

        if ((cycles == 0) || (aperf == 0))
                return;

        if (smi_num)
                cost = (aperf - cycles) / aperf * 100.00;

        if (cost > 10)
                color = PERF_COLOR_RED;
        out->print_metric(config, out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
        out->print_metric(config, out->ctx, NULL, "%4.0f", "SMI#", smi_num);
}
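
/*
 * The SMI cost formula assumes the cycle counter is frozen while the CPU is
 * in SMM (perf stat --smi-cost enables /sys/devices/cpu/freeze_on_smi), so
 * aperf - cycles approximates the cycles stolen by SMIs.
 */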

static int prepare_metric(struct evsel **metric_events,
                          struct metric_ref *metric_refs,
                          struct expr_parse_ctx *pctx,
                          int cpu,
                          struct runtime_stat *st)
{
        double scale;
        char *n, *pn;
        int i, j, ret;

        expr__ctx_init(pctx);
        for (i = 0; metric_events[i]; i++) {
                struct saved_value *v;
                struct stats *stats;
                u64 metric_total = 0;

                if (!strcmp(metric_events[i]->name, "duration_time")) {
                        stats = &walltime_nsecs_stats;
                        scale = 1e-9;
                } else {
                        v = saved_value_lookup(metric_events[i], cpu, false,
                                               STAT_NONE, 0, st,
                                               metric_events[i]->cgrp);
                        if (!v)
                                break;
                        stats = &v->stats;
                        scale = 1.0;

                        if (v->metric_other)
                                metric_total = v->metric_total;
                }

                n = strdup(metric_events[i]->name);
                if (!n)
                        return -ENOMEM;
                /*
                 * This display code with --no-merge adds [cpu] postfixes.
                 * These are not supported by the parser. Remove everything
                 * after the space.
                 */
                pn = strchr(n, ' ');
                if (pn)
                        *pn = 0;

                if (metric_total)
                        expr__add_id_val(pctx, n, metric_total);
                else
                        expr__add_id_val(pctx, n, avg_stats(stats) * scale);
        }

        for (j = 0; metric_refs && metric_refs[j].metric_name; j++) {
                ret = expr__add_ref(pctx, &metric_refs[j]);
                if (ret)
                        return ret;
        }

        return i;
}
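
/*
 * Return value: the number of metric events whose values were loaded into
 * *pctx, or -ENOMEM. A break on a missing saved_value leaves
 * metric_events[i] non-NULL, which generic_metric() uses to detect that the
 * expression cannot be evaluated yet.
 */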

static void generic_metric(struct perf_stat_config *config,
                           const char *metric_expr,
                           struct evsel **metric_events,
                           struct metric_ref *metric_refs,
                           char *name,
                           const char *metric_name,
                           const char *metric_unit,
                           int runtime,
                           int cpu,
                           struct perf_stat_output_ctx *out,
                           struct runtime_stat *st)
{
        print_metric_t print_metric = out->print_metric;
        struct expr_parse_ctx pctx;
        double ratio, scale;
        int i;
        void *ctxp = out->ctx;

        i = prepare_metric(metric_events, metric_refs, &pctx, cpu, st);
        if (i < 0)
                return;

        if (!metric_events[i]) {
                if (expr__parse(&ratio, &pctx, metric_expr, runtime) == 0) {
                        char *unit;
                        char metric_bf[64];

                        if (metric_unit && metric_name) {
                                if (perf_pmu__convert_scale(metric_unit,
                                        &unit, &scale) >= 0) {
                                        ratio *= scale;
                                }
                                if (strstr(metric_expr, "?"))
                                        scnprintf(metric_bf, sizeof(metric_bf),
                                          "%s  %s_%d", unit, metric_name, runtime);
                                else
                                        scnprintf(metric_bf, sizeof(metric_bf),
                                          "%s  %s", unit, metric_name);

                                print_metric(config, ctxp, NULL, "%8.1f",
                                             metric_bf, ratio);
                        } else {
                                print_metric(config, ctxp, NULL, "%8.2f",
                                        metric_name ?
                                        metric_name :
                                        out->force_header ? name : "",
                                        ratio);
                        }
                } else {
                        print_metric(config, ctxp, NULL, NULL,
                                     out->force_header ?
                                     (metric_name ? metric_name : name) : "", 0);
                }
        } else {
                print_metric(config, ctxp, NULL, NULL,
                             out->force_header ?
                             (metric_name ? metric_name : name) : "", 0);
        }

        expr__ctx_clear(&pctx);
}
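
/*
 * A "?" in the expression marks a runtime parameter (the metric is
 * instantiated once per parameter value), so the printed metric name gets a
 * _%d suffix to keep the instances distinguishable.
 */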

double test_generic_metric(struct metric_expr *mexp, int cpu, struct runtime_stat *st)
{
        struct expr_parse_ctx pctx;
        double ratio = 0.0;

        if (prepare_metric(mexp->metric_events, mexp->metric_refs, &pctx, cpu, st) < 0)
                goto out;

        if (expr__parse(&ratio, &pctx, mexp->metric_expr, 1))
                ratio = 0.0;

out:
        expr__ctx_clear(&pctx);
        return ratio;
}

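/*
 * perf_stat__print_shadow_stats() is the main dispatcher: it matches the
 * evsel against known event types to print a derived ratio (IPC, miss
 * rates, TopDown fractions, ...), falls back to a generic "<count>/sec"
 * rate when only wall time is known, and finally evaluates any metric
 * expressions attached to the event via metric_events.
 */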
void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                   struct evsel *evsel,
                                   double avg, int cpu,
                                   struct perf_stat_output_ctx *out,
                                   struct rblist *metric_events,
                                   struct runtime_stat *st)
{
        void *ctxp = out->ctx;
        print_metric_t print_metric = out->print_metric;
        double total, ratio = 0.0, total2;
        const char *color = NULL;
        struct runtime_stat_data rsd = {
                .ctx = evsel_context(evsel),
                .cgrp = evsel->cgrp,
        };
        struct metric_event *me;
        int num = 1;

        if (config->iostat_run) {
                iostat_print_metric(config, evsel, out);
        } else if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
                total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);

                if (total) {
                        ratio = avg / total;
                        print_metric(config, ctxp, NULL, "%7.2f ",
                                        "insn per cycle", ratio);
                } else {
                        print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0);
                }

                total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT, cpu, &rsd);

                total = max(total, runtime_stat_avg(st,
                                                    STAT_STALLED_CYCLES_BACK,
                                                    cpu, &rsd));

                if (total && avg) {
                        out->new_line(config, ctxp);
                        ratio = total / avg;
                        print_metric(config, ctxp, NULL, "%7.2f ",
                                        "stalled cycles per insn",
                                        ratio);
                }
        } else if (evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
                if (runtime_stat_n(st, STAT_BRANCHES, cpu, &rsd) != 0)
                        print_branch_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all branches", 0);
        } else if (
                evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
                evsel->core.attr.config == (PERF_COUNT_HW_CACHE_L1D |
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                        ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

                if (runtime_stat_n(st, STAT_L1_DCACHE, cpu, &rsd) != 0)
                        print_l1_dcache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all L1-dcache accesses", 0);
        } else if (
                evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
                evsel->core.attr.config == (PERF_COUNT_HW_CACHE_L1I |
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                        ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

                if (runtime_stat_n(st, STAT_L1_ICACHE, cpu, &rsd) != 0)
                        print_l1_icache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all L1-icache accesses", 0);
        } else if (
                evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
                evsel->core.attr.config == (PERF_COUNT_HW_CACHE_DTLB |
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                        ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

                if (runtime_stat_n(st, STAT_DTLB_CACHE, cpu, &rsd) != 0)
                        print_dtlb_cache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all dTLB cache accesses", 0);
        } else if (
                evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
                evsel->core.attr.config == (PERF_COUNT_HW_CACHE_ITLB |
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                        ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

                if (runtime_stat_n(st, STAT_ITLB_CACHE, cpu, &rsd) != 0)
                        print_itlb_cache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all iTLB cache accesses", 0);
        } else if (
                evsel->core.attr.type == PERF_TYPE_HW_CACHE &&
                evsel->core.attr.config == (PERF_COUNT_HW_CACHE_LL |
                                        ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
                                        ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

                if (runtime_stat_n(st, STAT_LL_CACHE, cpu, &rsd) != 0)
                        print_ll_cache_misses(config, cpu, avg, out, st, &rsd);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all LL-cache accesses", 0);
        } else if (evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
                total = runtime_stat_avg(st, STAT_CACHEREFS, cpu, &rsd);

                if (total)
                        ratio = avg * 100 / total;

                if (runtime_stat_n(st, STAT_CACHEREFS, cpu, &rsd) != 0)
                        print_metric(config, ctxp, NULL, "%8.3f %%",
                                     "of all cache refs", ratio);
                else
                        print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0);
        } else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
                print_stalled_cycles_frontend(config, cpu, avg, out, st, &rsd);
        } else if (evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
                print_stalled_cycles_backend(config, cpu, avg, out, st, &rsd);
        } else if (evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
                total = runtime_stat_avg(st, STAT_NSECS, cpu, &rsd);

                if (total) {
                        ratio = avg / total;
                        print_metric(config, ctxp, NULL, "%8.3f", "GHz", ratio);
                } else {
                        print_metric(config, ctxp, NULL, NULL, "GHz", 0);
                }
        } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
                total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);

                if (total)
                        print_metric(config, ctxp, NULL,
                                        "%7.2f%%", "transactional cycles",
                                        100.0 * (avg / total));
                else
                        print_metric(config, ctxp, NULL, NULL, "transactional cycles",
                                     0);
        } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
                total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);
                total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);

                if (total2 < avg)
                        total2 = avg;
                if (total)
                        print_metric(config, ctxp, NULL, "%7.2f%%", "aborted cycles",
                                100.0 * ((total2-avg) / total));
                else
                        print_metric(config, ctxp, NULL, NULL, "aborted cycles", 0);
        } else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
                total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);

                if (avg)
                        ratio = total / avg;

                if (runtime_stat_n(st, STAT_CYCLES_IN_TX, cpu, &rsd) != 0)
                        print_metric(config, ctxp, NULL, "%8.0f",
                                     "cycles / transaction", ratio);
                else
                        print_metric(config, ctxp, NULL, NULL, "cycles / transaction",
                                      0);
        } else if (perf_stat_evsel__is(evsel, ELISION_START)) {
                total = runtime_stat_avg(st, STAT_CYCLES_IN_TX, cpu, &rsd);

                if (avg)
                        ratio = total / avg;

                print_metric(config, ctxp, NULL, "%8.0f", "cycles / elision", ratio);
        } else if (evsel__is_clock(evsel)) {
                if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
                        print_metric(config, ctxp, NULL, "%8.3f", "CPUs utilized",
                                     avg / (ratio * evsel->scale));
                else
                        print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
                double fe_bound = td_fe_bound(cpu, st, &rsd);

                if (fe_bound > 0.2)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
                                fe_bound * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
                double retiring = td_retiring(cpu, st, &rsd);

                if (retiring > 0.7)
                        color = PERF_COLOR_GREEN;
                print_metric(config, ctxp, color, "%8.1f%%", "retiring",
                                retiring * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
                double bad_spec = td_bad_spec(cpu, st, &rsd);

                if (bad_spec > 0.1)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
                                bad_spec * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
                double be_bound = td_be_bound(cpu, st, &rsd);
                const char *name = "backend bound";
                static int have_recovery_bubbles = -1;

                /* In case the CPU does not support topdown-recovery-bubbles */
                if (have_recovery_bubbles < 0)
                        have_recovery_bubbles = pmu_have_event("cpu",
                                        "topdown-recovery-bubbles");
                if (!have_recovery_bubbles)
                        name = "backend bound/bad spec";

                if (be_bound > 0.2)
                        color = PERF_COLOR_RED;
                if (td_total_slots(cpu, st, &rsd) > 0)
                        print_metric(config, ctxp, color, "%8.1f%%", name,
                                        be_bound * 100.);
                else
                        print_metric(config, ctxp, NULL, NULL, name, 0);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_RETIRING) &&
                   full_td(cpu, st, &rsd)) {
                double retiring = td_metric_ratio(cpu,
                                                  STAT_TOPDOWN_RETIRING, st,
                                                  &rsd);
                if (retiring > 0.7)
                        color = PERF_COLOR_GREEN;
                print_metric(config, ctxp, color, "%8.1f%%", "retiring",
                                retiring * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_FE_BOUND) &&
                   full_td(cpu, st, &rsd)) {
                double fe_bound = td_metric_ratio(cpu,
                                                  STAT_TOPDOWN_FE_BOUND, st,
                                                  &rsd);
                if (fe_bound > 0.2)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
                                fe_bound * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_BE_BOUND) &&
                   full_td(cpu, st, &rsd)) {
                double be_bound = td_metric_ratio(cpu,
                                                  STAT_TOPDOWN_BE_BOUND, st,
                                                  &rsd);
                if (be_bound > 0.2)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "backend bound",
                                be_bound * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_BAD_SPEC) &&
                   full_td(cpu, st, &rsd)) {
                double bad_spec = td_metric_ratio(cpu,
                                                  STAT_TOPDOWN_BAD_SPEC, st,
                                                  &rsd);
                if (bad_spec > 0.1)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
                                bad_spec * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_HEAVY_OPS) &&
                        full_td(cpu, st, &rsd) && (config->topdown_level > 1)) {
                double retiring = td_metric_ratio(cpu,
                                                  STAT_TOPDOWN_RETIRING, st,
                                                  &rsd);
                double heavy_ops = td_metric_ratio(cpu,
                                                   STAT_TOPDOWN_HEAVY_OPS, st,
                                                   &rsd);
                double light_ops = retiring - heavy_ops;

                if (retiring > 0.7 && heavy_ops > 0.1)
                        color = PERF_COLOR_GREEN;
                print_metric(config, ctxp, color, "%8.1f%%", "heavy operations",
                                heavy_ops * 100.);
                if (retiring > 0.7 && light_ops > 0.6)
                        color = PERF_COLOR_GREEN;
                else
                        color = NULL;
                print_metric(config, ctxp, color, "%8.1f%%", "light operations",
                                light_ops * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_BR_MISPREDICT) &&
                        full_td(cpu, st, &rsd) && (config->topdown_level > 1)) {
                double bad_spec = td_metric_ratio(cpu,
                                                  STAT_TOPDOWN_BAD_SPEC, st,
                                                  &rsd);
                double br_mis = td_metric_ratio(cpu,
                                                STAT_TOPDOWN_BR_MISPREDICT, st,
                                                &rsd);
                double m_clears = bad_spec - br_mis;

                if (bad_spec > 0.1 && br_mis > 0.05)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "branch mispredict",
                                br_mis * 100.);
                if (bad_spec > 0.1 && m_clears > 0.05)
                        color = PERF_COLOR_RED;
                else
                        color = NULL;
                print_metric(config, ctxp, color, "%8.1f%%", "machine clears",
                                m_clears * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_LAT) &&
                        full_td(cpu, st, &rsd) && (config->topdown_level > 1)) {
                double fe_bound = td_metric_ratio(cpu,
                                                  STAT_TOPDOWN_FE_BOUND, st,
                                                  &rsd);
                double fetch_lat = td_metric_ratio(cpu,
                                                   STAT_TOPDOWN_FETCH_LAT, st,
                                                   &rsd);
                double fetch_bw = fe_bound - fetch_lat;

                if (fe_bound > 0.2 && fetch_lat > 0.15)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "fetch latency",
                                fetch_lat * 100.);
                if (fe_bound > 0.2 && fetch_bw > 0.1)
                        color = PERF_COLOR_RED;
                else
                        color = NULL;
                print_metric(config, ctxp, color, "%8.1f%%", "fetch bandwidth",
                                fetch_bw * 100.);
        } else if (perf_stat_evsel__is(evsel, TOPDOWN_MEM_BOUND) &&
                        full_td(cpu, st, &rsd) && (config->topdown_level > 1)) {
                double be_bound = td_metric_ratio(cpu,
                                                  STAT_TOPDOWN_BE_BOUND, st,
                                                  &rsd);
                double mem_bound = td_metric_ratio(cpu,
                                                   STAT_TOPDOWN_MEM_BOUND, st,
                                                   &rsd);
                double core_bound = be_bound - mem_bound;

                if (be_bound > 0.2 && mem_bound > 0.2)
                        color = PERF_COLOR_RED;
                print_metric(config, ctxp, color, "%8.1f%%", "memory bound",
                                mem_bound * 100.);
                if (be_bound > 0.2 && core_bound > 0.1)
                        color = PERF_COLOR_RED;
                else
                        color = NULL;
                print_metric(config, ctxp, color, "%8.1f%%", "core bound",
                                core_bound * 100.);
        } else if (evsel->metric_expr) {
                generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
                                evsel->name, evsel->metric_name, NULL, 1, cpu, out, st);
        } else if (runtime_stat_n(st, STAT_NSECS, cpu, &rsd) != 0) {
                char unit = ' ';
                char unit_buf[10] = "/sec";

                total = runtime_stat_avg(st, STAT_NSECS, cpu, &rsd);
                if (total)
                        ratio = convert_unit_double(1000000000.0 * avg / total, &unit);

                if (unit != ' ')
                        snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
                print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
        } else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
                print_smi_cost(config, cpu, out, st, &rsd);
        } else {
                num = 0;
        }

        if ((me = metricgroup__lookup(metric_events, evsel, false)) != NULL) {
                struct metric_expr *mexp;

                list_for_each_entry (mexp, &me->head, nd) {
                        if (num++ > 0)
                                out->new_line(config, ctxp);
                        generic_metric(config, mexp->metric_expr, mexp->metric_events,
                                        mexp->metric_refs, evsel->name, mexp->metric_name,
                                        mexp->metric_unit, mexp->runtime, cpu, out, st);
                }
        }
        if (num == 0)
                print_metric(config, ctxp, NULL, NULL, NULL, 0);
}