linux/tools/perf/util/evlist-hybrid.c
// SPDX-License-Identifier: GPL-2.0-only
#include <errno.h>
#include <inttypes.h>
#include "cpumap.h"
#include "evlist.h"
#include "evsel.h"
#include "../perf.h"
#include "util/pmu-hybrid.h"
#include "util/evlist-hybrid.h"
#include "debug.h"
#include <unistd.h>
#include <stdlib.h>
#include <linux/err.h>
#include <linux/string.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>

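/*
 * Add a default "cycles" event for each hybrid PMU. The hybrid PMU type
 * is encoded into the high bits of attr.config (PERF_PMU_TYPE_SHIFT) so
 * the kernel can route the event to the right PMU, and each evsel is
 * pinned to its own PMU's CPU map.
 */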
int evlist__add_default_hybrid(struct evlist *evlist, bool precise)
{
        struct evsel *evsel;
        struct perf_pmu *pmu;
        __u64 config;
        struct perf_cpu_map *cpus;

        perf_pmu__for_each_hybrid_pmu(pmu) {
                config = PERF_COUNT_HW_CPU_CYCLES |
                         ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT);
                evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
                                          config);
                if (!evsel)
                        return -ENOMEM;

                cpus = perf_cpu_map__get(pmu->cpus);
                evsel->core.cpus = cpus;
                evsel->core.own_cpus = perf_cpu_map__get(cpus);
                evsel->pmu_name = strdup(pmu->name);
                evlist__add(evlist, evsel);
        }

        return 0;
}

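/*
 * Return true if the group led by @leader contains hybrid events from
 * more than one hybrid PMU (e.g. one cpu_core and one cpu_atom event);
 * non-hybrid members are ignored.
 */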
static bool group_hybrid_conflict(struct evsel *leader)
{
        struct evsel *pos, *prev = NULL;

        for_each_group_evsel(pos, leader) {
                if (!evsel__is_hybrid(pos))
                        continue;

                if (prev && strcmp(prev->pmu_name, pos->pmu_name))
                        return true;

                prev = pos;
        }

        return false;
}

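/*
 * Warn once if any event group in @evlist mixes events from different
 * hybrid PMUs.
 */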
void evlist__warn_hybrid_group(struct evlist *evlist)
{
        struct evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel__is_group_leader(evsel) &&
                    evsel->core.nr_members > 1 &&
                    group_hybrid_conflict(evsel)) {
                        pr_warning("WARNING: events in group from "
                                   "different hybrid PMUs!\n");
                        return;
                }
        }
}

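/* Return true if any event in @evlist targets a hybrid PMU. */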
bool evlist__has_hybrid(struct evlist *evlist)
{
        struct evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->pmu_name &&
                    perf_pmu__is_hybrid(evsel->pmu_name)) {
                        return true;
                }
        }

        return false;
}

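/*
 * Intersect each hybrid evsel's CPU map with the user supplied @cpu_list.
 * Events whose PMU has no CPU in @cpu_list are removed from the evlist.
 * Returns 0 on success, and a negative value if @cpu_list cannot be
 * parsed, if CPU matching fails, or if no event matches any CPU in the
 * list.
 */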
int evlist__fix_hybrid_cpus(struct evlist *evlist, const char *cpu_list)
{
        struct perf_cpu_map *cpus;
        struct evsel *evsel, *tmp;
        struct perf_pmu *pmu;
        int ret, unmatched_count = 0, events_nr = 0;

        if (!perf_pmu__has_hybrid() || !cpu_list)
                return 0;

        cpus = perf_cpu_map__new(cpu_list);
        if (!cpus)
                return -1;

        /*
         * The evsels are created with the hybrid PMU's cpus. Now adjust
         * each evsel's cpus against cpu_list, since cpu_list may conflict
         * with them. For example, if an evsel's cpus are cpu0-7 but the
         * cpu_list is cpu6-8, the evsel's cpus must be adjusted to cpu6-7.
         * The maps are then propagated in evlist__create_maps().
         */
        evlist__for_each_entry_safe(evlist, tmp, evsel) {
                struct perf_cpu_map *matched_cpus, *unmatched_cpus;
                char buf1[128], buf2[128];

                pmu = perf_pmu__find_hybrid_pmu(evsel->pmu_name);
                if (!pmu)
                        continue;

                ret = perf_pmu__cpus_match(pmu, cpus, &matched_cpus,
                                           &unmatched_cpus);
                if (ret)
                        goto out;

                events_nr++;

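                /*
                 * Partial overlap: shrink this evsel's CPU maps to the
                 * CPUs that both belong to the PMU and appear in cpu_list.
                 */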
                if (matched_cpus->nr > 0 && (unmatched_cpus->nr > 0 ||
                    matched_cpus->nr < cpus->nr ||
                    matched_cpus->nr < pmu->cpus->nr)) {
                        perf_cpu_map__put(evsel->core.cpus);
                        perf_cpu_map__put(evsel->core.own_cpus);
                        evsel->core.cpus = perf_cpu_map__get(matched_cpus);
                        evsel->core.own_cpus = perf_cpu_map__get(matched_cpus);

                        if (unmatched_cpus->nr > 0) {
                                cpu_map__snprint(matched_cpus, buf1, sizeof(buf1));
                                pr_warning("WARNING: use %s in '%s' for '%s', skip other cpus in list.\n",
                                           buf1, pmu->name, evsel->name);
                        }
                }

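                /*
                 * No overlap at all: this PMU contributes no CPUs from
                 * cpu_list, so drop the event and warn the user.
                 */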
                if (matched_cpus->nr == 0) {
                        evlist__remove(evlist, evsel);
                        evsel__delete(evsel);

                        cpu_map__snprint(cpus, buf1, sizeof(buf1));
                        cpu_map__snprint(pmu->cpus, buf2, sizeof(buf2));
                        pr_warning("WARNING: %s isn't a '%s', please use a CPU list in the '%s' range (%s)\n",
                                   buf1, pmu->name, pmu->name, buf2);
                        unmatched_count++;
                }

                perf_cpu_map__put(matched_cpus);
                perf_cpu_map__put(unmatched_cpus);
        }

        ret = (unmatched_count == events_nr) ? -1 : 0;
out:
        perf_cpu_map__put(cpus);
        return ret;
}