linux/tools/perf/util/env.c
// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "bpf-event.h"
#include <errno.h>
#include <sys/utsname.h>
#include <bpf/libbpf.h>
#include <stdlib.h>
#include <string.h>

struct perf_env perf_env;

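/*
 * Insert @info_node into the env's bpf prog info rb-tree, keyed by the
 * program id. Duplicate ids are dropped with a debug message. Takes
 * bpf_progs.lock for writing; once inserted the node is owned by the env
 * and is released in perf_env__purge_bpf().
 */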
void perf_env__insert_bpf_prog_info(struct perf_env *env,
                                    struct bpf_prog_info_node *info_node)
{
        __u32 prog_id = info_node->info_linear->info.id;
        struct bpf_prog_info_node *node;
        struct rb_node *parent = NULL;
        struct rb_node **p;

        down_write(&env->bpf_progs.lock);
        p = &env->bpf_progs.infos.rb_node;

        while (*p != NULL) {
                parent = *p;
                node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
                if (prog_id < node->info_linear->info.id) {
                        p = &(*p)->rb_left;
                } else if (prog_id > node->info_linear->info.id) {
                        p = &(*p)->rb_right;
                } else {
                        pr_debug("duplicated bpf prog info %u\n", prog_id);
                        goto out;
                }
        }

        rb_link_node(&info_node->rb_node, parent, p);
        rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
        env->bpf_progs.infos_cnt++;
out:
        up_write(&env->bpf_progs.lock);
}

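/*
 * Look up the bpf prog info node for @prog_id under bpf_progs.lock;
 * returns NULL when no program with that id has been recorded.
 *
 * Illustrative use (assuming a previously populated env):
 *
 *        node = perf_env__find_bpf_prog_info(env, prog_id);
 *        if (node)
 *                nr_ksyms = node->info_linear->info.nr_jited_ksyms;
 */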
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
                                                        __u32 prog_id)
{
        struct bpf_prog_info_node *node = NULL;
        struct rb_node *n;

        down_read(&env->bpf_progs.lock);
        n = env->bpf_progs.infos.rb_node;

        while (n) {
                node = rb_entry(n, struct bpf_prog_info_node, rb_node);
                if (prog_id < node->info_linear->info.id)
                        n = n->rb_left;
                else if (prog_id > node->info_linear->info.id)
                        n = n->rb_right;
                else
                        goto out;
        }
        node = NULL;

out:
        up_read(&env->bpf_progs.lock);
        return node;
}

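/*
 * Insert @btf_node into the env's BTF rb-tree, keyed by the BTF id.
 * Duplicate ids are dropped with a debug message; bpf_progs.lock is
 * taken for writing.
 */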
void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
        struct rb_node *parent = NULL;
        __u32 btf_id = btf_node->id;
        struct btf_node *node;
        struct rb_node **p;

        down_write(&env->bpf_progs.lock);
        p = &env->bpf_progs.btfs.rb_node;

        while (*p != NULL) {
                parent = *p;
                node = rb_entry(parent, struct btf_node, rb_node);
                if (btf_id < node->id) {
                        p = &(*p)->rb_left;
                } else if (btf_id > node->id) {
                        p = &(*p)->rb_right;
                } else {
                        pr_debug("duplicated btf %u\n", btf_id);
                        goto out;
                }
        }

        rb_link_node(&btf_node->rb_node, parent, p);
        rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
        env->bpf_progs.btfs_cnt++;
out:
        up_write(&env->bpf_progs.lock);
}

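/*
 * Look up the BTF node for @btf_id under bpf_progs.lock; returns NULL
 * when the id is unknown.
 */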
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
        struct btf_node *node = NULL;
        struct rb_node *n;

        down_read(&env->bpf_progs.lock);
        n = env->bpf_progs.btfs.rb_node;

        while (n) {
                node = rb_entry(n, struct btf_node, rb_node);
                if (btf_id < node->id)
                        n = n->rb_left;
                else if (btf_id > node->id)
                        n = n->rb_right;
                else
                        goto out;
        }
        node = NULL;

out:
        up_read(&env->bpf_progs.lock);
        return node;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
        struct rb_root *root;
        struct rb_node *next;

        down_write(&env->bpf_progs.lock);

        root = &env->bpf_progs.infos;
        next = rb_first(root);

        while (next) {
                struct bpf_prog_info_node *node;

                node = rb_entry(next, struct bpf_prog_info_node, rb_node);
                next = rb_next(&node->rb_node);
                rb_erase(&node->rb_node, root);
                /* also free the info_linear data attached to the node */
                free(node->info_linear);
                free(node);
        }

        env->bpf_progs.infos_cnt = 0;

        root = &env->bpf_progs.btfs;
        next = rb_first(root);

        while (next) {
                struct btf_node *node;

                node = rb_entry(next, struct btf_node, rb_node);
                next = rb_next(&node->rb_node);
                rb_erase(&node->rb_node, root);
                free(node);
        }

        env->bpf_progs.btfs_cnt = 0;

        up_write(&env->bpf_progs.lock);
}

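/*
 * Release everything owned by the env: the BPF/BTF trees plus the
 * strings and topology tables read from a perf.data header or gathered
 * from the local system.
 */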
void perf_env__exit(struct perf_env *env)
{
        int i;

        perf_env__purge_bpf(env);
        zfree(&env->hostname);
        zfree(&env->os_release);
        zfree(&env->version);
        zfree(&env->arch);
        zfree(&env->cpu_desc);
        zfree(&env->cpuid);
        zfree(&env->cmdline);
        zfree(&env->cmdline_argv);
        zfree(&env->sibling_cores);
        zfree(&env->sibling_threads);
        zfree(&env->pmu_mappings);
        zfree(&env->cpu);

        for (i = 0; i < env->nr_numa_nodes; i++)
                cpu_map__put(env->numa_nodes[i].map);
        zfree(&env->numa_nodes);

        for (i = 0; i < env->caches_cnt; i++)
                cpu_cache_level__free(&env->caches[i]);
        zfree(&env->caches);

        for (i = 0; i < env->nr_memory_nodes; i++)
                zfree(&env->memory_nodes[i].set);
        zfree(&env->memory_nodes);
}

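/* Prepare the rb-trees and lock used to store BPF prog info and BTF data. */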
void perf_env__init(struct perf_env *env)
{
        env->bpf_progs.infos = RB_ROOT;
        env->bpf_progs.btfs = RB_ROOT;
        init_rwsem(&env->bpf_progs.lock);
}

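/*
 * Keep a copy of the argv pointer array (sized @argc, without NULL
 * termination) so the recorded command line survives option parsing
 * reordering the caller's argv.
 */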
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
        int i;

        /* do not include NULL termination */
        env->cmdline_argv = calloc(argc, sizeof(char *));
        if (env->cmdline_argv == NULL)
                goto out_enomem;

        /*
         * Must copy argv contents because it gets moved around during option
         * parsing:
         */
        for (i = 0; i < argc ; i++) {
                env->cmdline_argv[i] = argv[i];
                if (env->cmdline_argv[i] == NULL)
                        goto out_free;
        }

        env->nr_cmdline = argc;

        return 0;
out_free:
        zfree(&env->cmdline_argv);
out_enomem:
        return -ENOMEM;
}

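/*
 * Populate env->cpu with the core/die/socket id of every CPU on the
 * local system; a no-op when the map has already been read.
 */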
int perf_env__read_cpu_topology_map(struct perf_env *env)
{
        int cpu, nr_cpus;

        if (env->cpu != NULL)
                return 0;

        if (env->nr_cpus_avail == 0)
                env->nr_cpus_avail = cpu__max_present_cpu();

        nr_cpus = env->nr_cpus_avail;
        if (nr_cpus == -1)
                return -EINVAL;

        env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
        if (env->cpu == NULL)
                return -ENOMEM;

        for (cpu = 0; cpu < nr_cpus; ++cpu) {
                env->cpu[cpu].core_id   = cpu_map__get_core_id(cpu);
                env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu);
                env->cpu[cpu].die_id    = cpu_map__get_die_id(cpu);
        }

        env->nr_cpus_avail = nr_cpus;
        return 0;
}

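/* Fill env->arch from uname(2) when it was not read from a perf.data file. */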
static int perf_env__read_arch(struct perf_env *env)
{
        struct utsname uts;

        if (env->arch)
                return 0;

        if (!uname(&uts))
                env->arch = strdup(uts.machine);

        return env->arch ? 0 : -ENOMEM;
}

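/* Fall back to the number of locally present CPUs when the env has none. */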
static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
        if (env->nr_cpus_avail == 0)
                env->nr_cpus_avail = cpu__max_present_cpu();

        return env->nr_cpus_avail ? 0 : -ENOENT;
}

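/* Raw (un-normalized) architecture string, or "unknown" if it cannot be read. */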
const char *perf_env__raw_arch(struct perf_env *env)
{
        return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

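/* Number of available CPUs, or 0 if it cannot be determined. */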
int perf_env__nr_cpus_avail(struct perf_env *env)
{
        return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

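/* Free the strings owned by a cpu_cache_level entry, not the entry itself. */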
void cpu_cache_level__free(struct cpu_cache_level *cache)
{
        zfree(&cache->type);
        zfree(&cache->map);
        zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
        if (!strcmp(arch, "x86_64"))
                return "x86";
        if (arch[0] == 'i' && !strcmp(arch + 2, "86"))
                return "x86";
        if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
                return "sparc";
        if (!strcmp(arch, "aarch64") || !strcmp(arch, "arm64"))
                return "arm64";
        if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
                return "arm";
        if (!strncmp(arch, "s390", 4))
                return "s390";
        if (!strncmp(arch, "parisc", 6))
                return "parisc";
        if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
                return "powerpc";
        if (!strncmp(arch, "mips", 4))
                return "mips";
        if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
                return "sh";

        return arch;
}

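/*
 * Normalized architecture name, e.g. "x86" for both "i686" and "x86_64";
 * falls back to uname(2) when the env has no recorded arch.
 */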
const char *perf_env__arch(struct perf_env *env)
{
        struct utsname uts;
        char *arch_name;

        if (!env || !env->arch) { /* Assume local operation */
                if (uname(&uts) < 0)
                        return NULL;
                arch_name = uts.machine;
        } else
                arch_name = env->arch;

        return normalize_arch(arch_name);
}