linux/tools/perf/util/env.c
// SPDX-License-Identifier: GPL-2.0
#include "cpumap.h"
#include "debug.h"
#include "env.h"
#include "util/header.h"
#include <linux/ctype.h>
#include <linux/zalloc.h>
#include "cgroup.h"
#include <errno.h>
#include <sys/utsname.h>
#include <stdlib.h>
#include <string.h>
#include "pmu.h"	/* struct perf_pmu, perf_pmu__scan() */
#include "strbuf.h"

struct perf_env perf_env;

#ifdef HAVE_LIBBPF_SUPPORT
#include "bpf-event.h"
#include <bpf/libbpf.h>

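/*
 * Index a bpf prog info node in the env->bpf_progs.infos rb-tree, keyed
 * by prog id, under the write side of the bpf_progs.lock rwsem.  A
 * duplicate id is reported with pr_debug() and the new node is dropped.
 */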
void perf_env__insert_bpf_prog_info(struct perf_env *env,
                                    struct bpf_prog_info_node *info_node)
{
        __u32 prog_id = info_node->info_linear->info.id;
        struct bpf_prog_info_node *node;
        struct rb_node *parent = NULL;
        struct rb_node **p;

        down_write(&env->bpf_progs.lock);
        p = &env->bpf_progs.infos.rb_node;

        while (*p != NULL) {
                parent = *p;
                node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
                if (prog_id < node->info_linear->info.id) {
                        p = &(*p)->rb_left;
                } else if (prog_id > node->info_linear->info.id) {
                        p = &(*p)->rb_right;
                } else {
                        pr_debug("duplicated bpf prog info %u\n", prog_id);
                        goto out;
                }
        }

        rb_link_node(&info_node->rb_node, parent, p);
        rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
        env->bpf_progs.infos_cnt++;
out:
        up_write(&env->bpf_progs.lock);
}

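/*
 * Binary-search the infos tree for a prog id, under the read side of
 * the lock.  Returns NULL when the id is not present.
 */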
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
                                                        __u32 prog_id)
{
        struct bpf_prog_info_node *node = NULL;
        struct rb_node *n;

        down_read(&env->bpf_progs.lock);
        n = env->bpf_progs.infos.rb_node;

        while (n) {
                node = rb_entry(n, struct bpf_prog_info_node, rb_node);
                if (prog_id < node->info_linear->info.id)
                        n = n->rb_left;
                else if (prog_id > node->info_linear->info.id)
                        n = n->rb_right;
                else
                        goto out;
        }
        node = NULL;

out:
        up_read(&env->bpf_progs.lock);
        return node;
}

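/*
 * The btfs rb-tree below mirrors the prog-info tree, but is keyed by
 * BTF id: insertion drops duplicates, lookup returns NULL on a miss.
 */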
void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
        struct rb_node *parent = NULL;
        __u32 btf_id = btf_node->id;
        struct btf_node *node;
        struct rb_node **p;

        down_write(&env->bpf_progs.lock);
        p = &env->bpf_progs.btfs.rb_node;

        while (*p != NULL) {
                parent = *p;
                node = rb_entry(parent, struct btf_node, rb_node);
                if (btf_id < node->id) {
                        p = &(*p)->rb_left;
                } else if (btf_id > node->id) {
                        p = &(*p)->rb_right;
                } else {
                        pr_debug("duplicated btf %u\n", btf_id);
                        goto out;
                }
        }

        rb_link_node(&btf_node->rb_node, parent, p);
        rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
        env->bpf_progs.btfs_cnt++;
out:
        up_write(&env->bpf_progs.lock);
}

struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
        struct btf_node *node = NULL;
        struct rb_node *n;

        down_read(&env->bpf_progs.lock);
        n = env->bpf_progs.btfs.rb_node;

        while (n) {
                node = rb_entry(n, struct btf_node, rb_node);
                if (btf_id < node->id)
                        n = n->rb_left;
                else if (btf_id > node->id)
                        n = n->rb_right;
                else
                        goto out;
        }
        node = NULL;

out:
        up_read(&env->bpf_progs.lock);
        return node;
}

/* purge data in the bpf_progs.infos and bpf_progs.btfs trees */
static void perf_env__purge_bpf(struct perf_env *env)
{
        struct rb_root *root;
        struct rb_node *next;

        down_write(&env->bpf_progs.lock);

        root = &env->bpf_progs.infos;
        next = rb_first(root);

        while (next) {
                struct bpf_prog_info_node *node;

                node = rb_entry(next, struct bpf_prog_info_node, rb_node);
                next = rb_next(&node->rb_node);
                rb_erase(&node->rb_node, root);
                free(node->info_linear);
                free(node);
        }

        env->bpf_progs.infos_cnt = 0;

        root = &env->bpf_progs.btfs;
        next = rb_first(root);

        while (next) {
                struct btf_node *node;

                node = rb_entry(next, struct btf_node, rb_node);
                next = rb_next(&node->rb_node);
                rb_erase(&node->rb_node, root);
                free(node);
        }

        env->bpf_progs.btfs_cnt = 0;

        up_write(&env->bpf_progs.lock);
}
#else // HAVE_LIBBPF_SUPPORT
static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
{
}
#endif // HAVE_LIBBPF_SUPPORT

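/*
 * Release everything a perf_env owns: the bpf/btf and cgroup trees, the
 * duplicated strings, and the per-node, per-cache and hybrid tables.
 * The perf_env struct itself is not freed.
 */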
void perf_env__exit(struct perf_env *env)
{
        int i;

        perf_env__purge_bpf(env);
        perf_env__purge_cgroups(env);
        zfree(&env->hostname);
        zfree(&env->os_release);
        zfree(&env->version);
        zfree(&env->arch);
        zfree(&env->cpu_desc);
        zfree(&env->cpuid);
        zfree(&env->cmdline);
        zfree(&env->cmdline_argv);
        zfree(&env->sibling_dies);
        zfree(&env->sibling_cores);
        zfree(&env->sibling_threads);
        zfree(&env->pmu_mappings);
        zfree(&env->cpu);
        zfree(&env->cpu_pmu_caps);
        zfree(&env->numa_map);

        for (i = 0; i < env->nr_numa_nodes; i++)
                perf_cpu_map__put(env->numa_nodes[i].map);
        zfree(&env->numa_nodes);

        for (i = 0; i < env->caches_cnt; i++)
                cpu_cache_level__free(&env->caches[i]);
        zfree(&env->caches);

        for (i = 0; i < env->nr_memory_nodes; i++)
                zfree(&env->memory_nodes[i].set);
        zfree(&env->memory_nodes);

        for (i = 0; i < env->nr_hybrid_nodes; i++) {
                zfree(&env->hybrid_nodes[i].pmu_name);
                zfree(&env->hybrid_nodes[i].cpus);
        }
        zfree(&env->hybrid_nodes);

        for (i = 0; i < env->nr_hybrid_cpc_nodes; i++) {
                zfree(&env->hybrid_cpc_nodes[i].cpu_pmu_caps);
                zfree(&env->hybrid_cpc_nodes[i].pmu_name);
        }
        zfree(&env->hybrid_cpc_nodes);
}

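/*
 * Only the bpf state and the kernel-mode sentinel need explicit setup:
 * kernel_is_64_bit starts at -1 ("unknown") so that
 * perf_env__kernel_is_64_bit() detects it lazily on first use.
 */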
void perf_env__init(struct perf_env *env)
{
#ifdef HAVE_LIBBPF_SUPPORT
        env->bpf_progs.infos = RB_ROOT;
        env->bpf_progs.btfs = RB_ROOT;
        init_rwsem(&env->bpf_progs.lock);
#endif
        env->kernel_is_64_bit = -1;
}

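/*
 * Classify the kernel as 64-bit by matching the raw architecture string
 * against the known 64-bit uname machine names.
 */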
static void perf_env__init_kernel_mode(struct perf_env *env)
{
        const char *arch = perf_env__raw_arch(env);

        if (!strncmp(arch, "x86_64", 6) || !strncmp(arch, "aarch64", 7) ||
            !strncmp(arch, "arm64", 5) || !strncmp(arch, "mips64", 6) ||
            !strncmp(arch, "parisc64", 8) || !strncmp(arch, "riscv64", 7) ||
            !strncmp(arch, "s390x", 5) || !strncmp(arch, "sparc64", 7))
                env->kernel_is_64_bit = 1;
        else
                env->kernel_is_64_bit = 0;
}

int perf_env__kernel_is_64_bit(struct perf_env *env)
{
        if (env->kernel_is_64_bit == -1)
                perf_env__init_kernel_mode(env);

        return env->kernel_is_64_bit;
}

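/*
 * Record a private copy of the perf command line: each argv string is
 * duplicated because the original vector gets moved around during
 * option parsing.
 */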
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
        int i;

        /* do not include NULL termination */
        env->cmdline_argv = calloc(argc, sizeof(char *));
        if (env->cmdline_argv == NULL)
                goto out_enomem;

        /*
         * Must copy argv contents because it gets moved around during option
         * parsing:
         */
        for (i = 0; i < argc ; i++) {
                env->cmdline_argv[i] = strdup(argv[i]);
                if (env->cmdline_argv[i] == NULL)
                        goto out_free;
        }

        env->nr_cmdline = argc;

        return 0;
out_free:
        zfree(&env->cmdline_argv);
out_enomem:
        return -ENOMEM;
}

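/*
 * Fill env->cpu[] with the core, socket and die id of every CPU up to
 * the highest present one.  Returns early if the map already exists.
 */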
int perf_env__read_cpu_topology_map(struct perf_env *env)
{
        int cpu, nr_cpus;

        if (env->cpu != NULL)
                return 0;

        if (env->nr_cpus_avail == 0)
                env->nr_cpus_avail = cpu__max_present_cpu();

        nr_cpus = env->nr_cpus_avail;
        if (nr_cpus == -1)
                return -EINVAL;

        env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
        if (env->cpu == NULL)
                return -ENOMEM;

        for (cpu = 0; cpu < nr_cpus; ++cpu) {
                env->cpu[cpu].core_id   = cpu_map__get_core_id(cpu);
                env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu);
                env->cpu[cpu].die_id    = cpu_map__get_die_id(cpu);
        }

        env->nr_cpus_avail = nr_cpus;
        return 0;
}

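/*
 * Collect the PMU mappings as one buffer of NUL-separated "type:name"
 * strings in env->pmu_mappings, with the count in nr_pmu_mappings.
 */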
int perf_env__read_pmu_mappings(struct perf_env *env)
{
        struct perf_pmu *pmu = NULL;
        u32 pmu_num = 0;
        struct strbuf sb;

        while ((pmu = perf_pmu__scan(pmu))) {
                if (!pmu->name)
                        continue;
                pmu_num++;
        }
        if (!pmu_num) {
                pr_debug("pmu mappings not available\n");
                return -ENOENT;
        }
        env->nr_pmu_mappings = pmu_num;

        if (strbuf_init(&sb, 128 * pmu_num) < 0)
                return -ENOMEM;

        while ((pmu = perf_pmu__scan(pmu))) {
                if (!pmu->name)
                        continue;
                if (strbuf_addf(&sb, "%u:%s", pmu->type, pmu->name) < 0)
                        goto error;
                /* include a NULL character at the end */
                if (strbuf_add(&sb, "", 1) < 0)
                        goto error;
        }

        env->pmu_mappings = strbuf_detach(&sb, NULL);

        return 0;

error:
        strbuf_release(&sb);
        return -1;
}

int perf_env__read_cpuid(struct perf_env *env)
{
        char cpuid[128];
        int err = get_cpuid(cpuid, sizeof(cpuid));

        if (err)
                return err;

        free(env->cpuid);
        env->cpuid = strdup(cpuid);
        if (env->cpuid == NULL)
                return -ENOMEM;
        return 0;
}

static int perf_env__read_arch(struct perf_env *env)
{
        struct utsname uts;

        if (env->arch)
                return 0;

        if (!uname(&uts))
                env->arch = strdup(uts.machine);

        return env->arch ? 0 : -ENOMEM;
}

static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
        if (env->nr_cpus_avail == 0)
                env->nr_cpus_avail = cpu__max_present_cpu();

        return env->nr_cpus_avail ? 0 : -ENOENT;
}

const char *perf_env__raw_arch(struct perf_env *env)
{
        return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}

int perf_env__nr_cpus_avail(struct perf_env *env)
{
        return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}

void cpu_cache_level__free(struct cpu_cache_level *cache)
{
        zfree(&cache->type);
        zfree(&cache->map);
        zfree(&cache->size);
}

/*
 * Return architecture name in a normalized form.
 * The conversion logic comes from the Makefile.
 */
static const char *normalize_arch(char *arch)
{
        if (!strcmp(arch, "x86_64"))
                return "x86";
        /* i?86: matches i386, i486, i586 and i686 */
        if (arch[0] == 'i' && arch[2] == '8' && arch[3] == '6')
                return "x86";
        if (!strcmp(arch, "sun4u") || !strncmp(arch, "sparc", 5))
                return "sparc";
        if (!strncmp(arch, "aarch64", 7) || !strncmp(arch, "arm64", 5))
                return "arm64";
        if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110"))
                return "arm";
        if (!strncmp(arch, "s390", 4))
                return "s390";
        if (!strncmp(arch, "parisc", 6))
                return "parisc";
        if (!strncmp(arch, "powerpc", 7) || !strncmp(arch, "ppc", 3))
                return "powerpc";
        if (!strncmp(arch, "mips", 4))
                return "mips";
        if (!strncmp(arch, "sh", 2) && isdigit(arch[2]))
                return "sh";

        return arch;
}

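/*
 * Return the normalized architecture name, falling back to (and
 * caching) the local uname machine string when the env carries no
 * header data.
 */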
const char *perf_env__arch(struct perf_env *env)
{
        char *arch_name;

        if (!env || !env->arch) { /* Assume local operation */
                static struct utsname uts = { .machine[0] = '\0', };
                if (uts.machine[0] == '\0' && uname(&uts) < 0)
                        return NULL;
                arch_name = uts.machine;
        } else
                arch_name = env->arch;

        return normalize_arch(arch_name);
}

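/*
 * The accessors below share one pattern: when a field was not populated
 * from a perf.data header, assume a local session and read it from the
 * running system on first use.
 */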
const char *perf_env__cpuid(struct perf_env *env)
{
        int status;

        if (!env)
                return NULL;

        if (!env->cpuid) { /* Assume local operation */
                status = perf_env__read_cpuid(env);
                if (status)
                        return NULL;
        }

        return env->cpuid;
}

int perf_env__nr_pmu_mappings(struct perf_env *env)
{
        int status;

        if (!env)
                return 0;

        if (!env->nr_pmu_mappings) { /* Assume local operation */
                status = perf_env__read_pmu_mappings(env);
                if (status)
                        return 0;
        }

        return env->nr_pmu_mappings;
}

const char *perf_env__pmu_mappings(struct perf_env *env)
{
        int status;

        if (!env)
                return NULL;

        if (!env->pmu_mappings) { /* Assume local operation */
                status = perf_env__read_pmu_mappings(env);
                if (status)
                        return NULL;
        }

        return env->pmu_mappings;
}

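/*
 * Map a CPU number to its NUMA node.  The cpu -> node table is built
 * lazily on the first call from the per-node CPU maps; CPUs that belong
 * to no node map, or fall outside the table, resolve to -1.
 */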
int perf_env__numa_node(struct perf_env *env, int cpu)
{
        if (!env->nr_numa_map) {
                struct numa_node *nn;
                int i, nr = 0;

                for (i = 0; i < env->nr_numa_nodes; i++) {
                        nn = &env->numa_nodes[i];
                        nr = max(nr, perf_cpu_map__max(nn->map));
                }

                nr++;

                /*
                 * Initialize the whole numa_map array so that missing
                 * cpus resolve to node -1.
                 */
                env->numa_map = malloc(nr * sizeof(int));
                if (!env->numa_map)
                        return -1;

                for (i = 0; i < nr; i++)
                        env->numa_map[i] = -1;

                env->nr_numa_map = nr;

                for (i = 0; i < env->nr_numa_nodes; i++) {
                        int tmp, j;

                        nn = &env->numa_nodes[i];
                        perf_cpu_map__for_each_cpu(j, tmp, nn->map)
                                env->numa_map[j] = i;
                }
        }

        return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
}