linux/tools/perf/util/cpumap.c
// SPDX-License-Identifier: GPL-2.0
#include "util.h"
#include <api/fs/fs.h>
#include "../perf.h"
#include "cpumap.h"
#include <assert.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include "asm/bug.h"

#include "sane_ctype.h"

static int max_cpu_num;
static int max_present_cpu_num;
static int max_node_num;
static int *cpunode_map;

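/*
 * Fallback map covering cpus 0..N-1, where N is the number of online
 * cpus reported by sysconf(_SC_NPROCESSORS_ONLN).
 */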
static struct cpu_map *cpu_map__default_new(void)
{
        struct cpu_map *cpus;
        int nr_cpus;

        nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
        if (nr_cpus < 0)
                return NULL;

        cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
        if (cpus != NULL) {
                int i;
                for (i = 0; i < nr_cpus; ++i)
                        cpus->map[i] = i;

                cpus->nr = nr_cpus;
                refcount_set(&cpus->refcnt, 1);
        }

        return cpus;
}

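/*
 * Build a cpu_map from the first @nr_cpus entries of the scratch array
 * @tmp_cpus, so callers can over-allocate while parsing and trim the
 * result down here.
 */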
static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
{
        size_t payload_size = nr_cpus * sizeof(int);
        struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);

        if (cpus != NULL) {
                cpus->nr = nr_cpus;
                memcpy(cpus->map, tmp_cpus, payload_size);
                refcount_set(&cpus->refcnt, 1);
        }

        return cpus;
}

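/*
 * Parse a cpu list in the kernel's sysfs format (e.g. "0-7,16-23") from
 * @file, expanding ranges into individual cpu numbers.  An empty read
 * falls back to the default map of all online cpus.
 */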
struct cpu_map *cpu_map__read(FILE *file)
{
        struct cpu_map *cpus = NULL;
        int nr_cpus = 0;
        int *tmp_cpus = NULL, *tmp;
        int max_entries = 0;
        int n, cpu, prev;
        char sep;

        sep = 0;
        prev = -1;
        for (;;) {
                n = fscanf(file, "%u%c", &cpu, &sep);
                if (n <= 0)
                        break;
                if (prev >= 0) {
                        int new_max = nr_cpus + cpu - prev - 1;

                        if (new_max >= max_entries) {
                                max_entries = new_max + MAX_NR_CPUS / 2;
                                tmp = realloc(tmp_cpus, max_entries * sizeof(int));
                                if (tmp == NULL)
                                        goto out_free_tmp;
                                tmp_cpus = tmp;
                        }

                        while (++prev < cpu)
                                tmp_cpus[nr_cpus++] = prev;
                }
                if (nr_cpus == max_entries) {
                        max_entries += MAX_NR_CPUS;
                        tmp = realloc(tmp_cpus, max_entries * sizeof(int));
                        if (tmp == NULL)
                                goto out_free_tmp;
                        tmp_cpus = tmp;
                }

                tmp_cpus[nr_cpus++] = cpu;
                if (n == 2 && sep == '-')
                        prev = cpu;
                else
                        prev = -1;
                if (n == 1 || sep == '\n')
                        break;
        }

        if (nr_cpus > 0)
                cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
        else
                cpus = cpu_map__default_new();
out_free_tmp:
        free(tmp_cpus);
        return cpus;
}

static struct cpu_map *cpu_map__read_all_cpu_map(void)
{
        struct cpu_map *cpus = NULL;
        FILE *onlnf;

        onlnf = fopen("/sys/devices/system/cpu/online", "r");
        if (!onlnf)
                return cpu_map__default_new();

        cpus = cpu_map__read(onlnf);
        fclose(onlnf);
        return cpus;
}

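/*
 * Parse a user supplied cpu list such as "0,2-4" into a cpu_map; a NULL
 * @cpu_list yields the map of currently online cpus.  Typical usage
 * (illustrative sketch only):
 *
 *        struct cpu_map *map = cpu_map__new("0,2-4");
 *
 *        if (map) {
 *                ...
 *                cpu_map__put(map);
 *        }
 */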
struct cpu_map *cpu_map__new(const char *cpu_list)
{
        struct cpu_map *cpus = NULL;
        unsigned long start_cpu, end_cpu = 0;
        char *p = NULL;
        int i, nr_cpus = 0;
        int *tmp_cpus = NULL, *tmp;
        int max_entries = 0;

        if (!cpu_list)
                return cpu_map__read_all_cpu_map();

        if (!isdigit(*cpu_list))
                goto out;

        while (isdigit(*cpu_list)) {
                p = NULL;
                start_cpu = strtoul(cpu_list, &p, 0);
                if (start_cpu >= INT_MAX
                    || (*p != '\0' && *p != ',' && *p != '-'))
                        goto invalid;

                if (*p == '-') {
                        cpu_list = ++p;
                        p = NULL;
                        end_cpu = strtoul(cpu_list, &p, 0);

                        if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
                                goto invalid;

                        if (end_cpu < start_cpu)
                                goto invalid;
                } else {
                        end_cpu = start_cpu;
                }

                for (; start_cpu <= end_cpu; start_cpu++) {
                        /* check for duplicates */
                        for (i = 0; i < nr_cpus; i++)
                                if (tmp_cpus[i] == (int)start_cpu)
                                        goto invalid;

                        if (nr_cpus == max_entries) {
                                max_entries += MAX_NR_CPUS;
                                tmp = realloc(tmp_cpus, max_entries * sizeof(int));
                                if (tmp == NULL)
                                        goto invalid;
                                tmp_cpus = tmp;
                        }
                        tmp_cpus[nr_cpus++] = (int)start_cpu;
                }
                if (*p)
                        ++p;

                cpu_list = p;
        }

        if (nr_cpus > 0)
                cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
        else
                cpus = cpu_map__default_new();
invalid:
        free(tmp_cpus);
out:
        return cpus;
}

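/*
 * Convert a cpu_map_entries payload, an array of u16 cpu numbers handed
 * in via cpu_map__new_data(), into a cpu_map.
 */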
static struct cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
{
        struct cpu_map *map;

        map = cpu_map__empty_new(cpus->nr);
        if (map) {
                unsigned i;

                for (i = 0; i < cpus->nr; i++) {
                        /*
                         * Special treatment for -1, which is not a real cpu
                         * number: we need to use (int) -1 to initialize
                         * map[i], otherwise it would become 65535.
                         */
                        if (cpus->cpu[i] == (u16) -1)
                                map->map[i] = -1;
                        else
                                map->map[i] = (int) cpus->cpu[i];
                }
        }

        return map;
}

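/*
 * Convert a cpu_map_mask bitmap payload into a cpu_map: every set bit
 * becomes one map entry, so the result is already sorted by cpu number.
 */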
static struct cpu_map *cpu_map__from_mask(struct cpu_map_mask *mask)
{
        struct cpu_map *map;
        int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;

        nr = bitmap_weight(mask->mask, nbits);

        map = cpu_map__empty_new(nr);
        if (map) {
                int cpu, i = 0;

                for_each_set_bit(cpu, mask->mask, nbits)
                        map->map[i++] = cpu;
        }
        return map;
}

struct cpu_map *cpu_map__new_data(struct cpu_map_data *data)
{
        if (data->type == PERF_CPU_MAP__CPUS)
                return cpu_map__from_entries((struct cpu_map_entries *)data->data);
        else
                return cpu_map__from_mask((struct cpu_map_mask *)data->data);
}

size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
        char buf[BUFSIZE];

        cpu_map__snprint(map, buf, sizeof(buf));
        return fprintf(fp, "%s\n", buf);
#undef BUFSIZE
}

struct cpu_map *cpu_map__dummy_new(void)
{
        struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));

        if (cpus != NULL) {
                cpus->nr = 1;
                cpus->map[0] = -1;
                refcount_set(&cpus->refcnt, 1);
        }

        return cpus;
}

struct cpu_map *cpu_map__empty_new(int nr)
{
        struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);

        if (cpus != NULL) {
                int i;

                cpus->nr = nr;
                for (i = 0; i < nr; i++)
                        cpus->map[i] = -1;

                refcount_set(&cpus->refcnt, 1);
        }

        return cpus;
}

static void cpu_map__delete(struct cpu_map *map)
{
        if (map) {
                WARN_ONCE(refcount_read(&map->refcnt) != 0,
                          "cpu_map refcnt unbalanced\n");
                free(map);
        }
}

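/*
 * cpu_maps are reference counted: cpu_map__get() takes an extra
 * reference, cpu_map__put() drops one and frees the map when the count
 * reaches zero.
 */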
struct cpu_map *cpu_map__get(struct cpu_map *map)
{
        if (map)
                refcount_inc(&map->refcnt);
        return map;
}

void cpu_map__put(struct cpu_map *map)
{
        if (map && refcount_dec_and_test(&map->refcnt))
                cpu_map__delete(map);
}

static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
        char path[PATH_MAX];

        snprintf(path, PATH_MAX,
                "devices/system/cpu/cpu%d/topology/%s", cpu, name);

        return sysfs__read_int(path, value);
}

int cpu_map__get_socket_id(int cpu)
{
        int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value);
        return ret ?: value;
}

int cpu_map__get_socket(struct cpu_map *map, int idx, void *data __maybe_unused)
{
        int cpu;

        if (idx >= map->nr)
                return -1;

        cpu = map->map[idx];

        return cpu_map__get_socket_id(cpu);
}

static int cmp_ids(const void *a, const void *b)
{
        return *(int *)a - *(int *)b;
}

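/*
 * Build a new map @res holding the sorted, de-duplicated ids returned
 * by @f (e.g. socket or core ids) for every cpu in @cpus.
 */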
int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
                       int (*f)(struct cpu_map *map, int cpu, void *data),
                       void *data)
{
        struct cpu_map *c;
        int nr = cpus->nr;
        int cpu, s1, s2;

        /* allocate as much as possible */
        c = calloc(1, sizeof(*c) + nr * sizeof(int));
        if (!c)
                return -1;

        for (cpu = 0; cpu < nr; cpu++) {
                s1 = f(cpus, cpu, data);
                for (s2 = 0; s2 < c->nr; s2++) {
                        if (s1 == c->map[s2])
                                break;
                }
                if (s2 == c->nr) {
                        c->map[c->nr] = s1;
                        c->nr++;
                }
        }
        /* ensure we process ids in increasing order */
        qsort(c->map, c->nr, sizeof(int), cmp_ids);

        refcount_set(&c->refcnt, 1);
        *res = c;
        return 0;
}

int cpu_map__get_core_id(int cpu)
{
        int value, ret = cpu__get_topology_int(cpu, "core_id", &value);
        return ret ?: value;
}

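/*
 * The value returned below packs the socket id into the upper 16 bits
 * and the core id into the lower 16 bits, e.g. socket 1, core 2
 * encodes as (1 << 16) | 2 == 0x10002.
 */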
int cpu_map__get_core(struct cpu_map *map, int idx, void *data)
{
        int cpu, s;

        if (idx >= map->nr)
                return -1;

        cpu = map->map[idx];

        cpu = cpu_map__get_core_id(cpu);

        s = cpu_map__get_socket(map, idx, data);
        if (s == -1)
                return -1;

        /*
         * encode socket in upper 16 bits
         * core_id is relative to socket, and
         * we need a global id. So we combine
         * socket + core id.
         */
        return (s << 16) | (cpu & 0xffff);
}

int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
{
        return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
}

int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
{
        return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
}

/* setup simple routines to easily access node numbers given a cpu number */
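/*
 * get_max_num() parses a sysfs range string such as "0-63" or "0,2-63"
 * and returns the last number plus one, e.g. "0-63" yields 64.
 */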
static int get_max_num(char *path, int *max)
{
        size_t num;
        char *buf;
        int err = 0;

        if (filename__read_str(path, &buf, &num))
                return -1;

        buf[num] = '\0';

        /* start on the right, to find highest node num */
        while (--num) {
                if ((buf[num] == ',') || (buf[num] == '-')) {
                        num++;
                        break;
                }
        }
        if (sscanf(&buf[num], "%d", max) < 1) {
                err = -1;
                goto out;
        }

        /* convert from 0-based to 1-based */
        (*max)++;

out:
        free(buf);
        return err;
}

/* Determine highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
        const char *mnt;
        char path[PATH_MAX];
        int ret = -1;

        /* set up default */
        max_cpu_num = 4096;
        max_present_cpu_num = 4096;

        mnt = sysfs__mountpoint();
        if (!mnt)
                goto out;

        /* get the highest possible cpu number for a sparse allocation */
        ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
        if (ret == PATH_MAX) {
                pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
                goto out;
        }

        ret = get_max_num(path, &max_cpu_num);
        if (ret)
                goto out;

        /* get the highest present cpu number for a sparse allocation */
        ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
        if (ret == PATH_MAX) {
                pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
                goto out;
        }

        ret = get_max_num(path, &max_present_cpu_num);

out:
        if (ret)
                pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num);
}

/* Determine highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
        const char *mnt;
        char path[PATH_MAX];
        int ret = -1;

        /* set up default */
        max_node_num = 8;

        mnt = sysfs__mountpoint();
        if (!mnt)
                goto out;

        /* get the highest possible node number for a sparse allocation */
        ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
        if (ret == PATH_MAX) {
                pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
                goto out;
        }

        ret = get_max_num(path, &max_node_num);

out:
        if (ret)
                pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}

int cpu__max_node(void)
{
        if (unlikely(!max_node_num))
                set_max_node_num();

        return max_node_num;
}

int cpu__max_cpu(void)
{
        if (unlikely(!max_cpu_num))
                set_max_cpu_num();

        return max_cpu_num;
}

int cpu__max_present_cpu(void)
{
        if (unlikely(!max_present_cpu_num))
                set_max_cpu_num();

        return max_present_cpu_num;
}

int cpu__get_node(int cpu)
{
        if (unlikely(cpunode_map == NULL)) {
                pr_debug("cpu_map not initialized\n");
                return -1;
        }

        return cpunode_map[cpu];
}

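/*
 * Allocate the global cpu -> node lookup table, sized for the highest
 * possible cpu, with every entry initialized to -1 (node unknown).
 */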
static int init_cpunode_map(void)
{
        int i;

        set_max_cpu_num();
        set_max_node_num();

        cpunode_map = calloc(max_cpu_num, sizeof(int));
        if (!cpunode_map) {
                pr_err("%s: calloc failed\n", __func__);
                return -1;
        }

        for (i = 0; i < max_cpu_num; i++)
                cpunode_map[i] = -1;

        return 0;
}

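/*
 * Populate cpunode_map by scanning /sys/devices/system/node/: for each
 * nodeN directory, every cpuM symlink found inside marks cpu M as
 * belonging to node N.
 */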
int cpu__setup_cpunode_map(void)
{
        struct dirent *dent1, *dent2;
        DIR *dir1, *dir2;
        unsigned int cpu, mem;
        char buf[PATH_MAX];
        char path[PATH_MAX];
        const char *mnt;
        int n;

        /* initialize globals */
        if (init_cpunode_map())
                return -1;

        mnt = sysfs__mountpoint();
        if (!mnt)
                return 0;

        n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
        if (n == PATH_MAX) {
                pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
                return -1;
        }

        dir1 = opendir(path);
        if (!dir1)
                return 0;

        /* walk tree and setup map */
        while ((dent1 = readdir(dir1)) != NULL) {
                if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
                        continue;

                n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
                if (n == PATH_MAX) {
                        pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
                        continue;
                }

                dir2 = opendir(buf);
                if (!dir2)
                        continue;
                while ((dent2 = readdir(dir2)) != NULL) {
                        if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
                                continue;
                        cpunode_map[cpu] = mem;
                }
                closedir(dir2);
        }
        closedir(dir1);
        return 0;
}

bool cpu_map__has(struct cpu_map *cpus, int cpu)
{
        return cpu_map__idx(cpus, cpu) != -1;
}

int cpu_map__idx(struct cpu_map *cpus, int cpu)
{
        int i;

        for (i = 0; i < cpus->nr; ++i) {
                if (cpus->map[i] == cpu)
                        return i;
        }

        return -1;
}

int cpu_map__cpu(struct cpu_map *cpus, int idx)
{
        return cpus->map[idx];
}

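/*
 * Format @map as a cpu list string, collapsing consecutive cpus into
 * ranges, e.g. a map holding {0, 1, 2, 4} prints as "0-2,4".
 */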
size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)
{
        int i, cpu, start = -1;
        bool first = true;
        size_t ret = 0;

#define COMMA first ? "" : ","

        for (i = 0; i < map->nr + 1; i++) {
                bool last = i == map->nr;

                cpu = last ? INT_MAX : map->map[i];

                if (start == -1) {
                        start = i;
                        if (last) {
                                ret += snprintf(buf + ret, size - ret,
                                                "%s%d", COMMA,
                                                map->map[i]);
                        }
                } else if (((i - start) != (cpu - map->map[start])) || last) {
                        int end = i - 1;

                        if (start == end) {
                                ret += snprintf(buf + ret, size - ret,
                                                "%s%d", COMMA,
                                                map->map[start]);
                        } else {
                                ret += snprintf(buf + ret, size - ret,
                                                "%s%d-%d", COMMA,
                                                map->map[start], map->map[end]);
                        }
                        first = false;
                        start = i;
                }
        }

#undef COMMA

        pr_debug("cpumask list: %s\n", buf);
        return ret;
}

static char hex_char(unsigned char val)
{
        if (val < 10)
                return val + '0';
        if (val < 16)
                return val - 10 + 'a';
        return '?';
}

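/*
 * Format @map as a hex cpu mask string, one hex digit per four cpus
 * with a ',' every 32 cpus, e.g. a map holding {0, 1, 2, 4} prints as
 * "17".
 */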
size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
{
        int i, cpu;
        char *ptr = buf;
        unsigned char *bitmap;
        int last_cpu = cpu_map__cpu(map, map->nr - 1);

        bitmap = zalloc(last_cpu / 8 + 1);
        if (bitmap == NULL) {
                buf[0] = '\0';
                return 0;
        }

        for (i = 0; i < map->nr; i++) {
                cpu = cpu_map__cpu(map, i);
                bitmap[cpu / 8] |= 1 << (cpu % 8);
        }

        for (cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) {
                unsigned char bits = bitmap[cpu / 8];

                if (cpu % 8)
                        bits >>= 4;
                else
                        bits &= 0xf;

                *ptr++ = hex_char(bits);
                if ((cpu % 32) == 0 && cpu > 0)
                        *ptr++ = ',';
        }
        *ptr = '\0';
        free(bitmap);

        buf[size - 1] = '\0';
        return ptr - buf;
}