linux/tools/perf/util/cpumap.c
#include "util.h"
#include <api/fs/fs.h>
#include "../perf.h"
#include "cpumap.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include "asm/bug.h"

static int max_cpu_num;
static int max_node_num;
static int *cpunode_map;

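/* Default map: one entry per online cpu as reported by sysconf(), numbered 0..nr-1. */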
static struct cpu_map *cpu_map__default_new(void)
{
        struct cpu_map *cpus;
        int nr_cpus;

        nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
        if (nr_cpus < 0)
                return NULL;

        cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
        if (cpus != NULL) {
                int i;
                for (i = 0; i < nr_cpus; ++i)
                        cpus->map[i] = i;

                cpus->nr = nr_cpus;
                atomic_set(&cpus->refcnt, 1);
        }

        return cpus;
}

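/* Allocate a cpu_map sized exactly for nr_cpus entries and copy them in from tmp_cpus. */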
static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
{
        size_t payload_size = nr_cpus * sizeof(int);
        struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);

        if (cpus != NULL) {
                cpus->nr = nr_cpus;
                memcpy(cpus->map, tmp_cpus, payload_size);
                atomic_set(&cpus->refcnt, 1);
        }

        return cpus;
}

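/*
 * Parse a cpu list in the sysfs format, e.g. "0-3,7", from @file and expand
 * the ranges into individual cpu numbers.  Falls back to the default online
 * map if nothing could be parsed.
 */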
struct cpu_map *cpu_map__read(FILE *file)
{
        struct cpu_map *cpus = NULL;
        int nr_cpus = 0;
        int *tmp_cpus = NULL, *tmp;
        int max_entries = 0;
        int n, cpu, prev;
        char sep;

        sep = 0;
        prev = -1;
        for (;;) {
                n = fscanf(file, "%u%c", &cpu, &sep);
                if (n <= 0)
                        break;
                if (prev >= 0) {
                        int new_max = nr_cpus + cpu - prev - 1;

                        if (new_max >= max_entries) {
                                max_entries = new_max + MAX_NR_CPUS / 2;
                                tmp = realloc(tmp_cpus, max_entries * sizeof(int));
                                if (tmp == NULL)
                                        goto out_free_tmp;
                                tmp_cpus = tmp;
                        }

                        while (++prev < cpu)
                                tmp_cpus[nr_cpus++] = prev;
                }
                if (nr_cpus == max_entries) {
                        max_entries += MAX_NR_CPUS;
                        tmp = realloc(tmp_cpus, max_entries * sizeof(int));
                        if (tmp == NULL)
                                goto out_free_tmp;
                        tmp_cpus = tmp;
                }

                tmp_cpus[nr_cpus++] = cpu;
                if (n == 2 && sep == '-')
                        prev = cpu;
                else
                        prev = -1;
                if (n == 1 || sep == '\n')
                        break;
        }

        if (nr_cpus > 0)
                cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
        else
                cpus = cpu_map__default_new();
out_free_tmp:
        free(tmp_cpus);
        return cpus;
}

static struct cpu_map *cpu_map__read_all_cpu_map(void)
{
        struct cpu_map *cpus = NULL;
        FILE *onlnf;

        onlnf = fopen("/sys/devices/system/cpu/online", "r");
        if (!onlnf)
                return cpu_map__default_new();

        cpus = cpu_map__read(onlnf);
        fclose(onlnf);
        return cpus;
}

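/*
 * Build a cpu_map from a user supplied cpu list such as "0,2-4".  A NULL
 * list falls back to the map of all online cpus.
 */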
struct cpu_map *cpu_map__new(const char *cpu_list)
{
        struct cpu_map *cpus = NULL;
        unsigned long start_cpu, end_cpu = 0;
        char *p = NULL;
        int i, nr_cpus = 0;
        int *tmp_cpus = NULL, *tmp;
        int max_entries = 0;

        if (!cpu_list)
                return cpu_map__read_all_cpu_map();

        if (!isdigit(*cpu_list))
                goto out;

        while (isdigit(*cpu_list)) {
                p = NULL;
                start_cpu = strtoul(cpu_list, &p, 0);
                if (start_cpu >= INT_MAX
                    || (*p != '\0' && *p != ',' && *p != '-'))
                        goto invalid;

                if (*p == '-') {
                        cpu_list = ++p;
                        p = NULL;
                        end_cpu = strtoul(cpu_list, &p, 0);

                        if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
                                goto invalid;

                        if (end_cpu < start_cpu)
                                goto invalid;
                } else {
                        end_cpu = start_cpu;
                }

                for (; start_cpu <= end_cpu; start_cpu++) {
                        /* check for duplicates */
                        for (i = 0; i < nr_cpus; i++)
                                if (tmp_cpus[i] == (int)start_cpu)
                                        goto invalid;

                        if (nr_cpus == max_entries) {
                                max_entries += MAX_NR_CPUS;
                                tmp = realloc(tmp_cpus, max_entries * sizeof(int));
                                if (tmp == NULL)
                                        goto invalid;
                                tmp_cpus = tmp;
                        }
                        tmp_cpus[nr_cpus++] = (int)start_cpu;
                }
                if (*p)
                        ++p;

                cpu_list = p;
        }

        if (nr_cpus > 0)
                cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
        else
                cpus = cpu_map__default_new();
invalid:
        free(tmp_cpus);
out:
        return cpus;
}

static struct cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
{
        struct cpu_map *map;

        map = cpu_map__empty_new(cpus->nr);
        if (map) {
                unsigned i;

                for (i = 0; i < cpus->nr; i++) {
                        /*
                         * Special treatment for -1, which is not a real cpu
                         * number: we must store (int) -1 in map[i], otherwise
                         * the u16 value would be widened to 65535.
                         */
                        if (cpus->cpu[i] == (u16) -1)
                                map->map[i] = -1;
                        else
                                map->map[i] = (int) cpus->cpu[i];
                }
        }

        return map;
}

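/* Build a cpu_map with one entry per bit set in the supplied cpu bitmask. */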
static struct cpu_map *cpu_map__from_mask(struct cpu_map_mask *mask)
{
        struct cpu_map *map;
        int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;

        nr = bitmap_weight(mask->mask, nbits);

        map = cpu_map__empty_new(nr);
        if (map) {
                int cpu, i = 0;

                for_each_set_bit(cpu, mask->mask, nbits)
                        map->map[i++] = cpu;
        }
        return map;
}

struct cpu_map *cpu_map__new_data(struct cpu_map_data *data)
{
        if (data->type == PERF_CPU_MAP__CPUS)
                return cpu_map__from_entries((struct cpu_map_entries *)data->data);
        else
                return cpu_map__from_mask((struct cpu_map_mask *)data->data);
}

size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
        char buf[BUFSIZE];

        cpu_map__snprint(map, buf, sizeof(buf));
        return fprintf(fp, "%s\n", buf);
#undef BUFSIZE
}

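/* Single-entry map holding cpu -1, i.e. no particular cpu. */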
struct cpu_map *cpu_map__dummy_new(void)
{
        struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));

        if (cpus != NULL) {
                cpus->nr = 1;
                cpus->map[0] = -1;
                atomic_set(&cpus->refcnt, 1);
        }

        return cpus;
}

struct cpu_map *cpu_map__empty_new(int nr)
{
        struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);

        if (cpus != NULL) {
                int i;

                cpus->nr = nr;
                for (i = 0; i < nr; i++)
                        cpus->map[i] = -1;

                atomic_set(&cpus->refcnt, 1);
        }

        return cpus;
}

static void cpu_map__delete(struct cpu_map *map)
{
        if (map) {
                WARN_ONCE(atomic_read(&map->refcnt) != 0,
                          "cpu_map refcnt unbalanced\n");
                free(map);
        }
}

struct cpu_map *cpu_map__get(struct cpu_map *map)
{
        if (map)
                atomic_inc(&map->refcnt);
        return map;
}

void cpu_map__put(struct cpu_map *map)
{
        if (map && atomic_dec_and_test(&map->refcnt))
                cpu_map__delete(map);
}

static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
        char path[PATH_MAX];

        snprintf(path, PATH_MAX,
                "devices/system/cpu/cpu%d/topology/%s", cpu, name);

        return sysfs__read_int(path, value);
}

int cpu_map__get_socket_id(int cpu)
{
        int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value);
        return ret ?: value;
}

int cpu_map__get_socket(struct cpu_map *map, int idx, void *data __maybe_unused)
{
        int cpu;

        if (idx >= map->nr)
                return -1;

        cpu = map->map[idx];

        return cpu_map__get_socket_id(cpu);
}

static int cmp_ids(const void *a, const void *b)
{
        return *(int *)a - *(int *)b;
}

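/*
 * Build in *res the sorted set of unique ids returned by f() for each cpu
 * index in @cpus, e.g. the sockets or cores the map spans.
 */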
int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
                       int (*f)(struct cpu_map *map, int cpu, void *data),
                       void *data)
{
        struct cpu_map *c;
        int nr = cpus->nr;
        int cpu, s1, s2;

        /* allocate as much as possible */
        c = calloc(1, sizeof(*c) + nr * sizeof(int));
        if (!c)
                return -1;

        for (cpu = 0; cpu < nr; cpu++) {
                s1 = f(cpus, cpu, data);
                for (s2 = 0; s2 < c->nr; s2++) {
                        if (s1 == c->map[s2])
                                break;
                }
                if (s2 == c->nr) {
                        c->map[c->nr] = s1;
                        c->nr++;
                }
        }
        /* ensure we process ids in increasing order */
        qsort(c->map, c->nr, sizeof(int), cmp_ids);

        atomic_set(&c->refcnt, 1);
        *res = c;
        return 0;
}

int cpu_map__get_core_id(int cpu)
{
        int value, ret = cpu__get_topology_int(cpu, "core_id", &value);
        return ret ?: value;
}

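/*
 * Return a system-wide core identifier for the cpu at @idx: socket id in the
 * upper 16 bits, core id (only unique within a socket) in the lower 16 bits.
 */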
int cpu_map__get_core(struct cpu_map *map, int idx, void *data)
{
        int cpu, s;

        if (idx >= map->nr)
                return -1;

        cpu = map->map[idx];

        cpu = cpu_map__get_core_id(cpu);

        s = cpu_map__get_socket(map, idx, data);
        if (s == -1)
                return -1;

        /*
         * Encode the socket in the upper 16 bits: core_id is only relative
         * to its socket, so combining socket + core id gives a global id.
         */
        return (s << 16) | (cpu & 0xffff);
}

int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
{
        return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
}

int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
{
        return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
}

/* setup simple routines to easily access node numbers given a cpu number */

/*
 * Read a sysfs range list such as "0-15" from @path and store in *max the
 * number of entries implied by its highest element (highest + 1).
 */
static int get_max_num(char *path, int *max)
{
        size_t num;
        char *buf;
        int err = 0;

        if (filename__read_str(path, &buf, &num))
                return -1;

        buf[num] = '\0';

        /* start on the right, to find the highest num in the list */
        while (--num) {
                if ((buf[num] == ',') || (buf[num] == '-')) {
                        num++;
                        break;
                }
        }
        if (sscanf(&buf[num], "%d", max) < 1) {
                err = -1;
                goto out;
        }

        /* convert from 0-based to 1-based */
        (*max)++;

out:
        free(buf);
        return err;
}

/* Determine highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
        const char *mnt;
        char path[PATH_MAX];
        int ret = -1;

        /* set up default */
        max_cpu_num = 4096;

        mnt = sysfs__mountpoint();
        if (!mnt)
                goto out;

        /* get the highest possible cpu number for a sparse allocation */
        ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
        if (ret == PATH_MAX) {
                pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
                goto out;
        }

        ret = get_max_num(path, &max_cpu_num);

out:
        if (ret)
                pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num);
}

/* Determine highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
        const char *mnt;
        char path[PATH_MAX];
        int ret = -1;

        /* set up default */
        max_node_num = 8;

        mnt = sysfs__mountpoint();
        if (!mnt)
                goto out;

        /* get the highest possible node number for a sparse allocation */
        ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
        if (ret == PATH_MAX) {
                pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
                goto out;
        }

        ret = get_max_num(path, &max_node_num);

out:
        if (ret)
                pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}

int cpu__max_node(void)
{
        if (unlikely(!max_node_num))
                set_max_node_num();

        return max_node_num;
}

int cpu__max_cpu(void)
{
        if (unlikely(!max_cpu_num))
                set_max_cpu_num();

        return max_cpu_num;
}

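/* Return the NUMA node for @cpu, as recorded by cpu__setup_cpunode_map(). */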
int cpu__get_node(int cpu)
{
        if (unlikely(cpunode_map == NULL)) {
                pr_debug("cpu_map not initialized\n");
                return -1;
        }

        return cpunode_map[cpu];
}

static int init_cpunode_map(void)
{
        int i;

        set_max_cpu_num();
        set_max_node_num();

        cpunode_map = calloc(max_cpu_num, sizeof(int));
        if (!cpunode_map) {
                pr_err("%s: calloc failed\n", __func__);
                return -1;
        }

        for (i = 0; i < max_cpu_num; i++)
                cpunode_map[i] = -1;

        return 0;
}

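/*
 * Walk the /sys/devices/system/node/nodeN/cpuM entries and record each cpu's
 * node in cpunode_map.
 */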
int cpu__setup_cpunode_map(void)
{
        struct dirent *dent1, *dent2;
        DIR *dir1, *dir2;
        unsigned int cpu, mem;
        char buf[PATH_MAX];
        char path[PATH_MAX];
        const char *mnt;
        int n;

        /* initialize globals */
        if (init_cpunode_map())
                return -1;

        mnt = sysfs__mountpoint();
        if (!mnt)
                return 0;

        n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
        if (n == PATH_MAX) {
                pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
                return -1;
        }

        dir1 = opendir(path);
        if (!dir1)
                return 0;

        /* walk tree and setup map */
        while ((dent1 = readdir(dir1)) != NULL) {
                if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
                        continue;

                n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
                if (n == PATH_MAX) {
                        pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
                        continue;
                }

                dir2 = opendir(buf);
                if (!dir2)
                        continue;
                while ((dent2 = readdir(dir2)) != NULL) {
                        if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
                                continue;
                        cpunode_map[cpu] = mem;
                }
                closedir(dir2);
        }
        closedir(dir1);
        return 0;
}

bool cpu_map__has(struct cpu_map *cpus, int cpu)
{
        return cpu_map__idx(cpus, cpu) != -1;
}

int cpu_map__idx(struct cpu_map *cpus, int cpu)
{
        int i;

        for (i = 0; i < cpus->nr; ++i) {
                if (cpus->map[i] == cpu)
                        return i;
        }

        return -1;
}

int cpu_map__cpu(struct cpu_map *cpus, int idx)
{
        return cpus->map[idx];
}

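/*
 * Format the map into @buf as a comma separated list of ranges, e.g.
 * "0-3,6,8-11".
 */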
size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)
{
        int i, cpu, start = -1;
        bool first = true;
        size_t ret = 0;

#define COMMA first ? "" : ","

        for (i = 0; i < map->nr + 1; i++) {
                bool last = i == map->nr;

                cpu = last ? INT_MAX : map->map[i];

                if (start == -1) {
                        start = i;
                        if (last) {
                                ret += snprintf(buf + ret, size - ret,
                                                "%s%d", COMMA,
                                                map->map[i]);
                        }
                } else if (((i - start) != (cpu - map->map[start])) || last) {
                        int end = i - 1;

                        if (start == end) {
                                ret += snprintf(buf + ret, size - ret,
                                                "%s%d", COMMA,
                                                map->map[start]);
                        } else {
                                ret += snprintf(buf + ret, size - ret,
                                                "%s%d-%d", COMMA,
                                                map->map[start], map->map[end]);
                        }
                        first = false;
                        start = i;
                }
        }

#undef COMMA

        pr_debug("cpumask list: %s\n", buf);
        return ret;
}