linux/tools/perf/util/cpumap.c
#include "util.h"
#include <api/fs/fs.h>
#include "../perf.h"
#include "cpumap.h"
#include <assert.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bitmap.h>
#include "asm/bug.h"

#include "sane_ctype.h"

static int max_cpu_num;
static int max_present_cpu_num;
static int max_node_num;
static int *cpunode_map;

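/*
 * Fallback map built from sysconf(_SC_NPROCESSORS_ONLN): index i holds
 * cpu i.  Used when no explicit cpu list is available.
 */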
static struct cpu_map *cpu_map__default_new(void)
{
        struct cpu_map *cpus;
        int nr_cpus;

        nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
        if (nr_cpus < 0)
                return NULL;

        cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
        if (cpus != NULL) {
                int i;
                for (i = 0; i < nr_cpus; ++i)
                        cpus->map[i] = i;

                cpus->nr = nr_cpus;
                refcount_set(&cpus->refcnt, 1);
        }

        return cpus;
}

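/* Allocate a cpu_map holding exactly nr_cpus entries and copy them in. */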
static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
{
        size_t payload_size = nr_cpus * sizeof(int);
        struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);

        if (cpus != NULL) {
                cpus->nr = nr_cpus;
                memcpy(cpus->map, tmp_cpus, payload_size);
                refcount_set(&cpus->refcnt, 1);
        }

        return cpus;
}

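/*
 * Parse a cpu list in kernel cpulist format (e.g. "0-3,8") from @file,
 * expanding ranges into individual cpu numbers.  An empty list yields the
 * default map.
 */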
struct cpu_map *cpu_map__read(FILE *file)
{
        struct cpu_map *cpus = NULL;
        int nr_cpus = 0;
        int *tmp_cpus = NULL, *tmp;
        int max_entries = 0;
        int n, cpu, prev;
        char sep;

        sep = 0;
        prev = -1;
        for (;;) {
                n = fscanf(file, "%u%c", &cpu, &sep);
                if (n <= 0)
                        break;
                if (prev >= 0) {
                        int new_max = nr_cpus + cpu - prev - 1;

                        if (new_max >= max_entries) {
                                max_entries = new_max + MAX_NR_CPUS / 2;
                                tmp = realloc(tmp_cpus, max_entries * sizeof(int));
                                if (tmp == NULL)
                                        goto out_free_tmp;
                                tmp_cpus = tmp;
                        }

                        while (++prev < cpu)
                                tmp_cpus[nr_cpus++] = prev;
                }
                if (nr_cpus == max_entries) {
                        max_entries += MAX_NR_CPUS;
                        tmp = realloc(tmp_cpus, max_entries * sizeof(int));
                        if (tmp == NULL)
                                goto out_free_tmp;
                        tmp_cpus = tmp;
                }

                tmp_cpus[nr_cpus++] = cpu;
                if (n == 2 && sep == '-')
                        prev = cpu;
                else
                        prev = -1;
                if (n == 1 || sep == '\n')
                        break;
        }

        if (nr_cpus > 0)
                cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
        else
                cpus = cpu_map__default_new();
out_free_tmp:
        free(tmp_cpus);
        return cpus;
}

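/* Build a map of the online CPUs from /sys/devices/system/cpu/online. */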
static struct cpu_map *cpu_map__read_all_cpu_map(void)
{
        struct cpu_map *cpus = NULL;
        FILE *onlnf;

        onlnf = fopen("/sys/devices/system/cpu/online", "r");
        if (!onlnf)
                return cpu_map__default_new();

        cpus = cpu_map__read(onlnf);
        fclose(onlnf);
        return cpus;
}

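/*
 * Parse a user supplied cpu list such as "0,2-4".  A NULL list means
 * "all online CPUs"; duplicate or malformed entries invalidate the whole
 * list and NULL is returned.
 */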
struct cpu_map *cpu_map__new(const char *cpu_list)
{
        struct cpu_map *cpus = NULL;
        unsigned long start_cpu, end_cpu = 0;
        char *p = NULL;
        int i, nr_cpus = 0;
        int *tmp_cpus = NULL, *tmp;
        int max_entries = 0;

        if (!cpu_list)
                return cpu_map__read_all_cpu_map();

        if (!isdigit(*cpu_list))
                goto out;

        while (isdigit(*cpu_list)) {
                p = NULL;
                start_cpu = strtoul(cpu_list, &p, 0);
                if (start_cpu >= INT_MAX
                    || (*p != '\0' && *p != ',' && *p != '-'))
                        goto invalid;

                if (*p == '-') {
                        cpu_list = ++p;
                        p = NULL;
                        end_cpu = strtoul(cpu_list, &p, 0);

                        if (end_cpu >= INT_MAX || (*p != '\0' && *p != ','))
                                goto invalid;

                        if (end_cpu < start_cpu)
                                goto invalid;
                } else {
                        end_cpu = start_cpu;
                }

                for (; start_cpu <= end_cpu; start_cpu++) {
                        /* check for duplicates */
                        for (i = 0; i < nr_cpus; i++)
                                if (tmp_cpus[i] == (int)start_cpu)
                                        goto invalid;

                        if (nr_cpus == max_entries) {
                                max_entries += MAX_NR_CPUS;
                                tmp = realloc(tmp_cpus, max_entries * sizeof(int));
                                if (tmp == NULL)
                                        goto invalid;
                                tmp_cpus = tmp;
                        }
                        tmp_cpus[nr_cpus++] = (int)start_cpu;
                }
                if (*p)
                        ++p;

                cpu_list = p;
        }

        if (nr_cpus > 0)
                cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
        else
                cpus = cpu_map__default_new();
invalid:
        free(tmp_cpus);
out:
        return cpus;
}

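/*
 * The next two helpers rebuild a cpu_map from the two encodings used in
 * struct cpu_map_data: an explicit list of u16 cpu numbers, or a bitmask
 * of longs.
 */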
static struct cpu_map *cpu_map__from_entries(struct cpu_map_entries *cpus)
{
        struct cpu_map *map;

        map = cpu_map__empty_new(cpus->nr);
        if (map) {
                unsigned i;

                for (i = 0; i < cpus->nr; i++) {
                        /*
                         * Special treatment for -1, which is not real cpu number,
                         * and we need to use (int) -1 to initialize map[i],
                         * otherwise it would become 65535.
                         */
                        if (cpus->cpu[i] == (u16) -1)
                                map->map[i] = -1;
                        else
                                map->map[i] = (int) cpus->cpu[i];
                }
        }

        return map;
}

static struct cpu_map *cpu_map__from_mask(struct cpu_map_mask *mask)
{
        struct cpu_map *map;
        int nr, nbits = mask->nr * mask->long_size * BITS_PER_BYTE;

        nr = bitmap_weight(mask->mask, nbits);

        map = cpu_map__empty_new(nr);
        if (map) {
                int cpu, i = 0;

                for_each_set_bit(cpu, mask->mask, nbits)
                        map->map[i++] = cpu;
        }
        return map;
}

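/* Pick the decoder matching the encoding recorded in @data->type. */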
struct cpu_map *cpu_map__new_data(struct cpu_map_data *data)
{
        if (data->type == PERF_CPU_MAP__CPUS)
                return cpu_map__from_entries((struct cpu_map_entries *)data->data);
        else
                return cpu_map__from_mask((struct cpu_map_mask *)data->data);
}

size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
{
#define BUFSIZE 1024
        char buf[BUFSIZE];

        cpu_map__snprint(map, buf, sizeof(buf));
        return fprintf(fp, "%s\n", buf);
#undef BUFSIZE
}

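/* Map with a single -1 entry, i.e. "no particular CPU". */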
struct cpu_map *cpu_map__dummy_new(void)
{
        struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));

        if (cpus != NULL) {
                cpus->nr = 1;
                cpus->map[0] = -1;
                refcount_set(&cpus->refcnt, 1);
        }

        return cpus;
}

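/* Allocate a map with @nr entries, all initialized to -1. */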
struct cpu_map *cpu_map__empty_new(int nr)
{
        struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int) * nr);

        if (cpus != NULL) {
                int i;

                cpus->nr = nr;
                for (i = 0; i < nr; i++)
                        cpus->map[i] = -1;

                refcount_set(&cpus->refcnt, 1);
        }

        return cpus;
}

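/*
 * cpu_map lifetime is reference counted: cpu_map__get() takes a reference,
 * cpu_map__put() drops one and frees the map once the count hits zero.
 */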
static void cpu_map__delete(struct cpu_map *map)
{
        if (map) {
                WARN_ONCE(refcount_read(&map->refcnt) != 0,
                          "cpu_map refcnt unbalanced\n");
                free(map);
        }
}

struct cpu_map *cpu_map__get(struct cpu_map *map)
{
        if (map)
                refcount_inc(&map->refcnt);
        return map;
}

void cpu_map__put(struct cpu_map *map)
{
        if (map && refcount_dec_and_test(&map->refcnt))
                cpu_map__delete(map);
}

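/* Read an integer attribute from the cpu's sysfs topology directory. */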
static int cpu__get_topology_int(int cpu, const char *name, int *value)
{
        char path[PATH_MAX];

        snprintf(path, PATH_MAX,
                "devices/system/cpu/cpu%d/topology/%s", cpu, name);

        return sysfs__read_int(path, value);
}

int cpu_map__get_socket_id(int cpu)
{
        int value, ret = cpu__get_topology_int(cpu, "physical_package_id", &value);
        return ret ?: value;
}

int cpu_map__get_socket(struct cpu_map *map, int idx, void *data __maybe_unused)
{
        int cpu;

        if (idx > map->nr)
                return -1;

        cpu = map->map[idx];

        return cpu_map__get_socket_id(cpu);
}

static int cmp_ids(const void *a, const void *b)
{
        return *(int *)a - *(int *)b;
}

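/*
 * Build a sorted map of the distinct ids that @f (e.g. socket or core id)
 * returns for each cpu in @cpus.
 */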
int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
                       int (*f)(struct cpu_map *map, int cpu, void *data),
                       void *data)
{
        struct cpu_map *c;
        int nr = cpus->nr;
        int cpu, s1, s2;

        /* allocate as much as possible */
        c = calloc(1, sizeof(*c) + nr * sizeof(int));
        if (!c)
                return -1;

        for (cpu = 0; cpu < nr; cpu++) {
                s1 = f(cpus, cpu, data);
                for (s2 = 0; s2 < c->nr; s2++) {
                        if (s1 == c->map[s2])
                                break;
                }
                if (s2 == c->nr) {
                        c->map[c->nr] = s1;
                        c->nr++;
                }
        }
        /* ensure we process id in increasing order */
        qsort(c->map, c->nr, sizeof(int), cmp_ids);

        refcount_set(&c->refcnt, 1);
        *res = c;
        return 0;
}

int cpu_map__get_core_id(int cpu)
{
        int value, ret = cpu__get_topology_int(cpu, "core_id", &value);
        return ret ?: value;
}

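/* Identify the core of the cpu at @idx; socket and core id are packed into one int. */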
int cpu_map__get_core(struct cpu_map *map, int idx, void *data)
{
        int cpu, s;

        if (idx > map->nr)
                return -1;

        cpu = map->map[idx];

        cpu = cpu_map__get_core_id(cpu);

        s = cpu_map__get_socket(map, idx, data);
        if (s == -1)
                return -1;

        /*
         * encode socket in upper 16 bits
         * core_id is relative to socket, and
         * we need a global id. So we combine
         * socket + core id
         */
        return (s << 16) | (cpu & 0xffff);
}

int cpu_map__build_socket_map(struct cpu_map *cpus, struct cpu_map **sockp)
{
        return cpu_map__build_map(cpus, sockp, cpu_map__get_socket, NULL);
}

int cpu_map__build_core_map(struct cpu_map *cpus, struct cpu_map **corep)
{
        return cpu_map__build_map(cpus, corep, cpu_map__get_core, NULL);
}

/* setup simple routines to easily access node numbers given a cpu number */
static int get_max_num(char *path, int *max)
{
        size_t num;
        char *buf;
        int err = 0;

        if (filename__read_str(path, &buf, &num))
                return -1;

        buf[num] = '\0';

        /* start on the right, to find highest node num */
        while (--num) {
                if ((buf[num] == ',') || (buf[num] == '-')) {
                        num++;
                        break;
                }
        }
        if (sscanf(&buf[num], "%d", max) < 1) {
                err = -1;
                goto out;
        }

        /* convert from 0-based to 1-based */
        (*max)++;

out:
        free(buf);
        return err;
}

/* Determine highest possible cpu in the system for sparse allocation */
static void set_max_cpu_num(void)
{
        const char *mnt;
        char path[PATH_MAX];
        int ret = -1;

        /* set up default */
        max_cpu_num = 4096;
        max_present_cpu_num = 4096;

        mnt = sysfs__mountpoint();
        if (!mnt)
                goto out;

        /* get the highest possible cpu number for a sparse allocation */
        ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
        if (ret == PATH_MAX) {
                pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
                goto out;
        }

        ret = get_max_num(path, &max_cpu_num);
        if (ret)
                goto out;

        /* get the highest present cpu number for a sparse allocation */
        ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
        if (ret == PATH_MAX) {
                pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
                goto out;
        }

        ret = get_max_num(path, &max_present_cpu_num);

out:
        if (ret)
                pr_err("Failed to read max cpus, using default of %d\n", max_cpu_num);
}

/* Determine highest possible node in the system for sparse allocation */
static void set_max_node_num(void)
{
        const char *mnt;
        char path[PATH_MAX];
        int ret = -1;

        /* set up default */
        max_node_num = 8;

        mnt = sysfs__mountpoint();
        if (!mnt)
                goto out;

        /* get the highest possible node number for a sparse allocation */
        ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
        if (ret == PATH_MAX) {
                pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
                goto out;
        }

        ret = get_max_num(path, &max_node_num);

out:
        if (ret)
                pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
}

int cpu__max_node(void)
{
        if (unlikely(!max_node_num))
                set_max_node_num();

        return max_node_num;
}

int cpu__max_cpu(void)
{
        if (unlikely(!max_cpu_num))
                set_max_cpu_num();

        return max_cpu_num;
}

int cpu__max_present_cpu(void)
{
        if (unlikely(!max_present_cpu_num))
                set_max_cpu_num();

        return max_present_cpu_num;
}


int cpu__get_node(int cpu)
{
        if (unlikely(cpunode_map == NULL)) {
                pr_debug("cpu_map not initialized\n");
                return -1;
        }

        return cpunode_map[cpu];
}

static int init_cpunode_map(void)
{
        int i;

        set_max_cpu_num();
        set_max_node_num();

        cpunode_map = calloc(max_cpu_num, sizeof(int));
        if (!cpunode_map) {
                pr_err("%s: calloc failed\n", __func__);
                return -1;
        }

        for (i = 0; i < max_cpu_num; i++)
                cpunode_map[i] = -1;

        return 0;
}

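/*
 * Walk /sys/devices/system/node/node*/cpu* and record each cpu's NUMA
 * node in cpunode_map.
 */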
int cpu__setup_cpunode_map(void)
{
        struct dirent *dent1, *dent2;
        DIR *dir1, *dir2;
        unsigned int cpu, mem;
        char buf[PATH_MAX];
        char path[PATH_MAX];
        const char *mnt;
        int n;

        /* initialize globals */
        if (init_cpunode_map())
                return -1;

        mnt = sysfs__mountpoint();
        if (!mnt)
                return 0;

        n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
        if (n == PATH_MAX) {
                pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
                return -1;
        }

        dir1 = opendir(path);
        if (!dir1)
                return 0;

        /* walk tree and setup map */
        while ((dent1 = readdir(dir1)) != NULL) {
                if (dent1->d_type != DT_DIR || sscanf(dent1->d_name, "node%u", &mem) < 1)
                        continue;

                n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
                if (n == PATH_MAX) {
                        pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
                        continue;
                }

                dir2 = opendir(buf);
                if (!dir2)
                        continue;
                while ((dent2 = readdir(dir2)) != NULL) {
                        if (dent2->d_type != DT_LNK || sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
                                continue;
                        cpunode_map[cpu] = mem;
                }
                closedir(dir2);
        }
        closedir(dir1);
        return 0;
}

bool cpu_map__has(struct cpu_map *cpus, int cpu)
{
        return cpu_map__idx(cpus, cpu) != -1;
}

int cpu_map__idx(struct cpu_map *cpus, int cpu)
{
        int i;

        for (i = 0; i < cpus->nr; ++i) {
                if (cpus->map[i] == cpu)
                        return i;
        }

        return -1;
}

int cpu_map__cpu(struct cpu_map *cpus, int idx)
{
        return cpus->map[idx];
}

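/*
 * Format the map as a human readable cpu list, collapsing consecutive
 * cpus into ranges, e.g. "0-2,5".
 */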
size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)
{
        int i, cpu, start = -1;
        bool first = true;
        size_t ret = 0;

#define COMMA first ? "" : ","

        for (i = 0; i < map->nr + 1; i++) {
                bool last = i == map->nr;

                cpu = last ? INT_MAX : map->map[i];

                if (start == -1) {
                        start = i;
                        if (last) {
                                ret += snprintf(buf + ret, size - ret,
                                                "%s%d", COMMA,
                                                map->map[i]);
                        }
                } else if (((i - start) != (cpu - map->map[start])) || last) {
                        int end = i - 1;

                        if (start == end) {
                                ret += snprintf(buf + ret, size - ret,
                                                "%s%d", COMMA,
                                                map->map[start]);
                        } else {
                                ret += snprintf(buf + ret, size - ret,
                                                "%s%d-%d", COMMA,
                                                map->map[start], map->map[end]);
                        }
                        first = false;
                        start = i;
                }
        }

#undef COMMA

        pr_debug("cpumask list: %s\n", buf);
        return ret;
}

static char hex_char(unsigned char val)
{
        if (val < 10)
                return val + '0';
        if (val < 16)
                return val - 10 + 'a';
        return '?';
}

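/*
 * Format the map as a hex cpumask string with a ',' every 32 bits, the
 * same layout as the sysfs cpumask files.
 */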
size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
{
        int i, cpu;
        char *ptr = buf;
        unsigned char *bitmap;
        int last_cpu = cpu_map__cpu(map, map->nr - 1);

        bitmap = zalloc((last_cpu + 7) / 8);
        if (bitmap == NULL) {
                buf[0] = '\0';
                return 0;
        }

        for (i = 0; i < map->nr; i++) {
                cpu = cpu_map__cpu(map, i);
                bitmap[cpu / 8] |= 1 << (cpu % 8);
        }

        for (cpu = last_cpu / 4 * 4; cpu >= 0; cpu -= 4) {
                unsigned char bits = bitmap[cpu / 8];

                if (cpu % 8)
                        bits >>= 4;
                else
                        bits &= 0xf;

                *ptr++ = hex_char(bits);
                if ((cpu % 32) == 0 && cpu > 0)
                        *ptr++ = ',';
        }
        *ptr = '\0';
        free(bitmap);

        buf[size - 1] = '\0';
        return ptr - buf;
}