linux/tools/perf/bench/numa.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * numa.c
   4 *
   5 * numa: Simulate NUMA-sensitive workloads and measure their NUMA performance
   6 */
   7
   8#include <inttypes.h>
   9/* For the CLR_() macros */
  10#include <pthread.h>
  11
  12#include "../perf.h"
  13#include "../builtin.h"
  14#include "../util/util.h"
  15#include <subcmd/parse-options.h>
  16#include "../util/cloexec.h"
  17
  18#include "bench.h"
  19
  20#include <errno.h>
  21#include <sched.h>
  22#include <stdio.h>
  23#include <assert.h>
  24#include <malloc.h>
  25#include <signal.h>
  26#include <stdlib.h>
  27#include <string.h>
  28#include <unistd.h>
  29#include <sys/mman.h>
  30#include <sys/time.h>
  31#include <sys/resource.h>
  32#include <sys/wait.h>
  33#include <sys/prctl.h>
  34#include <sys/types.h>
  35#include <linux/kernel.h>
  36#include <linux/time64.h>
  37
  38#include <numa.h>
  39#include <numaif.h>
  40
  41/*
   42 * Regular printout to the terminal, suppressed if -q is specified:
  43 */
  44#define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)
  45
  46/*
  47 * Debug printf:
  48 */
  49#undef dprintf
  50#define dprintf(x...) do { if (g && g->p.show_details >= 1) printf(x); } while (0)
  51
  52struct thread_data {
  53        int                     curr_cpu;
  54        cpu_set_t               bind_cpumask;
  55        int                     bind_node;
  56        u8                      *process_data;
  57        int                     process_nr;
  58        int                     thread_nr;
  59        int                     task_nr;
  60        unsigned int            loops_done;
  61        u64                     val;
  62        u64                     runtime_ns;
  63        u64                     system_time_ns;
  64        u64                     user_time_ns;
  65        double                  speed_gbs;
  66        pthread_mutex_t         *process_lock;
  67};
  68
  69/* Parameters set by options: */
  70
  71struct params {
  72        /* Startup synchronization: */
  73        bool                    serialize_startup;
  74
  75        /* Task hierarchy: */
  76        int                     nr_proc;
  77        int                     nr_threads;
  78
  79        /* Working set sizes: */
  80        const char              *mb_global_str;
  81        const char              *mb_proc_str;
  82        const char              *mb_proc_locked_str;
  83        const char              *mb_thread_str;
  84
  85        double                  mb_global;
  86        double                  mb_proc;
  87        double                  mb_proc_locked;
  88        double                  mb_thread;
  89
  90        /* Access patterns to the working set: */
  91        bool                    data_reads;
  92        bool                    data_writes;
  93        bool                    data_backwards;
  94        bool                    data_zero_memset;
  95        bool                    data_rand_walk;
  96        u32                     nr_loops;
  97        u32                     nr_secs;
  98        u32                     sleep_usecs;
  99
 100        /* Working set initialization: */
 101        bool                    init_zero;
 102        bool                    init_random;
 103        bool                    init_cpu0;
 104
 105        /* Misc options: */
 106        int                     show_details;
 107        int                     run_all;
 108        int                     thp;
 109
 110        long                    bytes_global;
 111        long                    bytes_process;
 112        long                    bytes_process_locked;
 113        long                    bytes_thread;
 114
 115        int                     nr_tasks;
 116        bool                    show_quiet;
 117
 118        bool                    show_convergence;
 119        bool                    measure_convergence;
 120
 121        int                     perturb_secs;
 122        int                     nr_cpus;
 123        int                     nr_nodes;
 124
  125        /* Affinity options -C (cpus) and -M (memnodes): */
 126        char                    *cpu_list_str;
 127        char                    *node_list_str;
 128};
 129
 130
 131/* Global, read-writable area, accessible to all processes and threads: */
 132
 133struct global_info {
 134        u8                      *data;
 135
 136        pthread_mutex_t         startup_mutex;
 137        int                     nr_tasks_started;
 138
 139        pthread_mutex_t         startup_done_mutex;
 140
 141        pthread_mutex_t         start_work_mutex;
 142        int                     nr_tasks_working;
 143
 144        pthread_mutex_t         stop_work_mutex;
 145        u64                     bytes_done;
 146
 147        struct thread_data      *threads;
 148
 149        /* Convergence latency measurement: */
 150        bool                    all_converged;
 151        bool                    stop_work;
 152
 153        int                     print_once;
 154
 155        struct params           p;
 156};
 157
 158static struct global_info       *g = NULL;
 159
 160static int parse_cpus_opt(const struct option *opt, const char *arg, int unset);
 161static int parse_nodes_opt(const struct option *opt, const char *arg, int unset);
 162
 163struct params p0;
 164
 165static const struct option options[] = {
 166        OPT_INTEGER('p', "nr_proc"      , &p0.nr_proc,          "number of processes"),
 167        OPT_INTEGER('t', "nr_threads"   , &p0.nr_threads,       "number of threads per process"),
 168
 169        OPT_STRING('G', "mb_global"     , &p0.mb_global_str,    "MB", "global  memory (MBs)"),
 170        OPT_STRING('P', "mb_proc"       , &p0.mb_proc_str,      "MB", "process memory (MBs)"),
 171        OPT_STRING('L', "mb_proc_locked", &p0.mb_proc_locked_str,"MB", "process serialized/locked memory access (MBs), <= process_memory"),
 172        OPT_STRING('T', "mb_thread"     , &p0.mb_thread_str,    "MB", "thread  memory (MBs)"),
 173
 174        OPT_UINTEGER('l', "nr_loops"    , &p0.nr_loops,         "max number of loops to run (default: unlimited)"),
 175        OPT_UINTEGER('s', "nr_secs"     , &p0.nr_secs,          "max number of seconds to run (default: 5 secs)"),
 176        OPT_UINTEGER('u', "usleep"      , &p0.sleep_usecs,      "usecs to sleep per loop iteration"),
 177
 178        OPT_BOOLEAN('R', "data_reads"   , &p0.data_reads,       "access the data via reads (can be mixed with -W)"),
 179        OPT_BOOLEAN('W', "data_writes"  , &p0.data_writes,      "access the data via writes (can be mixed with -R)"),
 180        OPT_BOOLEAN('B', "data_backwards", &p0.data_backwards,  "access the data backwards as well"),
 181        OPT_BOOLEAN('Z', "data_zero_memset", &p0.data_zero_memset,"access the data via glibc bzero only"),
 182        OPT_BOOLEAN('r', "data_rand_walk", &p0.data_rand_walk,  "access the data with random (32bit LFSR) walk"),
 183
 184
 185        OPT_BOOLEAN('z', "init_zero"    , &p0.init_zero,        "bzero the initial allocations"),
 186        OPT_BOOLEAN('I', "init_random"  , &p0.init_random,      "randomize the contents of the initial allocations"),
 187        OPT_BOOLEAN('0', "init_cpu0"    , &p0.init_cpu0,        "do the initial allocations on CPU#0"),
 188        OPT_INTEGER('x', "perturb_secs", &p0.perturb_secs,      "perturb thread 0/0 every X secs, to test convergence stability"),
 189
 190        OPT_INCR   ('d', "show_details" , &p0.show_details,     "Show details"),
 191        OPT_INCR   ('a', "all"          , &p0.run_all,          "Run all tests in the suite"),
 192        OPT_INTEGER('H', "thp"          , &p0.thp,              "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"),
  193        OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details; "
  194                    "convergence is reached when each process (all its threads) is running on a single NUMA node."),
 195        OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"),
 196        OPT_BOOLEAN('q', "quiet"        , &p0.show_quiet,       "quiet mode"),
 197        OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),
 198
 199        /* Special option string parsing callbacks: */
 200        OPT_CALLBACK('C', "cpus", NULL, "cpu[,cpu2,...cpuN]",
 201                        "bind the first N tasks to these specific cpus (the rest is unbound)",
 202                        parse_cpus_opt),
 203        OPT_CALLBACK('M', "memnodes", NULL, "node[,node2,...nodeN]",
 204                        "bind the first N tasks to these specific memory nodes (the rest is unbound)",
 205                        parse_nodes_opt),
 206        OPT_END()
 207};
 208
 209static const char * const bench_numa_usage[] = {
 210        "perf bench numa <options>",
 211        NULL
 212};
 213
 214static const char * const numa_usage[] = {
 215        "perf bench numa mem [<options>]",
 216        NULL
 217};
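     /*
      * Illustrative example (not part of the original source): given the
      * options above, a typical invocation that runs 2 processes x 4 threads,
      * each process working on 512 MB of process-local memory for at most
      * 10 seconds, would look roughly like:
      *
      *   perf bench numa mem -p 2 -t 4 -P 512 -s 10
      */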
 218
 219/*
  220 * Return the number of NUMA nodes present in the system.
 221 */
 222static int nr_numa_nodes(void)
 223{
 224        int i, nr_nodes = 0;
 225
 226        for (i = 0; i < g->p.nr_nodes; i++) {
 227                if (numa_bitmask_isbitset(numa_nodes_ptr, i))
 228                        nr_nodes++;
 229        }
 230
 231        return nr_nodes;
 232}
 233
 234/*
  235 * Check whether the given NUMA node is present.
 236 */
 237static int is_node_present(int node)
 238{
 239        return numa_bitmask_isbitset(numa_nodes_ptr, node);
 240}
 241
 242/*
  243 * Check whether the given NUMA node has any CPUs.
 244 */
  245static bool node_has_cpus(int node)
  246{
  247        struct bitmask *cpu = numa_allocate_cpumask();
  248        bool ret = false; /* let's fall back to 'no cpus' safely */
  249        unsigned int i;
  250
  251        if (cpu && !numa_node_to_cpus(node, cpu)) {
  252                for (i = 0; i < cpu->size && !ret; i++)
  253                        ret = numa_bitmask_isbitset(cpu, i);
  254        }
  255
  256        numa_free_cpumask(cpu); /* don't leak the allocated cpumask */
  257        return ret;
  258}
 259
 260static cpu_set_t bind_to_cpu(int target_cpu)
 261{
 262        cpu_set_t orig_mask, mask;
 263        int ret;
 264
 265        ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
 266        BUG_ON(ret);
 267
 268        CPU_ZERO(&mask);
 269
 270        if (target_cpu == -1) {
 271                int cpu;
 272
 273                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
 274                        CPU_SET(cpu, &mask);
 275        } else {
 276                BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus);
 277                CPU_SET(target_cpu, &mask);
 278        }
 279
 280        ret = sched_setaffinity(0, sizeof(mask), &mask);
 281        BUG_ON(ret);
 282
 283        return orig_mask;
 284}
 285
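     /*
      * Bind the current task to the CPUs of @target_node (-1 means all CPUs).
      * Note: this helper assumes CPUs are distributed evenly across nodes and
      * numbered contiguously per node, which the BUG_ON()s below only
      * partially check.
      */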
 286static cpu_set_t bind_to_node(int target_node)
 287{
 288        int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
 289        cpu_set_t orig_mask, mask;
 290        int cpu;
 291        int ret;
 292
 293        BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
 294        BUG_ON(!cpus_per_node);
 295
 296        ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
 297        BUG_ON(ret);
 298
 299        CPU_ZERO(&mask);
 300
 301        if (target_node == -1) {
 302                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
 303                        CPU_SET(cpu, &mask);
 304        } else {
 305                int cpu_start = (target_node + 0) * cpus_per_node;
 306                int cpu_stop  = (target_node + 1) * cpus_per_node;
 307
 308                BUG_ON(cpu_stop > g->p.nr_cpus);
 309
 310                for (cpu = cpu_start; cpu < cpu_stop; cpu++)
 311                        CPU_SET(cpu, &mask);
 312        }
 313
 314        ret = sched_setaffinity(0, sizeof(mask), &mask);
 315        BUG_ON(ret);
 316
 317        return orig_mask;
 318}
 319
 320static void bind_to_cpumask(cpu_set_t mask)
 321{
 322        int ret;
 323
 324        ret = sched_setaffinity(0, sizeof(mask), &mask);
 325        BUG_ON(ret);
 326}
 327
 328static void mempol_restore(void)
 329{
 330        int ret;
 331
 332        ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1);
 333
 334        BUG_ON(ret);
 335}
 336
 337static void bind_to_memnode(int node)
 338{
 339        unsigned long nodemask;
 340        int ret;
 341
 342        if (node == -1)
 343                return;
 344
 345        BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)*8);
 346        nodemask = 1L << node;
 347
 348        ret = set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask)*8);
 349        dprintf("binding to node %d, mask: %016lx => %d\n", node, nodemask, ret);
 350
 351        BUG_ON(ret);
 352}
 353
 354#define HPSIZE (2*1024*1024)
 355
 356#define set_taskname(fmt...)                            \
 357do {                                                    \
 358        char name[20];                                  \
 359                                                        \
 360        snprintf(name, 20, fmt);                        \
 361        prctl(PR_SET_NAME, name);                       \
 362} while (0)
 363
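     /*
      * mmap() a data buffer of @bytes0 bytes (plus one extra huge page so it
      * can be aligned to a 2MB boundary), optionally allocating it on node 0 /
      * CPU#0, applying the THP madvise() policy and initializing it with
      * zeroes or random contents:
      */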
 364static u8 *alloc_data(ssize_t bytes0, int map_flags,
 365                      int init_zero, int init_cpu0, int thp, int init_random)
 366{
 367        cpu_set_t orig_mask;
 368        ssize_t bytes;
 369        u8 *buf;
 370        int ret;
 371
 372        if (!bytes0)
 373                return NULL;
 374
 375        /* Allocate and initialize all memory on CPU#0: */
 376        if (init_cpu0) {
 377                orig_mask = bind_to_node(0);
 378                bind_to_memnode(0);
 379        }
 380
 381        bytes = bytes0 + HPSIZE;
 382
 383        buf = (void *)mmap(0, bytes, PROT_READ|PROT_WRITE, MAP_ANON|map_flags, -1, 0);
 384        BUG_ON(buf == (void *)-1);
 385
 386        if (map_flags == MAP_PRIVATE) {
 387                if (thp > 0) {
 388                        ret = madvise(buf, bytes, MADV_HUGEPAGE);
 389                        if (ret && !g->print_once) {
 390                                g->print_once = 1;
 391                                printf("WARNING: Could not enable THP - do: 'echo madvise > /sys/kernel/mm/transparent_hugepage/enabled'\n");
 392                        }
 393                }
 394                if (thp < 0) {
 395                        ret = madvise(buf, bytes, MADV_NOHUGEPAGE);
 396                        if (ret && !g->print_once) {
 397                                g->print_once = 1;
 398                                printf("WARNING: Could not disable THP: run a CONFIG_TRANSPARENT_HUGEPAGE kernel?\n");
 399                        }
 400                }
 401        }
 402
 403        if (init_zero) {
 404                bzero(buf, bytes);
 405        } else {
 406                /* Initialize random contents, different in each word: */
 407                if (init_random) {
 408                        u64 *wbuf = (void *)buf;
 409                        long off = rand();
 410                        long i;
 411
 412                        for (i = 0; i < bytes/8; i++)
 413                                wbuf[i] = i + off;
 414                }
 415        }
 416
 417        /* Align to 2MB boundary: */
 418        buf = (void *)(((unsigned long)buf + HPSIZE-1) & ~(HPSIZE-1));
 419
 420        /* Restore affinity: */
 421        if (init_cpu0) {
 422                bind_to_cpumask(orig_mask);
 423                mempol_restore();
 424        }
 425
 426        return buf;
 427}
 428
 429static void free_data(void *data, ssize_t bytes)
 430{
 431        int ret;
 432
 433        if (!data)
 434                return;
 435
 436        ret = munmap(data, bytes);
 437        BUG_ON(ret);
 438}
 439
 440/*
 441 * Create a shared memory buffer that can be shared between processes, zeroed:
 442 */
 443static void * zalloc_shared_data(ssize_t bytes)
 444{
 445        return alloc_data(bytes, MAP_SHARED, 1, g->p.init_cpu0,  g->p.thp, g->p.init_random);
 446}
 447
 448/*
 449 * Create a shared memory buffer that can be shared between processes:
 450 */
 451static void * setup_shared_data(ssize_t bytes)
 452{
 453        return alloc_data(bytes, MAP_SHARED, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
 454}
 455
 456/*
 457 * Allocate process-local memory - this will either be shared between
 458 * threads of this process, or only be accessed by this thread:
 459 */
 460static void * setup_private_data(ssize_t bytes)
 461{
 462        return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0,  g->p.thp, g->p.init_random);
 463}
 464
 465/*
  466 * Initialize a process-shared (global) mutex:
 467 */
 468static void init_global_mutex(pthread_mutex_t *mutex)
 469{
 470        pthread_mutexattr_t attr;
 471
 472        pthread_mutexattr_init(&attr);
 473        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 474        pthread_mutex_init(mutex, &attr);
 475}
 476
 477static int parse_cpu_list(const char *arg)
 478{
 479        p0.cpu_list_str = strdup(arg);
 480
 481        dprintf("got CPU list: {%s}\n", p0.cpu_list_str);
 482
 483        return 0;
 484}
 485
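     /*
      * Parse the -C/--cpus string saved by parse_cpu_list() and fill in each
      * task's bind_cpumask. The syntax supports single CPUs, ranges ("5-11"),
      * a step ("#4"), a bind-mask length ("_4") and a repeat count ("x8"):
      */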
 486static int parse_setup_cpu_list(void)
 487{
 488        struct thread_data *td;
 489        char *str0, *str;
 490        int t;
 491
 492        if (!g->p.cpu_list_str)
 493                return 0;
 494
 495        dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
 496
 497        str0 = str = strdup(g->p.cpu_list_str);
 498        t = 0;
 499
 500        BUG_ON(!str);
 501
 502        tprintf("# binding tasks to CPUs:\n");
 503        tprintf("#  ");
 504
 505        while (true) {
 506                int bind_cpu, bind_cpu_0, bind_cpu_1;
 507                char *tok, *tok_end, *tok_step, *tok_len, *tok_mul;
 508                int bind_len;
 509                int step;
 510                int mul;
 511
 512                tok = strsep(&str, ",");
 513                if (!tok)
 514                        break;
 515
 516                tok_end = strstr(tok, "-");
 517
 518                dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
 519                if (!tok_end) {
 520                        /* Single CPU specified: */
 521                        bind_cpu_0 = bind_cpu_1 = atol(tok);
 522                } else {
 523                        /* CPU range specified (for example: "5-11"): */
 524                        bind_cpu_0 = atol(tok);
 525                        bind_cpu_1 = atol(tok_end + 1);
 526                }
 527
 528                step = 1;
 529                tok_step = strstr(tok, "#");
 530                if (tok_step) {
 531                        step = atol(tok_step + 1);
 532                        BUG_ON(step <= 0 || step >= g->p.nr_cpus);
 533                }
 534
 535                /*
 536                 * Mask length.
 537                 * Eg: "--cpus 8_4-16#4" means: '--cpus 8_4,12_4,16_4',
 538                 * where the _4 means the next 4 CPUs are allowed.
 539                 */
 540                bind_len = 1;
 541                tok_len = strstr(tok, "_");
 542                if (tok_len) {
 543                        bind_len = atol(tok_len + 1);
 544                        BUG_ON(bind_len <= 0 || bind_len > g->p.nr_cpus);
 545                }
 546
  547                /* Multiplier shortcut: "0x8" is shorthand for "0,0,0,0,0,0,0,0" (CPU 0, eight times) */
 548                mul = 1;
 549                tok_mul = strstr(tok, "x");
 550                if (tok_mul) {
 551                        mul = atol(tok_mul + 1);
 552                        BUG_ON(mul <= 0);
 553                }
 554
 555                dprintf("CPUs: %d_%d-%d#%dx%d\n", bind_cpu_0, bind_len, bind_cpu_1, step, mul);
 556
 557                if (bind_cpu_0 >= g->p.nr_cpus || bind_cpu_1 >= g->p.nr_cpus) {
 558                        printf("\nTest not applicable, system has only %d CPUs.\n", g->p.nr_cpus);
 559                        return -1;
 560                }
 561
 562                BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
 563                BUG_ON(bind_cpu_0 > bind_cpu_1);
 564
 565                for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
 566                        int i;
 567
 568                        for (i = 0; i < mul; i++) {
 569                                int cpu;
 570
 571                                if (t >= g->p.nr_tasks) {
 572                                        printf("\n# NOTE: ignoring bind CPUs starting at CPU#%d\n #", bind_cpu);
 573                                        goto out;
 574                                }
 575                                td = g->threads + t;
 576
 577                                if (t)
 578                                        tprintf(",");
 579                                if (bind_len > 1) {
 580                                        tprintf("%2d/%d", bind_cpu, bind_len);
 581                                } else {
 582                                        tprintf("%2d", bind_cpu);
 583                                }
 584
 585                                CPU_ZERO(&td->bind_cpumask);
 586                                for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
 587                                        BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
 588                                        CPU_SET(cpu, &td->bind_cpumask);
 589                                }
 590                                t++;
 591                        }
 592                }
 593        }
 594out:
 595
 596        tprintf("\n");
 597
 598        if (t < g->p.nr_tasks)
 599                printf("# NOTE: %d tasks bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
 600
 601        free(str0);
 602        return 0;
 603}
 604
 605static int parse_cpus_opt(const struct option *opt __maybe_unused,
 606                          const char *arg, int unset __maybe_unused)
 607{
 608        if (!arg)
 609                return -1;
 610
 611        return parse_cpu_list(arg);
 612}
 613
 614static int parse_node_list(const char *arg)
 615{
 616        p0.node_list_str = strdup(arg);
 617
 618        dprintf("got NODE list: {%s}\n", p0.node_list_str);
 619
 620        return 0;
 621}
 622
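     /*
      * Parse the -M/--memnodes string saved by parse_node_list() and set each
      * task's bind_node. The syntax mirrors the CPU list above, minus the
      * "_len" mask-length field:
      */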
 623static int parse_setup_node_list(void)
 624{
 625        struct thread_data *td;
 626        char *str0, *str;
 627        int t;
 628
 629        if (!g->p.node_list_str)
 630                return 0;
 631
 632        dprintf("g->p.nr_tasks: %d\n", g->p.nr_tasks);
 633
 634        str0 = str = strdup(g->p.node_list_str);
 635        t = 0;
 636
 637        BUG_ON(!str);
 638
 639        tprintf("# binding tasks to NODEs:\n");
 640        tprintf("# ");
 641
 642        while (true) {
 643                int bind_node, bind_node_0, bind_node_1;
 644                char *tok, *tok_end, *tok_step, *tok_mul;
 645                int step;
 646                int mul;
 647
 648                tok = strsep(&str, ",");
 649                if (!tok)
 650                        break;
 651
 652                tok_end = strstr(tok, "-");
 653
 654                dprintf("\ntoken: {%s}, end: {%s}\n", tok, tok_end);
 655                if (!tok_end) {
 656                        /* Single NODE specified: */
 657                        bind_node_0 = bind_node_1 = atol(tok);
 658                } else {
 659                        /* NODE range specified (for example: "5-11"): */
 660                        bind_node_0 = atol(tok);
 661                        bind_node_1 = atol(tok_end + 1);
 662                }
 663
 664                step = 1;
 665                tok_step = strstr(tok, "#");
 666                if (tok_step) {
 667                        step = atol(tok_step + 1);
 668                        BUG_ON(step <= 0 || step >= g->p.nr_nodes);
 669                }
 670
  671                /* Multiplier shortcut: "0x8" is shorthand for "0,0,0,0,0,0,0,0" (node 0, eight times) */
 672                mul = 1;
 673                tok_mul = strstr(tok, "x");
 674                if (tok_mul) {
 675                        mul = atol(tok_mul + 1);
 676                        BUG_ON(mul <= 0);
 677                }
 678
 679                dprintf("NODEs: %d-%d #%d\n", bind_node_0, bind_node_1, step);
 680
 681                if (bind_node_0 >= g->p.nr_nodes || bind_node_1 >= g->p.nr_nodes) {
 682                        printf("\nTest not applicable, system has only %d nodes.\n", g->p.nr_nodes);
 683                        return -1;
 684                }
 685
 686                BUG_ON(bind_node_0 < 0 || bind_node_1 < 0);
 687                BUG_ON(bind_node_0 > bind_node_1);
 688
 689                for (bind_node = bind_node_0; bind_node <= bind_node_1; bind_node += step) {
 690                        int i;
 691
 692                        for (i = 0; i < mul; i++) {
 693                                if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
 694                                        printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
 695                                        goto out;
 696                                }
 697                                td = g->threads + t;
 698
 699                                if (!t)
 700                                        tprintf(" %2d", bind_node);
 701                                else
 702                                        tprintf(",%2d", bind_node);
 703
 704                                td->bind_node = bind_node;
 705                                t++;
 706                        }
 707                }
 708        }
 709out:
 710
 711        tprintf("\n");
 712
 713        if (t < g->p.nr_tasks)
 714                printf("# NOTE: %d tasks mem-bound, %d tasks unbound\n", t, g->p.nr_tasks - t);
 715
 716        free(str0);
 717        return 0;
 718}
 719
 720static int parse_nodes_opt(const struct option *opt __maybe_unused,
 721                          const char *arg, int unset __maybe_unused)
 722{
 723        if (!arg)
 724                return -1;
 725
  726        return parse_node_list(arg);
 729}
 730
  731#define BIT(x) (1ul << (x))
 732
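     /*
      * Advance a 32-bit Galois LFSR by one step; do_work() uses this to
      * generate a cheap pseudo-random walk over the working set when
      * -r/--data_rand_walk is enabled:
      */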
 733static inline uint32_t lfsr_32(uint32_t lfsr)
 734{
 735        const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
 736        return (lfsr>>1) ^ ((0x0u - (lfsr & 0x1u)) & taps);
 737}
 738
 739/*
  740 * Make sure there's a real data dependency on RAM (when read
 741 * accesses are enabled), so the compiler, the CPU and the
 742 * kernel (KSM, zero page, etc.) cannot optimize away RAM
 743 * accesses:
 744 */
 745static inline u64 access_data(u64 *data, u64 val)
 746{
 747        if (g->p.data_reads)
 748                val += *data;
 749        if (g->p.data_writes)
 750                *data = val + 1;
 751        return val;
 752}
 753
 754/*
  755 * The worker process does two types of work: a forwards-going
  756 * loop and a backwards-going loop.
 757 *
 758 * We do this so that on multiprocessor systems we do not create
 759 * a 'train' of processing, with highly synchronized processes,
 760 * skewing the whole benchmark.
 761 */
 762static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val)
 763{
 764        long words = bytes/sizeof(u64);
 765        u64 *data = (void *)__data;
 766        long chunk_0, chunk_1;
 767        u64 *d0, *d, *d1;
 768        long off;
 769        long i;
 770
 771        BUG_ON(!data && words);
 772        BUG_ON(data && !words);
 773
 774        if (!data)
 775                return val;
 776
 777        /* Very simple memset() work variant: */
 778        if (g->p.data_zero_memset && !g->p.data_rand_walk) {
 779                bzero(data, bytes);
 780                return val;
 781        }
 782
 783        /* Spread out by PID/TID nr and by loop nr: */
 784        chunk_0 = words/nr_max;
 785        chunk_1 = words/g->p.nr_loops;
 786        off = nr*chunk_0 + loop*chunk_1;
 787
 788        while (off >= words)
 789                off -= words;
 790
 791        if (g->p.data_rand_walk) {
 792                u32 lfsr = nr + loop + val;
 793                int j;
 794
 795                for (i = 0; i < words/1024; i++) {
 796                        long start, end;
 797
 798                        lfsr = lfsr_32(lfsr);
 799
 800                        start = lfsr % words;
 801                        end = min(start + 1024, words-1);
 802
 803                        if (g->p.data_zero_memset) {
 804                                bzero(data + start, (end-start) * sizeof(u64));
 805                        } else {
 806                                for (j = start; j < end; j++)
 807                                        val = access_data(data + j, val);
 808                        }
 809                }
 810        } else if (!g->p.data_backwards || (nr + loop) & 1) {
 811
 812                d0 = data + off;
 813                d  = data + off + 1;
 814                d1 = data + words;
 815
 816                /* Process data forwards: */
 817                for (;;) {
 818                        if (unlikely(d >= d1))
 819                                d = data;
 820                        if (unlikely(d == d0))
 821                                break;
 822
 823                        val = access_data(d, val);
 824
 825                        d++;
 826                }
 827        } else {
 828                /* Process data backwards: */
 829
 830                d0 = data + off;
 831                d  = data + off - 1;
 832                d1 = data + words;
 833
  834                /* Process data backwards: */
 835                for (;;) {
 836                        if (unlikely(d < data))
 837                                d = data + words-1;
 838                        if (unlikely(d == d0))
 839                                break;
 840
 841                        val = access_data(d, val);
 842
 843                        d--;
 844                }
 845        }
 846
 847        return val;
 848}
 849
 850static void update_curr_cpu(int task_nr, unsigned long bytes_worked)
 851{
 852        unsigned int cpu;
 853
 854        cpu = sched_getcpu();
 855
 856        g->threads[task_nr].curr_cpu = cpu;
 857        prctl(0, bytes_worked);
 858}
 859
 860#define MAX_NR_NODES    64
 861
 862/*
 863 * Count the number of nodes a process's threads
 864 * are spread out on.
 865 *
 866 * A count of 1 means that the process is compressed
 867 * to a single node. A count of g->p.nr_nodes means it's
 868 * spread out on the whole system.
 869 */
 870static int count_process_nodes(int process_nr)
 871{
 872        char node_present[MAX_NR_NODES] = { 0, };
 873        int nodes;
 874        int n, t;
 875
 876        for (t = 0; t < g->p.nr_threads; t++) {
 877                struct thread_data *td;
 878                int task_nr;
 879                int node;
 880
 881                task_nr = process_nr*g->p.nr_threads + t;
 882                td = g->threads + task_nr;
 883
 884                node = numa_node_of_cpu(td->curr_cpu);
 885                if (node < 0) /* curr_cpu was likely still -1 */
 886                        return 0;
 887
 888                node_present[node] = 1;
 889        }
 890
 891        nodes = 0;
 892
 893        for (n = 0; n < MAX_NR_NODES; n++)
 894                nodes += node_present[n];
 895
 896        return nodes;
 897}
 898
 899/*
 900 * Count the number of distinct process-threads a node contains.
 901 *
 902 * A count of 1 means that the node contains only a single
 903 * process. If all nodes on the system contain at most one
 904 * process then we are well-converged.
 905 */
 906static int count_node_processes(int node)
 907{
 908        int processes = 0;
 909        int t, p;
 910
 911        for (p = 0; p < g->p.nr_proc; p++) {
 912                for (t = 0; t < g->p.nr_threads; t++) {
 913                        struct thread_data *td;
 914                        int task_nr;
 915                        int n;
 916
 917                        task_nr = p*g->p.nr_threads + t;
 918                        td = g->threads + task_nr;
 919
 920                        n = numa_node_of_cpu(td->curr_cpu);
 921                        if (n == node) {
 922                                processes++;
 923                                break;
 924                        }
 925                }
 926        }
 927
 928        return processes;
 929}
 930
 931static void calc_convergence_compression(int *strong)
 932{
 933        unsigned int nodes_min, nodes_max;
 934        int p;
 935
 936        nodes_min = -1;
 937        nodes_max =  0;
 938
 939        for (p = 0; p < g->p.nr_proc; p++) {
 940                unsigned int nodes = count_process_nodes(p);
 941
 942                if (!nodes) {
 943                        *strong = 0;
 944                        return;
 945                }
 946
 947                nodes_min = min(nodes, nodes_min);
 948                nodes_max = max(nodes, nodes_max);
 949        }
 950
 951        /* Strong convergence: all threads compress on a single node: */
 952        if (nodes_min == 1 && nodes_max == 1) {
 953                *strong = 1;
 954        } else {
 955                *strong = 0;
 956                tprintf(" {%d-%d}", nodes_min, nodes_max);
 957        }
 958}
 959
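     /*
      * Print a one-line convergence snapshot (tasks per node, process groups
      * per node, loop-count skew) and, once every process has compressed onto
      * a single node, record the convergence time and optionally stop the run:
      */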
 960static void calc_convergence(double runtime_ns_max, double *convergence)
 961{
 962        unsigned int loops_done_min, loops_done_max;
 963        int process_groups;
 964        int nodes[MAX_NR_NODES];
 965        int distance;
 966        int nr_min;
 967        int nr_max;
 968        int strong;
 969        int sum;
 970        int nr;
 971        int node;
 972        int cpu;
 973        int t;
 974
 975        if (!g->p.show_convergence && !g->p.measure_convergence)
 976                return;
 977
 978        for (node = 0; node < g->p.nr_nodes; node++)
 979                nodes[node] = 0;
 980
 981        loops_done_min = -1;
 982        loops_done_max = 0;
 983
 984        for (t = 0; t < g->p.nr_tasks; t++) {
 985                struct thread_data *td = g->threads + t;
 986                unsigned int loops_done;
 987
 988                cpu = td->curr_cpu;
 989
 990                /* Not all threads have written it yet: */
 991                if (cpu < 0)
 992                        continue;
 993
 994                node = numa_node_of_cpu(cpu);
 995
 996                nodes[node]++;
 997
 998                loops_done = td->loops_done;
 999                loops_done_min = min(loops_done, loops_done_min);
1000                loops_done_max = max(loops_done, loops_done_max);
1001        }
1002
1003        nr_max = 0;
1004        nr_min = g->p.nr_tasks;
1005        sum = 0;
1006
1007        for (node = 0; node < g->p.nr_nodes; node++) {
1008                if (!is_node_present(node))
1009                        continue;
1010                nr = nodes[node];
1011                nr_min = min(nr, nr_min);
1012                nr_max = max(nr, nr_max);
1013                sum += nr;
1014        }
1015        BUG_ON(nr_min > nr_max);
1016
1017        BUG_ON(sum > g->p.nr_tasks);
1018
1019        if (0 && (sum < g->p.nr_tasks))
1020                return;
1021
1022        /*
1023         * Count the number of distinct process groups present
1024         * on nodes - when we are converged this will decrease
1025         * to g->p.nr_proc:
1026         */
1027        process_groups = 0;
1028
1029        for (node = 0; node < g->p.nr_nodes; node++) {
1030                int processes;
1031
1032                if (!is_node_present(node))
1033                        continue;
1034                processes = count_node_processes(node);
1035                nr = nodes[node];
1036                tprintf(" %2d/%-2d", nr, processes);
1037
1038                process_groups += processes;
1039        }
1040
1041        distance = nr_max - nr_min;
1042
1043        tprintf(" [%2d/%-2d]", distance, process_groups);
1044
1045        tprintf(" l:%3d-%-3d (%3d)",
1046                loops_done_min, loops_done_max, loops_done_max-loops_done_min);
1047
1048        if (loops_done_min && loops_done_max) {
1049                double skew = 1.0 - (double)loops_done_min/loops_done_max;
1050
1051                tprintf(" [%4.1f%%]", skew * 100.0);
1052        }
1053
1054        calc_convergence_compression(&strong);
1055
1056        if (strong && process_groups == g->p.nr_proc) {
1057                if (!*convergence) {
1058                        *convergence = runtime_ns_max;
1059                        tprintf(" (%6.1fs converged)\n", *convergence / NSEC_PER_SEC);
1060                        if (g->p.measure_convergence) {
1061                                g->all_converged = true;
1062                                g->stop_work = true;
1063                        }
1064                }
1065        } else {
1066                if (*convergence) {
1067                        tprintf(" (%6.1fs de-converged)", runtime_ns_max / NSEC_PER_SEC);
1068                        *convergence = 0;
1069                }
1070                tprintf("\n");
1071        }
1072}
1073
1074static void show_summary(double runtime_ns_max, int l, double *convergence)
1075{
1076        tprintf("\r #  %5.1f%%  [%.1f mins]",
1077                (double)(l+1)/g->p.nr_loops*100.0, runtime_ns_max / NSEC_PER_SEC / 60.0);
1078
1079        calc_convergence(runtime_ns_max, convergence);
1080
1081        if (g->p.show_details >= 0)
1082                fflush(stdout);
1083}
1084
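     /*
      * Per-thread worker: bind to the assigned CPUs and memory node, allocate
      * the thread-local working set, then loop over the global, per-process
      * and per-thread buffers until the loop count or time limit is reached,
      * collecting runtime, rusage and convergence statistics along the way:
      */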
1085static void *worker_thread(void *__tdata)
1086{
1087        struct thread_data *td = __tdata;
1088        struct timeval start0, start, stop, diff;
1089        int process_nr = td->process_nr;
1090        int thread_nr = td->thread_nr;
1091        unsigned long last_perturbance;
1092        int task_nr = td->task_nr;
1093        int details = g->p.show_details;
1094        int first_task, last_task;
1095        double convergence = 0;
1096        u64 val = td->val;
1097        double runtime_ns_max;
1098        u8 *global_data;
1099        u8 *process_data;
1100        u8 *thread_data;
1101        u64 bytes_done;
1102        long work_done;
1103        u32 l;
1104        struct rusage rusage;
1105
1106        bind_to_cpumask(td->bind_cpumask);
1107        bind_to_memnode(td->bind_node);
1108
1109        set_taskname("thread %d/%d", process_nr, thread_nr);
1110
1111        global_data = g->data;
1112        process_data = td->process_data;
1113        thread_data = setup_private_data(g->p.bytes_thread);
1114
1115        bytes_done = 0;
1116
1117        last_task = 0;
1118        if (process_nr == g->p.nr_proc-1 && thread_nr == g->p.nr_threads-1)
1119                last_task = 1;
1120
1121        first_task = 0;
1122        if (process_nr == 0 && thread_nr == 0)
1123                first_task = 1;
1124
1125        if (details >= 2) {
1126                printf("#  thread %2d / %2d global mem: %p, process mem: %p, thread mem: %p\n",
1127                        process_nr, thread_nr, global_data, process_data, thread_data);
1128        }
1129
1130        if (g->p.serialize_startup) {
1131                pthread_mutex_lock(&g->startup_mutex);
1132                g->nr_tasks_started++;
1133                pthread_mutex_unlock(&g->startup_mutex);
1134
1135                /* Here we will wait for the main process to start us all at once: */
1136                pthread_mutex_lock(&g->start_work_mutex);
1137                g->nr_tasks_working++;
1138
 1139                /* The last one wakes the main process: */
1140                if (g->nr_tasks_working == g->p.nr_tasks)
1141                        pthread_mutex_unlock(&g->startup_done_mutex);
1142
1143                pthread_mutex_unlock(&g->start_work_mutex);
1144        }
1145
1146        gettimeofday(&start0, NULL);
1147
1148        start = stop = start0;
1149        last_perturbance = start.tv_sec;
1150
1151        for (l = 0; l < g->p.nr_loops; l++) {
1152                start = stop;
1153
1154                if (g->stop_work)
1155                        break;
1156
1157                val += do_work(global_data,  g->p.bytes_global,  process_nr, g->p.nr_proc,      l, val);
1158                val += do_work(process_data, g->p.bytes_process, thread_nr,  g->p.nr_threads,   l, val);
1159                val += do_work(thread_data,  g->p.bytes_thread,  0,          1,         l, val);
1160
1161                if (g->p.sleep_usecs) {
1162                        pthread_mutex_lock(td->process_lock);
1163                        usleep(g->p.sleep_usecs);
1164                        pthread_mutex_unlock(td->process_lock);
1165                }
1166                /*
1167                 * Amount of work to be done under a process-global lock:
1168                 */
1169                if (g->p.bytes_process_locked) {
1170                        pthread_mutex_lock(td->process_lock);
1171                        val += do_work(process_data, g->p.bytes_process_locked, thread_nr,  g->p.nr_threads,    l, val);
1172                        pthread_mutex_unlock(td->process_lock);
1173                }
1174
1175                work_done = g->p.bytes_global + g->p.bytes_process +
1176                            g->p.bytes_process_locked + g->p.bytes_thread;
1177
1178                update_curr_cpu(task_nr, work_done);
1179                bytes_done += work_done;
1180
1181                if (details < 0 && !g->p.perturb_secs && !g->p.measure_convergence && !g->p.nr_secs)
1182                        continue;
1183
1184                td->loops_done = l;
1185
1186                gettimeofday(&stop, NULL);
1187
1188                /* Check whether our max runtime timed out: */
1189                if (g->p.nr_secs) {
1190                        timersub(&stop, &start0, &diff);
1191                        if ((u32)diff.tv_sec >= g->p.nr_secs) {
1192                                g->stop_work = true;
1193                                break;
1194                        }
1195                }
1196
1197                /* Update the summary at most once per second: */
1198                if (start.tv_sec == stop.tv_sec)
1199                        continue;
1200
1201                /*
1202                 * Perturb the first task's equilibrium every g->p.perturb_secs seconds,
1203                 * by migrating to CPU#0:
1204                 */
1205                if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
1206                        cpu_set_t orig_mask;
1207                        int target_cpu;
1208                        int this_cpu;
1209
1210                        last_perturbance = stop.tv_sec;
1211
1212                        /*
1213                         * Depending on where we are running, move into
1214                         * the other half of the system, to create some
1215                         * real disturbance:
1216                         */
1217                        this_cpu = g->threads[task_nr].curr_cpu;
1218                        if (this_cpu < g->p.nr_cpus/2)
1219                                target_cpu = g->p.nr_cpus-1;
1220                        else
1221                                target_cpu = 0;
1222
1223                        orig_mask = bind_to_cpu(target_cpu);
1224
1225                        /* Here we are running on the target CPU already */
1226                        if (details >= 1)
 1227                                printf(" (injecting perturbation, moved to CPU#%d)\n", target_cpu);
1228
1229                        bind_to_cpumask(orig_mask);
1230                }
1231
1232                if (details >= 3) {
1233                        timersub(&stop, &start, &diff);
1234                        runtime_ns_max = diff.tv_sec * NSEC_PER_SEC;
1235                        runtime_ns_max += diff.tv_usec * NSEC_PER_USEC;
1236
1237                        if (details >= 0) {
1238                                printf(" #%2d / %2d: %14.2lf nsecs/op [val: %016"PRIx64"]\n",
1239                                        process_nr, thread_nr, runtime_ns_max / bytes_done, val);
1240                        }
1241                        fflush(stdout);
1242                }
1243                if (!last_task)
1244                        continue;
1245
1246                timersub(&stop, &start0, &diff);
1247                runtime_ns_max = diff.tv_sec * NSEC_PER_SEC;
1248                runtime_ns_max += diff.tv_usec * NSEC_PER_USEC;
1249
1250                show_summary(runtime_ns_max, l, &convergence);
1251        }
1252
1253        gettimeofday(&stop, NULL);
1254        timersub(&stop, &start0, &diff);
1255        td->runtime_ns = diff.tv_sec * NSEC_PER_SEC;
1256        td->runtime_ns += diff.tv_usec * NSEC_PER_USEC;
1257        td->speed_gbs = bytes_done / (td->runtime_ns / NSEC_PER_SEC) / 1e9;
1258
1259        getrusage(RUSAGE_THREAD, &rusage);
1260        td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC;
1261        td->system_time_ns += rusage.ru_stime.tv_usec * NSEC_PER_USEC;
1262        td->user_time_ns = rusage.ru_utime.tv_sec * NSEC_PER_SEC;
1263        td->user_time_ns += rusage.ru_utime.tv_usec * NSEC_PER_USEC;
1264
1265        free_data(thread_data, g->p.bytes_thread);
1266
1267        pthread_mutex_lock(&g->stop_work_mutex);
1268        g->bytes_done += bytes_done;
1269        pthread_mutex_unlock(&g->stop_work_mutex);
1270
1271        return NULL;
1272}
1273
1274/*
1275 * A worker process starts a couple of threads:
1276 */
1277static void worker_process(int process_nr)
1278{
1279        pthread_mutex_t process_lock;
1280        struct thread_data *td;
1281        pthread_t *pthreads;
1282        u8 *process_data;
1283        int task_nr;
1284        int ret;
1285        int t;
1286
1287        pthread_mutex_init(&process_lock, NULL);
1288        set_taskname("process %d", process_nr);
1289
1290        /*
1291         * Pick up the memory policy and the CPU binding of our first thread,
1292         * so that we initialize memory accordingly:
1293         */
1294        task_nr = process_nr*g->p.nr_threads;
1295        td = g->threads + task_nr;
1296
1297        bind_to_memnode(td->bind_node);
1298        bind_to_cpumask(td->bind_cpumask);
1299
1300        pthreads = zalloc(g->p.nr_threads * sizeof(pthread_t));
1301        process_data = setup_private_data(g->p.bytes_process);
1302
1303        if (g->p.show_details >= 3) {
1304                printf(" # process %2d global mem: %p, process mem: %p\n",
1305                        process_nr, g->data, process_data);
1306        }
1307
1308        for (t = 0; t < g->p.nr_threads; t++) {
1309                task_nr = process_nr*g->p.nr_threads + t;
1310                td = g->threads + task_nr;
1311
1312                td->process_data = process_data;
1313                td->process_nr   = process_nr;
1314                td->thread_nr    = t;
1315                td->task_nr      = task_nr;
1316                td->val          = rand();
1317                td->curr_cpu     = -1;
1318                td->process_lock = &process_lock;
1319
1320                ret = pthread_create(pthreads + t, NULL, worker_thread, td);
1321                BUG_ON(ret);
1322        }
1323
1324        for (t = 0; t < g->p.nr_threads; t++) {
1325                ret = pthread_join(pthreads[t], NULL);
1326                BUG_ON(ret);
1327        }
1328
1329        free_data(process_data, g->p.bytes_process);
1330        free(pthreads);
1331}
1332
1333static void print_summary(void)
1334{
1335        if (g->p.show_details < 0)
1336                return;
1337
1338        printf("\n ###\n");
1339        printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
1340                g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
1341        printf(" #      %5dx %5ldMB global  shared mem operations\n",
1342                        g->p.nr_loops, g->p.bytes_global/1024/1024);
1343        printf(" #      %5dx %5ldMB process shared mem operations\n",
1344                        g->p.nr_loops, g->p.bytes_process/1024/1024);
1345        printf(" #      %5dx %5ldMB thread  local  mem operations\n",
1346                        g->p.nr_loops, g->p.bytes_thread/1024/1024);
1347
1348        printf(" ###\n");
1349
1350        printf("\n ###\n"); fflush(stdout);
1351}
1352
1353static void init_thread_data(void)
1354{
1355        ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
1356        int t;
1357
1358        g->threads = zalloc_shared_data(size);
1359
1360        for (t = 0; t < g->p.nr_tasks; t++) {
1361                struct thread_data *td = g->threads + t;
1362                int cpu;
1363
1364                /* Allow all nodes by default: */
1365                td->bind_node = -1;
1366
1367                /* Allow all CPUs by default: */
1368                CPU_ZERO(&td->bind_cpumask);
1369                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
1370                        CPU_SET(cpu, &td->bind_cpumask);
1371        }
1372}
1373
1374static void deinit_thread_data(void)
1375{
1376        ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
1377
1378        free_data(g->threads, size);
1379}
1380
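     /*
      * Set up the shared global state: copy the parsed options, size the
      * working sets, allocate the shared data area and the process-shared
      * mutexes, and apply any -C/-M bindings:
      */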
1381static int init(void)
1382{
1383        g = (void *)alloc_data(sizeof(*g), MAP_SHARED, 1, 0, 0 /* THP */, 0);
1384
1385        /* Copy over options: */
1386        g->p = p0;
1387
1388        g->p.nr_cpus = numa_num_configured_cpus();
1389
1390        g->p.nr_nodes = numa_max_node() + 1;
1391
1392        /* char array in count_process_nodes(): */
1393        BUG_ON(g->p.nr_nodes > MAX_NR_NODES || g->p.nr_nodes < 0);
1394
1395        if (g->p.show_quiet && !g->p.show_details)
1396                g->p.show_details = -1;
1397
1398        /* Some memory should be specified: */
1399        if (!g->p.mb_global_str && !g->p.mb_proc_str && !g->p.mb_thread_str)
1400                return -1;
1401
1402        if (g->p.mb_global_str) {
1403                g->p.mb_global = atof(g->p.mb_global_str);
1404                BUG_ON(g->p.mb_global < 0);
1405        }
1406
1407        if (g->p.mb_proc_str) {
1408                g->p.mb_proc = atof(g->p.mb_proc_str);
1409                BUG_ON(g->p.mb_proc < 0);
1410        }
1411
1412        if (g->p.mb_proc_locked_str) {
1413                g->p.mb_proc_locked = atof(g->p.mb_proc_locked_str);
1414                BUG_ON(g->p.mb_proc_locked < 0);
1415                BUG_ON(g->p.mb_proc_locked > g->p.mb_proc);
1416        }
1417
1418        if (g->p.mb_thread_str) {
1419                g->p.mb_thread = atof(g->p.mb_thread_str);
1420                BUG_ON(g->p.mb_thread < 0);
1421        }
1422
1423        BUG_ON(g->p.nr_threads <= 0);
1424        BUG_ON(g->p.nr_proc <= 0);
1425
1426        g->p.nr_tasks = g->p.nr_proc*g->p.nr_threads;
1427
1428        g->p.bytes_global               = g->p.mb_global        *1024L*1024L;
1429        g->p.bytes_process              = g->p.mb_proc          *1024L*1024L;
1430        g->p.bytes_process_locked       = g->p.mb_proc_locked   *1024L*1024L;
1431        g->p.bytes_thread               = g->p.mb_thread        *1024L*1024L;
1432
1433        g->data = setup_shared_data(g->p.bytes_global);
1434
1435        /* Startup serialization: */
1436        init_global_mutex(&g->start_work_mutex);
1437        init_global_mutex(&g->startup_mutex);
1438        init_global_mutex(&g->startup_done_mutex);
1439        init_global_mutex(&g->stop_work_mutex);
1440
1441        init_thread_data();
1442
1443        tprintf("#\n");
1444        if (parse_setup_cpu_list() || parse_setup_node_list())
1445                return -1;
1446        tprintf("#\n");
1447
1448        print_summary();
1449
1450        return 0;
1451}
1452
1453static void deinit(void)
1454{
1455        free_data(g->data, g->p.bytes_global);
1456        g->data = NULL;
1457
1458        deinit_thread_data();
1459
1460        free_data(g, sizeof(*g));
1461        g = NULL;
1462}
1463
1464/*
1465 * Print a short or long result, depending on the verbosity setting:
1466 */
1467static void print_res(const char *name, double val,
1468                      const char *txt_unit, const char *txt_short, const char *txt_long)
1469{
1470        if (!name)
1471                name = "main,";
1472
1473        if (!g->p.show_quiet)
1474                printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
1475        else
1476                printf(" %14.3f %s\n", val, txt_long);
1477}
1478
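     /*
      * Run a single benchmark configuration: fork one worker process per -p
      * instance, optionally serialize their startup, wait for completion and
      * print the runtime/bandwidth summary:
      */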
1479static int __bench_numa(const char *name)
1480{
1481        struct timeval start, stop, diff;
1482        u64 runtime_ns_min, runtime_ns_sum;
1483        pid_t *pids, pid, wpid;
1484        double delta_runtime;
1485        double runtime_avg;
1486        double runtime_sec_max;
1487        double runtime_sec_min;
1488        int wait_stat;
1489        double bytes;
1490        int i, t, p;
1491
1492        if (init())
1493                return -1;
1494
1495        pids = zalloc(g->p.nr_proc * sizeof(*pids));
1496        pid = -1;
1497
 1498        /* All threads try to acquire it; this way we can wait for them to start up: */
1499        pthread_mutex_lock(&g->start_work_mutex);
1500
1501        if (g->p.serialize_startup) {
1502                tprintf(" #\n");
1503                tprintf(" # Startup synchronization: ..."); fflush(stdout);
1504        }
1505
1506        gettimeofday(&start, NULL);
1507
1508        for (i = 0; i < g->p.nr_proc; i++) {
1509                pid = fork();
1510                dprintf(" # process %2d: PID %d\n", i, pid);
1511
1512                BUG_ON(pid < 0);
1513                if (!pid) {
1514                        /* Child process: */
1515                        worker_process(i);
1516
1517                        exit(0);
1518                }
1519                pids[i] = pid;
1520
1521        }
1522        /* Wait for all the threads to start up: */
1523        while (g->nr_tasks_started != g->p.nr_tasks)
1524                usleep(USEC_PER_MSEC);
1525
1526        BUG_ON(g->nr_tasks_started != g->p.nr_tasks);
1527
1528        if (g->p.serialize_startup) {
1529                double startup_sec;
1530
1531                pthread_mutex_lock(&g->startup_done_mutex);
1532
1533                /* This will start all threads: */
1534                pthread_mutex_unlock(&g->start_work_mutex);
1535
1536                /* This mutex is locked - the last started thread will wake us: */
1537                pthread_mutex_lock(&g->startup_done_mutex);
1538
1539                gettimeofday(&stop, NULL);
1540
1541                timersub(&stop, &start, &diff);
1542
1543                startup_sec = diff.tv_sec * NSEC_PER_SEC;
1544                startup_sec += diff.tv_usec * NSEC_PER_USEC;
1545                startup_sec /= NSEC_PER_SEC;
1546
1547                tprintf(" threads initialized in %.6f seconds.\n", startup_sec);
1548                tprintf(" #\n");
1549
1550                start = stop;
1551                pthread_mutex_unlock(&g->startup_done_mutex);
1552        } else {
1553                gettimeofday(&start, NULL);
1554        }
1555
1556        /* Parent process: */
1557
1558
1559        for (i = 0; i < g->p.nr_proc; i++) {
1560                wpid = waitpid(pids[i], &wait_stat, 0);
1561                BUG_ON(wpid < 0);
1562                BUG_ON(!WIFEXITED(wait_stat));
1563
1564        }
1565
1566        runtime_ns_sum = 0;
1567        runtime_ns_min = -1LL;
1568
1569        for (t = 0; t < g->p.nr_tasks; t++) {
1570                u64 thread_runtime_ns = g->threads[t].runtime_ns;
1571
1572                runtime_ns_sum += thread_runtime_ns;
1573                runtime_ns_min = min(thread_runtime_ns, runtime_ns_min);
1574        }
1575
1576        gettimeofday(&stop, NULL);
1577        timersub(&stop, &start, &diff);
1578
1579        BUG_ON(bench_format != BENCH_FORMAT_DEFAULT);
1580
1581        tprintf("\n ###\n");
1582        tprintf("\n");
1583
1584        runtime_sec_max = diff.tv_sec * NSEC_PER_SEC;
1585        runtime_sec_max += diff.tv_usec * NSEC_PER_USEC;
1586        runtime_sec_max /= NSEC_PER_SEC;
1587
1588        runtime_sec_min = runtime_ns_min / NSEC_PER_SEC;
1589
1590        bytes = g->bytes_done;
1591        runtime_avg = (double)runtime_ns_sum / g->p.nr_tasks / NSEC_PER_SEC;
1592
1593        if (g->p.measure_convergence) {
1594                print_res(name, runtime_sec_max,
1595                        "secs,", "NUMA-convergence-latency", "secs latency to NUMA-converge");
1596        }
1597
1598        print_res(name, runtime_sec_max,
1599                "secs,", "runtime-max/thread",  "secs slowest (max) thread-runtime");
1600
1601        print_res(name, runtime_sec_min,
1602                "secs,", "runtime-min/thread",  "secs fastest (min) thread-runtime");
1603
1604        print_res(name, runtime_avg,
1605                "secs,", "runtime-avg/thread",  "secs average thread-runtime");
1606
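            /*
             * Half of the max-min runtime spread, reported as a percentage of
             * the slowest thread's runtime; this stands in for the max-vs-avg
             * difference named in the description string below.
             */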
1607        delta_runtime = (runtime_sec_max - runtime_sec_min)/2.0;
1608        print_res(name, delta_runtime / runtime_sec_max * 100.0,
1609                "%,", "spread-runtime/thread",  "% difference between max/avg runtime");
1610
1611        print_res(name, bytes / g->p.nr_tasks / 1e9,
1612                "GB,", "data/thread",           "GB data processed, per thread");
1613
1614        print_res(name, bytes / 1e9,
1615                "GB,", "data-total",            "GB data processed, total");
1616
1617        print_res(name, runtime_sec_max * NSEC_PER_SEC / (bytes / g->p.nr_tasks),
1618                "nsecs,", "runtime/byte/thread","nsecs/byte/thread runtime");
1619
1620        print_res(name, bytes / g->p.nr_tasks / 1e9 / runtime_sec_max,
1621                "GB/sec,", "thread-speed",      "GB/sec/thread speed");
1622
1623        print_res(name, bytes / runtime_sec_max / 1e9,
1624                "GB/sec,", "total-speed",       "GB/sec total speed");
1625
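            /* At higher verbosity (show_details >= 2), also break the results down per thread: */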
1626        if (g->p.show_details >= 2) {
1627                char tname[14 + 2 * 10 + 1];
1628                struct thread_data *td;
1629                for (p = 0; p < g->p.nr_proc; p++) {
1630                        for (t = 0; t < g->p.nr_threads; t++) {
1631                                memset(tname, 0, sizeof(tname));
1632                                td = g->threads + p*g->p.nr_threads + t;
1633                                snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
1634                                print_res(tname, td->speed_gbs,
1635                                        "GB/sec",       "thread-speed", "GB/sec/thread speed");
1636                                print_res(tname, (double)td->system_time_ns / NSEC_PER_SEC,
1637                                        "secs", "thread-system-time", "system CPU time/thread");
1638                                print_res(tname, (double)td->user_time_ns / NSEC_PER_SEC,
1639                                        "secs", "thread-user-time", "user CPU time/thread");
1640                        }
1641                }
1642        }
1643
1644        free(pids);
1645
1646        deinit();
1647
1648        return 0;
1649}
1650
1651#define MAX_ARGS 50
1652
1653static int command_size(const char **argv)
1654{
1655        int size = 0;
1656
1657        while (*argv) {
1658                size++;
1659                argv++;
1660        }
1661
1662        BUG_ON(size >= MAX_ARGS);
1663
1664        return size;
1665}
1666
1667static void init_params(struct params *p, const char *name, int argc, const char **argv)
1668{
1669        int i;
1670
1671        printf("\n # Running %s \"perf bench numa", name);
1672
1673        for (i = 0; i < argc; i++)
1674                printf(" %s", argv[i]);
1675
1676        printf("\"\n");
1677
1678        memset(p, 0, sizeof(*p));
1679
1680        /* Initialize nonzero defaults: */
1681
1682        p->serialize_startup            = true;
1683        p->data_reads                   = true;
1684        p->data_writes                  = true;
1685        p->data_backwards               = true;
1686        p->data_rand_walk               = true;
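            /* -1 wraps the u32 loop count to UINT32_MAX, i.e. effectively unbounded; the nr_secs default set below is presumably what ends the run: */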
1687        p->nr_loops                     = -1;
1688        p->init_random                  = true;
1689        p->mb_global_str                = "1";
1690        p->nr_proc                      = 1;
1691        p->nr_threads                   = 1;
1692        p->nr_secs                      = 5;
1693        p->run_all                      = argc == 1;
1694}
1695
1696static int run_bench_numa(const char *name, const char **argv)
1697{
1698        int argc = command_size(argv);
1699
1700        init_params(&p0, name, argc, argv);
1701        argc = parse_options(argc, argv, options, bench_numa_usage, 0);
1702        if (argc)
1703                goto err;
1704
1705        if (__bench_numa(name))
1706                goto err;
1707
1708        return 0;
1709
1710err:
1711        return -1;
1712}
1713
1714#define OPT_BW_RAM              "-s",  "20", "-zZq",    "--thp", " 1", "--no-data_rand_walk"
1715#define OPT_BW_RAM_NOTHP        OPT_BW_RAM,             "--thp", "-1"
1716
1717#define OPT_CONV                "-s", "100", "-zZ0qcm", "--thp", " 1"
1718#define OPT_CONV_NOTHP          OPT_CONV,               "--thp", "-1"
1719
1720#define OPT_BW                  "-s",  "20", "-zZ0q",   "--thp", " 1"
1721#define OPT_BW_NOTHP            OPT_BW,                 "--thp", "-1"
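    /*
     * Each OPT_* macro expands to additional argv strings for the test table
     * below, so e.g. the "RAM-bw-local" row is parsed as if the user had run
     * (a sketch of the expansion):
     *
     *   perf bench numa mem -p 1 -t 1 -P 1024 -C 0 -M 0 -s 20 -zZq --thp 1 --no-data_rand_walk
     *
     * The *_NOTHP variants append "--thp -1" to override the earlier
     * "--thp 1", presumably disabling transparent hugepages for that run.
     */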
1722
1723/*
1724 * The built-in test-suite executed by "perf bench numa -a".
1725 *
1726 * (A minimum of 4 nodes and 16 GB of RAM is recommended.)
1727 */
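    /*
     * Reading the rows: -p/-t set the number of processes and of threads per
     * process, -P/-T the per-process/per-thread working set size (in MB,
     * matching the mb_proc/mb_thread parameters), and -C/-M presumably pin
     * tasks to CPUs and bind their data to NUMA nodes - e.g. "-C 0 -M 1"
     * runs on CPU 0 with memory on node 1, hence "RAM-bw-remote".
     */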
1728static const char *tests[][MAX_ARGS] = {
1729   /* Basic single-stream NUMA bandwidth measurements: */
1730   { "RAM-bw-local,",     "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
1731                          "-C" ,   "0", "-M",   "0", OPT_BW_RAM },
1732   { "RAM-bw-local-NOTHP,",
1733                          "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
1734                          "-C" ,   "0", "-M",   "0", OPT_BW_RAM_NOTHP },
1735   { "RAM-bw-remote,",    "mem",  "-p",  "1",  "-t",  "1", "-P", "1024",
1736                          "-C" ,   "0", "-M",   "1", OPT_BW_RAM },
1737
1738   /* 2-stream NUMA bandwidth measurements: */
1739   { "RAM-bw-local-2x,",  "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
1740                           "-C", "0,2", "-M", "0x2", OPT_BW_RAM },
1741   { "RAM-bw-remote-2x,", "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
1742                           "-C", "0,2", "-M", "1x2", OPT_BW_RAM },
1743
1744   /* Cross-stream NUMA bandwidth measurement: */
1745   { "RAM-bw-cross,",     "mem",  "-p",  "2",  "-t",  "1", "-P", "1024",
1746                           "-C", "0,8", "-M", "1,0", OPT_BW_RAM },
1747
1748   /* Convergence latency measurements: */
1749   { " 1x3-convergence,", "mem",  "-p",  "1", "-t",  "3", "-P",  "512", OPT_CONV },
1750   { " 1x4-convergence,", "mem",  "-p",  "1", "-t",  "4", "-P",  "512", OPT_CONV },
1751   { " 1x6-convergence,", "mem",  "-p",  "1", "-t",  "6", "-P", "1020", OPT_CONV },
1752   { " 2x3-convergence,", "mem",  "-p",  "2", "-t",  "3", "-P", "1020", OPT_CONV },
1753   { " 3x3-convergence,", "mem",  "-p",  "3", "-t",  "3", "-P", "1020", OPT_CONV },
1754   { " 4x4-convergence,", "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV },
1755   { " 4x4-convergence-NOTHP,",
1756                          "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_CONV_NOTHP },
1757   { " 4x6-convergence,", "mem",  "-p",  "4", "-t",  "6", "-P", "1020", OPT_CONV },
1758   { " 4x8-convergence,", "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_CONV },
1759   { " 8x4-convergence,", "mem",  "-p",  "8", "-t",  "4", "-P",  "512", OPT_CONV },
1760   { " 8x4-convergence-NOTHP,",
1761                          "mem",  "-p",  "8", "-t",  "4", "-P",  "512", OPT_CONV_NOTHP },
1762   { " 3x1-convergence,", "mem",  "-p",  "3", "-t",  "1", "-P",  "512", OPT_CONV },
1763   { " 4x1-convergence,", "mem",  "-p",  "4", "-t",  "1", "-P",  "512", OPT_CONV },
1764   { " 8x1-convergence,", "mem",  "-p",  "8", "-t",  "1", "-P",  "512", OPT_CONV },
1765   { "16x1-convergence,", "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_CONV },
1766   { "32x1-convergence,", "mem",  "-p", "32", "-t",  "1", "-P",  "128", OPT_CONV },
1767
1768   /* Various NUMA process/thread layout bandwidth measurements: */
1769   { " 2x1-bw-process,",  "mem",  "-p",  "2", "-t",  "1", "-P", "1024", OPT_BW },
1770   { " 3x1-bw-process,",  "mem",  "-p",  "3", "-t",  "1", "-P", "1024", OPT_BW },
1771   { " 4x1-bw-process,",  "mem",  "-p",  "4", "-t",  "1", "-P", "1024", OPT_BW },
1772   { " 8x1-bw-process,",  "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW },
1773   { " 8x1-bw-process-NOTHP,",
1774                          "mem",  "-p",  "8", "-t",  "1", "-P", " 512", OPT_BW_NOTHP },
1775   { "16x1-bw-process,",  "mem",  "-p", "16", "-t",  "1", "-P",  "256", OPT_BW },
1776
1777   { " 4x1-bw-thread,",   "mem",  "-p",  "1", "-t",  "4", "-T",  "256", OPT_BW },
1778   { " 8x1-bw-thread,",   "mem",  "-p",  "1", "-t",  "8", "-T",  "256", OPT_BW },
1779   { "16x1-bw-thread,",   "mem",  "-p",  "1", "-t", "16", "-T",  "128", OPT_BW },
1780   { "32x1-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-T",   "64", OPT_BW },
1781
1782   { " 2x3-bw-thread,",   "mem",  "-p",  "2", "-t",  "3", "-P",  "512", OPT_BW },
1783   { " 4x4-bw-thread,",   "mem",  "-p",  "4", "-t",  "4", "-P",  "512", OPT_BW },
1784   { " 4x6-bw-thread,",   "mem",  "-p",  "4", "-t",  "6", "-P",  "512", OPT_BW },
1785   { " 4x8-bw-thread,",   "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW },
1786   { " 4x8-bw-thread-NOTHP,",
1787                          "mem",  "-p",  "4", "-t",  "8", "-P",  "512", OPT_BW_NOTHP },
1788   { " 3x3-bw-thread,",   "mem",  "-p",  "3", "-t",  "3", "-P",  "512", OPT_BW },
1789   { " 5x5-bw-thread,",   "mem",  "-p",  "5", "-t",  "5", "-P",  "512", OPT_BW },
1790
1791   { "2x16-bw-thread,",   "mem",  "-p",  "2", "-t", "16", "-P",  "512", OPT_BW },
1792   { "1x32-bw-thread,",   "mem",  "-p",  "1", "-t", "32", "-P", "2048", OPT_BW },
1793
1794   { "numa02-bw,",        "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW },
1795   { "numa02-bw-NOTHP,",  "mem",  "-p",  "1", "-t", "32", "-T",   "32", OPT_BW_NOTHP },
1796   { "numa01-bw-thread,", "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW },
1797   { "numa01-bw-thread-NOTHP,",
1798                          "mem",  "-p",  "2", "-t", "16", "-T",  "192", OPT_BW_NOTHP },
1799};
1800
1801static int bench_all(void)
1802{
1803        int nr = ARRAY_SIZE(tests);
1804        int ret;
1805        int i;
1806
1807        ret = system("echo ' #'; echo ' # Running test on: '$(uname -a); echo ' #'");
1808        BUG_ON(ret < 0);
1809
1810        for (i = 0; i < nr; i++) {
1811                run_bench_numa(tests[i][0], tests[i] + 1);
1812        }
1813
1814        printf("\n");
1815
1816        return 0;
1817}
1818
1819int bench_numa(int argc, const char **argv)
1820{
1821        init_params(&p0, "main,", argc, argv);
1822        argc = parse_options(argc, argv, options, bench_numa_usage, 0);
1823        if (argc)
1824                goto err;
1825
1826        if (p0.run_all)
1827                return bench_all();
1828
1829        if (__bench_numa(NULL))
1830                goto err;
1831
1832        return 0;
1833
1834err:
1835        usage_with_options(numa_usage, options);
1836        return -1;
1837}
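    /*
     * Dispatch summary: when p0.run_all is set (init_params() enables it for
     * a single-argument invocation, and presumably an -a/--all option does
     * too, per the comment above the tests[] table), the whole built-in
     * suite is run through bench_all(); otherwise the remaining options
     * describe a single configuration handed straight to __bench_numa().
     */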
1838