linux/tools/perf/bench/epoll-wait.c
// SPDX-License-Identifier: GPL-2.0
#ifdef HAVE_EVENTFD_SUPPORT
/*
 * Copyright (C) 2018 Davidlohr Bueso.
 *
 * This program benchmarks concurrent epoll_wait(2) monitoring multiple
 * file descriptors under one of two load balancing models. The first,
 * and default, is the single/combined queueing (which refers to a single
 * epoll instance for N worker threads):
 *
 *                          |---> [worker A]
 *                          |---> [worker B]
 *        [combined queue]  .---> [worker C]
 *                          |---> [worker D]
 *                          |---> [worker E]
 *
 * The second model, enabled via the --multiq option, uses multiple
 * queueing (which refers to one epoll instance per worker). For example,
 * short lived tcp connections in a high throughput httpd server will
 * distribute the accept()'ing connections across CPUs. In this case each
 * worker does a limited amount of processing.
 *
 *             [queue A]  ---> [worker]
 *             [queue B]  ---> [worker]
 *             [queue C]  ---> [worker]
 *             [queue D]  ---> [worker]
 *             [queue E]  ---> [worker]
 *
 * Naturally, the single queue will enforce more concurrency on the epoll
 * instance, and can therefore scale poorly compared to multiple queues.
 * However, this benchmark outputs raw data and must be taken with a grain
 * of salt when choosing how to make use of sys_epoll.
 *
 * Each thread has a number of private, nonblocking file descriptors,
 * referred to as fdmap. A writer thread will constantly be writing to
 * the fdmaps of all threads, minimizing each thread's chances of
 * epoll_wait not finding any ready read events and blocking, as this
 * is not what we want to stress. The size of the fdmap can be adjusted
 * by the user; enlarging the value will increase the chances of
 * epoll_wait(2) blocking, as the linear writer thread will take "longer",
 * at least at a high level.
 *
 * Note that because fds are private to each thread, this workload does
 * not stress scenarios where multiple tasks are awoken per ready IO;
 * i.e., EPOLLEXCLUSIVE semantics.
 *
 * The end result/metric is throughput: number of ops/second where an
 * operation consists of:
 *
 *   epoll_wait(2) + [others]
 *
 *        ... where [others] is the cost of re-adding the fd (EPOLLET),
 *            or rearming it (EPOLLONESHOT).
 *
 * The purpose of this program is to be useful for measuring kernel
 * related changes to sys_epoll, and not for comparing different IO
 * polling methods. Hence everything is very ad hoc and outputs raw
 * microbenchmark numbers. Also, this uses eventfd; similar tools tend
 * to use pipes or sockets, but the result is the same.
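 *
 * Example invocation (a sketch; the options shown are the ones declared
 * in the options[] table below):
 *
 *   perf bench epoll wait -t 4 -f 128 -r 8 --multiq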
 */

/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
#include <unistd.h>

#include <errno.h>
#include <inttypes.h>
#include <signal.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <perf/cpumap.h>

#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"

#include <err.h>

#define printinfo(fmt, arg...) \
        do { if (__verbose) { printf(fmt, ## arg); fflush(stdout); } } while (0)

static unsigned int nthreads = 0;
static unsigned int nsecs    = 8;
static bool wdone, done, __verbose, randomize, nonblocking;

/*
 * epoll related shared variables.
 */

/* Maximum number of nesting allowed inside epoll sets */
#define EPOLL_MAXNESTS 4

static int epollfd;
static int *epollfdp;
static bool noaffinity;
static unsigned int nested = 0;
static bool et; /* edge-trigger */
static bool oneshot;
static bool multiq; /* use an epoll instance per thread */

/* number of fds to monitor, per thread */
static unsigned int nfds = 64;

static pthread_mutex_t thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static pthread_cond_t thread_parent, thread_worker;

struct worker {
        int tid;
        int epollfd; /* for --multiq */
        pthread_t thread;
        unsigned long ops;
        int *fdmap;
};

static const struct option options[] = {
        /* general benchmark options */
        OPT_UINTEGER('t', "threads", &nthreads, "Specify number of threads"),
        OPT_UINTEGER('r', "runtime", &nsecs, "Specify runtime (in seconds)"),
        OPT_UINTEGER('f', "nfds",    &nfds,  "Specify number of file descriptors to monitor for each thread"),
        OPT_BOOLEAN( 'n', "noaffinity",  &noaffinity,   "Disables CPU affinity"),
        OPT_BOOLEAN('R', "randomize", &randomize,   "Enable random write behaviour (default is linear)"),
        OPT_BOOLEAN( 'v', "verbose", &__verbose, "Verbose mode"),

        /* epoll specific options */
        OPT_BOOLEAN( 'm', "multiq",  &multiq,   "Use multiple epoll instances (one per thread)"),
        OPT_BOOLEAN( 'B', "nonblocking", &nonblocking, "Nonblocking epoll_wait(2) behaviour"),
        OPT_UINTEGER( 'N', "nested",  &nested,   "Nesting level of the epoll hierarchy (default is 0, no nesting)"),
        OPT_BOOLEAN( 'S', "oneshot",  &oneshot,   "Use EPOLLONESHOT semantics"),
        OPT_BOOLEAN( 'E', "edge",  &et,   "Use Edge-triggered interface (default is LT)"),

        OPT_END()
};

static const char * const bench_epoll_wait_usage[] = {
        "perf bench epoll wait <options>",
        NULL
};


/*
 * Arrange the N elements of ARRAY in random order.
 * Only effective if N is much smaller than RAND_MAX;
 * if this may not be the case, use a better random
 * number generator. -- Ben Pfaff.
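 * (The loop below is a Fisher-Yates shuffle over SIZE-byte elements.)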
 */
static void shuffle(void *array, size_t n, size_t size)
{
        char *carray = array;
        void *aux;
        size_t i;

        if (n <= 1)
                return;

        aux = calloc(1, size);
        if (!aux)
                err(EXIT_FAILURE, "calloc");

        for (i = 1; i < n; ++i) {
                size_t j = i + rand() / (RAND_MAX / (n - i) + 1);
                j *= size;

                memcpy(aux, &carray[j], size);
                memcpy(&carray[j], &carray[i*size], size);
                memcpy(&carray[i*size], aux, size);
        }

        free(aux);
}


static void *workerfn(void *arg)
{
        int fd, ret, r;
        struct worker *w = (struct worker *) arg;
        unsigned long ops = w->ops;
        struct epoll_event ev;
        uint64_t val;
        int to = nonblocking ? 0 : -1;
        int efd = multiq ? w->epollfd : epollfd;

        pthread_mutex_lock(&thread_lock);
        threads_starting--;
        if (!threads_starting)
                pthread_cond_signal(&thread_parent);
        pthread_cond_wait(&thread_worker, &thread_lock);
        pthread_mutex_unlock(&thread_lock);

        do {
                /*
                 * Block indefinitely waiting for the IN event.
                 * In order to stress the epoll_wait(2) syscall,
                 * call it one event at a time, instead of using
                 * a larger batch (maxevents) limit.
                 */
                do {
                        ret = epoll_wait(efd, &ev, 1, to);
                } while (ret < 0 && errno == EINTR);
                if (ret < 0)
                        err(EXIT_FAILURE, "epoll_wait");

                fd = ev.data.fd;

                do {
                        r = read(fd, &val, sizeof(val));
                } while (!done && (r < 0 && errno == EAGAIN));

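                /*
                 * The epoll_ctl(2) calls below make up the "[others]" cost
                 * described in the header comment; note that their return
                 * value goes unchecked.
                 */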
                if (et) {
                        ev.events = EPOLLIN | EPOLLET;
                        ret = epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ev);
                }

                if (oneshot) {
                        /* rearm the file descriptor with a new event mask */
                        ev.events |= EPOLLIN | EPOLLONESHOT;
                        ret = epoll_ctl(efd, EPOLL_CTL_MOD, fd, &ev);
                }

                ops++;
        } while (!done);

        if (multiq)
                close(w->epollfd);

        w->ops = ops;
        return NULL;
}

static void nest_epollfd(struct worker *w)
{
        unsigned int i;
        struct epoll_event ev;
        int efd = multiq ? w->epollfd : epollfd;

        if (nested > EPOLL_MAXNESTS)
                nested = EPOLL_MAXNESTS;

        epollfdp = calloc(nested, sizeof(*epollfdp));
        if (!epollfdp)
                err(EXIT_FAILURE, "calloc");

        for (i = 0; i < nested; i++) {
                epollfdp[i] = epoll_create(1);
                if (epollfdp[i] < 0)
                        err(EXIT_FAILURE, "epoll_create");
        }

        ev.events = EPOLLHUP; /* anything */
        ev.data.u64 = i; /* any number */

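        /*
         * Chain the nested instances: epollfdp[i - 1] monitors epollfdp[i],
         * and the worker's (or the single, global) epoll instance monitors
         * epollfdp[0] at the top of the hierarchy.
         */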
        for (i = nested - 1; i; i--) {
                if (epoll_ctl(epollfdp[i - 1], EPOLL_CTL_ADD,
                              epollfdp[i], &ev) < 0)
                        err(EXIT_FAILURE, "epoll_ctl");
        }

        if (epoll_ctl(efd, EPOLL_CTL_ADD, *epollfdp, &ev) < 0)
                err(EXIT_FAILURE, "epoll_ctl");
}

static void toggle_done(int sig __maybe_unused,
                        siginfo_t *info __maybe_unused,
                        void *uc __maybe_unused)
{
        /* inform all threads that we're done for the day */
        done = true;
        gettimeofday(&bench__end, NULL);
        timersub(&bench__end, &bench__start, &bench__runtime);
}

static void print_summary(void)
{
        unsigned long avg = avg_stats(&throughput_stats);
        double stddev = stddev_stats(&throughput_stats);

        printf("\nAveraged %lu operations/sec (+- %.2f%%), total secs = %d\n",
               avg, rel_stddev_stats(stddev, avg),
               (int)bench__runtime.tv_sec);
}

static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
{
        pthread_attr_t thread_attr, *attrp = NULL;
        cpu_set_t cpuset;
        unsigned int i, j;
        int ret = 0, events = EPOLLIN;

        if (oneshot)
                events |= EPOLLONESHOT;
        if (et)
                events |= EPOLLET;

        printinfo("starting worker/consumer %sthreads%s\n",
                  noaffinity ? "" : "CPU affinity ",
                  nonblocking ? " (nonblocking)" : "");
        if (!noaffinity)
                pthread_attr_init(&thread_attr);

        for (i = 0; i < nthreads; i++) {
                struct worker *w = &worker[i];

                if (multiq) {
                        w->epollfd = epoll_create(1);
                        if (w->epollfd < 0)
                                err(EXIT_FAILURE, "epoll_create");

                        if (nested)
                                nest_epollfd(w);
                }

                w->tid = i;
                w->fdmap = calloc(nfds, sizeof(int));
                if (!w->fdmap)
                        return 1;

                for (j = 0; j < nfds; j++) {
                        int efd = multiq ? w->epollfd : epollfd;
                        struct epoll_event ev;

                        w->fdmap[j] = eventfd(0, EFD_NONBLOCK);
                        if (w->fdmap[j] < 0)
                                err(EXIT_FAILURE, "eventfd");

                        ev.data.fd = w->fdmap[j];
                        ev.events = events;

                        ret = epoll_ctl(efd, EPOLL_CTL_ADD,
                                        w->fdmap[j], &ev);
                        if (ret < 0)
                                err(EXIT_FAILURE, "epoll_ctl");
                }

                if (!noaffinity) {
                        CPU_ZERO(&cpuset);
                        CPU_SET(cpu->map[i % cpu->nr], &cpuset);

                        ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
                        if (ret)
                                err(EXIT_FAILURE, "pthread_attr_setaffinity_np");

                        attrp = &thread_attr;
                }

                ret = pthread_create(&w->thread, attrp, workerfn, w);
                if (ret)
                        err(EXIT_FAILURE, "pthread_create");
        }

        if (!noaffinity)
                pthread_attr_destroy(&thread_attr);

        return ret;
}

static void *writerfn(void *p)
{
        struct worker *worker = p;
        size_t i, j, iter;
        const uint64_t val = 1;
        ssize_t sz;
        struct timespec ts = { .tv_sec = 0,
                               .tv_nsec = 500 };

        printinfo("starting writer-thread: doing %s writes ...\n",
                  randomize ? "random" : "linear");

        for (iter = 0; !wdone; iter++) {
                if (randomize) {
                        shuffle((void *)worker, nthreads, sizeof(*worker));
                }

                for (i = 0; i < nthreads; i++) {
                        struct worker *w = &worker[i];

                        if (randomize) {
                                shuffle((void *)w->fdmap, nfds, sizeof(int));
                        }

                        for (j = 0; j < nfds; j++) {
                                do {
                                        sz = write(w->fdmap[j], &val, sizeof(val));
                                } while (!wdone && (sz < 0 && errno == EAGAIN));
                        }
                }

                nanosleep(&ts, NULL);
        }

        printinfo("exiting writer-thread (total full-loops: %zu)\n", iter);
        return NULL;
}

static int cmpworker(const void *p1, const void *p2)
{
        struct worker *w1 = (struct worker *) p1;
        struct worker *w2 = (struct worker *) p2;

        return w1->tid - w2->tid;
}

int bench_epoll_wait(int argc, const char **argv)
{
        int ret = 0;
        struct sigaction act;
        unsigned int i;
        struct worker *worker = NULL;
        struct perf_cpu_map *cpu;
        pthread_t wthread;
        struct rlimit rl, prevrl;

        argc = parse_options(argc, argv, options, bench_epoll_wait_usage, 0);
        if (argc) {
                usage_with_options(bench_epoll_wait_usage, options);
                exit(EXIT_FAILURE);
        }

        memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);

        cpu = perf_cpu_map__new(NULL);
        if (!cpu)
                goto errmem;

        /* a single, main epoll instance */
        if (!multiq) {
                epollfd = epoll_create(1);
                if (epollfd < 0)
                        err(EXIT_FAILURE, "epoll_create");

                /*
                 * Deal with nested epolls, if any.
                 */
                if (nested)
                        nest_epollfd(NULL);
        }

        printinfo("Using %s queue model\n", multiq ? "multi" : "single");
        printinfo("Nesting level(s): %u\n", nested);

        /* default to the number of CPUs and leave one for the writer pthread */
        if (!nthreads)
                nthreads = cpu->nr - 1;

        worker = calloc(nthreads, sizeof(*worker));
        if (!worker)
                goto errmem;

        if (getrlimit(RLIMIT_NOFILE, &prevrl))
                err(EXIT_FAILURE, "getrlimit");
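        /*
         * Each worker thread needs nfds eventfds, so bump RLIMIT_NOFILE
         * well past that (the extra headroom below is arbitrary slack for
         * epoll instances, std fds, etc.).
         */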
        rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;
        printinfo("Setting RLIMIT_NOFILE rlimit from %" PRIu64 " to: %" PRIu64 "\n",
                  (uint64_t)prevrl.rlim_max, (uint64_t)rl.rlim_max);
        if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
                err(EXIT_FAILURE, "setrlimit");

        printf("Run summary [PID %d]: %u threads monitoring%s on "
               "%u file-descriptors for %u secs.\n\n",
               getpid(), nthreads, oneshot ? " (EPOLLONESHOT semantics)" : "", nfds, nsecs);

        init_stats(&throughput_stats);
        pthread_mutex_init(&thread_lock, NULL);
        pthread_cond_init(&thread_parent, NULL);
        pthread_cond_init(&thread_worker, NULL);

        threads_starting = nthreads;

        gettimeofday(&bench__start, NULL);

        do_threads(worker, cpu);

        pthread_mutex_lock(&thread_lock);
        while (threads_starting)
                pthread_cond_wait(&thread_parent, &thread_lock);
        pthread_cond_broadcast(&thread_worker);
        pthread_mutex_unlock(&thread_lock);

        /*
         * At this point the workers should be blocked waiting for read events
         * to become ready. Launch the writer which will constantly be writing
         * to each thread's fdmap.
         */
        ret = pthread_create(&wthread, NULL, writerfn, worker);
        if (ret)
                err(EXIT_FAILURE, "pthread_create");

        sleep(nsecs);
        toggle_done(0, NULL, NULL);
        printinfo("main thread: toggling done\n");

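        /*
         * Keep the writer alive for another second so that workers still
         * blocked in epoll_wait(2)/read(2) get woken up and observe 'done'.
         */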
        sleep(1); /* meh */
        wdone = true;
        ret = pthread_join(wthread, NULL);
        if (ret)
                err(EXIT_FAILURE, "pthread_join");

        /* cleanup & report results */
        pthread_cond_destroy(&thread_parent);
        pthread_cond_destroy(&thread_worker);
        pthread_mutex_destroy(&thread_lock);

        /* sort the array back before reporting */
        if (randomize)
                qsort(worker, nthreads, sizeof(struct worker), cmpworker);

        for (i = 0; i < nthreads; i++) {
                unsigned long t = bench__runtime.tv_sec > 0 ?
                        worker[i].ops / bench__runtime.tv_sec : 0;

                update_stats(&throughput_stats, t);

                if (nfds == 1)
                        printf("[thread %2d] fdmap: %p [ %04lu ops/sec ]\n",
                               worker[i].tid, &worker[i].fdmap[0], t);
                else
                        printf("[thread %2d] fdmap: %p ... %p [ %04lu ops/sec ]\n",
                               worker[i].tid, &worker[i].fdmap[0],
                               &worker[i].fdmap[nfds-1], t);
        }

        print_summary();

        close(epollfd);
        return ret;
errmem:
        err(EXIT_FAILURE, "calloc");
}
#endif // HAVE_EVENTFD_SUPPORT