// SPDX-License-Identifier: GPL-2.0
#ifdef HAVE_EVENTFD
/*
 * Copyright (C) 2018 Davidlohr Bueso.
 *
 * This program benchmarks concurrent epoll_wait(2) monitoring multiple
 * file descriptors under one of two load balancing models. The first,
 * and default, is the single/combined queueing (which refers to a single
 * epoll instance for N worker threads):
 *
 *                          |---> [worker A]
 *                          |---> [worker B]
 *        [combined queue]  .---> [worker C]
 *                          |---> [worker D]
 *                          |---> [worker E]
 *
 * The second model, enabled via the --multiq option, uses multiple
 * queueing (which refers to one epoll instance per worker). For example,
 * short-lived TCP connections in a high throughput httpd server will
 * distribute the accept()'ing connections across CPUs. In this case each
 * worker does a limited amount of processing.
 *
 *             [queue A]  ---> [worker]
 *             [queue B]  ---> [worker]
 *             [queue C]  ---> [worker]
 *             [queue D]  ---> [worker]
 *             [queue E]  ---> [worker]
 *
 * Naturally, the single queue will enforce more concurrency on the epoll
 * instance, and can therefore scale poorly compared to multiple queues.
 * However, this is raw benchmark data and must be taken with a grain of
 * salt when choosing how to make use of sys_epoll.
 *
 * Each thread has a number of private, nonblocking file descriptors,
 * referred to as fdmap. A writer thread will constantly be writing to
 * the fdmaps of all threads, minimizing each thread's chances of
 * epoll_wait not finding any ready read events and blocking, as this
 * is not what we want to stress. The size of the fdmap can be adjusted
 * by the user; enlarging the value will increase the chances of
 * epoll_wait(2) blocking, as the linear writer thread will take "longer"
 * to get around to each fd, at least at a high level.
 *
 * Note that because fds are private to each thread, this workload does
 * not stress scenarios where multiple tasks are awoken per ready IO;
 * i.e. EPOLLEXCLUSIVE semantics.
 *
 * The end result/metric is throughput: number of ops/second, where an
 * operation consists of:
 *
 *   epoll_wait(2) + [others]
 *
 *        ... where [others] is the cost of re-adding the fd (EPOLLET),
 *            or rearming it (EPOLLONESHOT).
 *
 * The purpose of this program is to be useful for measuring
 * kernel-related changes to sys_epoll, not for comparing different
 * IO polling methods. Hence everything is very ad hoc and outputs
 * raw microbenchmark numbers. Also, this uses eventfd; similar
 * tools tend to use pipes or sockets, but the result is the same.
 */
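/*
 * Example invocations, for illustration, built from the options defined
 * below (see the option table):
 *
 *   perf bench epoll wait                  # single queue, default settings
 *   perf bench epoll wait -m -t 8 -f 128   # one epoll instance per thread
 *   perf bench epoll wait -E -R            # edge-triggered, randomized writes
 */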

/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
#include <unistd.h>

#include <errno.h>
#include <inttypes.h>
#include <signal.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/types.h>
#include <internal/cpumap.h>
#include <perf/cpumap.h>

#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"

#include <err.h>

#define printinfo(fmt, arg...) \
        do { if (__verbose) { printf(fmt, ## arg); fflush(stdout); } } while (0)

static unsigned int nthreads = 0;
static unsigned int nsecs    = 8;
static struct timeval start, end, runtime;
static bool wdone, done, __verbose, randomize, nonblocking;

/*
 * epoll related shared variables.
 */

/* Maximum number of nesting allowed inside epoll sets */
#define EPOLL_MAXNESTS 4

static int epollfd;
static int *epollfdp;
static bool noaffinity;
static unsigned int nested = 0;
static bool et; /* edge-trigger */
static bool oneshot;
static bool multiq; /* use an epoll instance per thread */

/* amount of fds to monitor, per thread */
static unsigned int nfds = 64;

static pthread_mutex_t thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static pthread_cond_t thread_parent, thread_worker;

struct worker {
        int tid;
        int epollfd; /* for --multiq */
        pthread_t thread;
        unsigned long ops;
        int *fdmap;
};

static const struct option options[] = {
        /* general benchmark options */
        OPT_UINTEGER('t', "threads", &nthreads, "Specify the number of threads"),
        OPT_UINTEGER('r', "runtime", &nsecs, "Specify runtime (in seconds)"),
        OPT_UINTEGER('f', "nfds", &nfds, "Specify the number of file descriptors to monitor for each thread"),
        OPT_BOOLEAN('n', "noaffinity", &noaffinity, "Disables CPU affinity"),
        OPT_BOOLEAN('R', "randomize", &randomize, "Enable random write behaviour (default is linear)"),
        OPT_BOOLEAN('v', "verbose", &__verbose, "Verbose mode"),

        /* epoll specific options */
        OPT_BOOLEAN('m', "multiq", &multiq, "Use multiple epoll instances (one per thread)"),
        OPT_BOOLEAN('B', "nonblocking", &nonblocking, "Nonblocking epoll_wait(2) behaviour"),
        OPT_UINTEGER('N', "nested", &nested, "Nesting level of the epoll hierarchy (default is 0, no nesting)"),
        OPT_BOOLEAN('S', "oneshot", &oneshot, "Use EPOLLONESHOT semantics"),
        OPT_BOOLEAN('E', "edge", &et, "Use the edge-triggered interface (default is LT)"),

        OPT_END()
};

static const char * const bench_epoll_wait_usage[] = {
        "perf bench epoll wait <options>",
        NULL
};

/*
 * Arrange the N elements of ARRAY in random order.
 * Only effective if N is much smaller than RAND_MAX;
 * if this may not be the case, use a better random
 * number generator. -- Ben Pfaff.
 */
static void shuffle(void *array, size_t n, size_t size)
{
        char *carray = array;
        void *aux;
        size_t i;

        if (n <= 1)
                return;

        aux = calloc(1, size);
        if (!aux)
                err(EXIT_FAILURE, "calloc");

        for (i = 1; i < n; ++i) {
                size_t j = i + rand() / (RAND_MAX / (n - i) + 1);
                j *= size;

                memcpy(aux, &carray[j], size);
                memcpy(&carray[j], &carray[i * size], size);
                memcpy(&carray[i * size], aux, size);
        }

        free(aux);
}
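/*
 * Worker loop: wait for one ready event at a time, drain the eventfd
 * counter, then re-add the fd (EPOLLET) or rearm it (EPOLLONESHOT),
 * counting each iteration as one operation.
 */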
static void *workerfn(void *arg)
{
        int fd, ret, r;
        struct worker *w = (struct worker *) arg;
        unsigned long ops = w->ops;
        struct epoll_event ev;
        uint64_t val;
        int to = nonblocking ? 0 : -1;
        int efd = multiq ? w->epollfd : epollfd;

        pthread_mutex_lock(&thread_lock);
        threads_starting--;
        if (!threads_starting)
                pthread_cond_signal(&thread_parent);
        pthread_cond_wait(&thread_worker, &thread_lock);
        pthread_mutex_unlock(&thread_lock);

        do {
                /*
                 * Block indefinitely waiting for the IN event.
                 * In order to stress the epoll_wait(2) syscall,
                 * call it one event at a time, instead of with a
                 * larger batch (max)limit.
                 */
                do {
                        ret = epoll_wait(efd, &ev, 1, to);
                } while (ret < 0 && errno == EINTR);
                if (ret < 0)
                        err(EXIT_FAILURE, "epoll_wait");

                fd = ev.data.fd;

                do {
                        r = read(fd, &val, sizeof(val));
                } while (!done && (r < 0 && errno == EAGAIN));

                if (et) {
                        ev.events = EPOLLIN | EPOLLET;
                        ret = epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ev);
                }

                if (oneshot) {
                        /* rearm the file descriptor with a new event mask */
                        ev.events |= EPOLLIN | EPOLLONESHOT;
                        ret = epoll_ctl(efd, EPOLL_CTL_MOD, fd, &ev);
                }

                ops++;
        } while (!done);

        if (multiq)
                close(w->epollfd);

        w->ops = ops;
        return NULL;
}

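/*
 * Build a chain of 'nested' epoll instances, each one watching the next,
 * and attach the head of the chain to the main (or per-worker) epoll fd,
 * so that wakeups have to traverse the nesting hierarchy.
 */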
static void nest_epollfd(struct worker *w)
{
        unsigned int i;
        struct epoll_event ev;
        int efd = multiq ? w->epollfd : epollfd;

        if (nested > EPOLL_MAXNESTS)
                nested = EPOLL_MAXNESTS;

        epollfdp = calloc(nested, sizeof(*epollfdp));
        if (!epollfdp)
                err(EXIT_FAILURE, "calloc");

        for (i = 0; i < nested; i++) {
                epollfdp[i] = epoll_create(1);
                if (epollfdp[i] < 0)
                        err(EXIT_FAILURE, "epoll_create");
        }

        ev.events = EPOLLHUP; /* anything */
        ev.data.u64 = i; /* any number */

        for (i = nested - 1; i; i--) {
                if (epoll_ctl(epollfdp[i - 1], EPOLL_CTL_ADD,
                              epollfdp[i], &ev) < 0)
                        err(EXIT_FAILURE, "epoll_ctl");
        }

        if (epoll_ctl(efd, EPOLL_CTL_ADD, *epollfdp, &ev) < 0)
                err(EXIT_FAILURE, "epoll_ctl");
}

static void toggle_done(int sig __maybe_unused,
                        siginfo_t *info __maybe_unused,
                        void *uc __maybe_unused)
{
        /* inform all threads that we're done for the day */
        done = true;
        gettimeofday(&end, NULL);
        timersub(&end, &start, &runtime);
}

static void print_summary(void)
{
        unsigned long avg = avg_stats(&throughput_stats);
        double stddev = stddev_stats(&throughput_stats);

        printf("\nAveraged %lu operations/sec (+- %.2f%%), total secs = %d\n",
               avg, rel_stddev_stats(stddev, avg),
               (int) runtime.tv_sec);
}

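/*
 * Create the worker threads: for each thread, allocate an fdmap of
 * nonblocking eventfds, register every fd with the chosen epoll
 * instance, and optionally pin the thread to a CPU before starting it.
 */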
static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
{
        pthread_attr_t thread_attr, *attrp = NULL;
        cpu_set_t cpuset;
        unsigned int i, j;
        int ret = 0, events = EPOLLIN;

        if (oneshot)
                events |= EPOLLONESHOT;
        if (et)
                events |= EPOLLET;

        printinfo("starting worker/consumer %sthreads%s\n",
                  noaffinity ? "" : "CPU affinity ",
                  nonblocking ? " (nonblocking)" : "");
        if (!noaffinity)
                pthread_attr_init(&thread_attr);

        for (i = 0; i < nthreads; i++) {
                struct worker *w = &worker[i];

                if (multiq) {
                        w->epollfd = epoll_create(1);
                        if (w->epollfd < 0)
                                err(EXIT_FAILURE, "epoll_create");

                        if (nested)
                                nest_epollfd(w);
                }

                w->tid = i;
                w->fdmap = calloc(nfds, sizeof(int));
                if (!w->fdmap)
                        return 1;

                for (j = 0; j < nfds; j++) {
                        int efd = multiq ? w->epollfd : epollfd;
                        struct epoll_event ev;

                        w->fdmap[j] = eventfd(0, EFD_NONBLOCK);
                        if (w->fdmap[j] < 0)
                                err(EXIT_FAILURE, "eventfd");

                        ev.data.fd = w->fdmap[j];
                        ev.events = events;

                        ret = epoll_ctl(efd, EPOLL_CTL_ADD,
                                        w->fdmap[j], &ev);
                        if (ret < 0)
                                err(EXIT_FAILURE, "epoll_ctl");
                }

                if (!noaffinity) {
                        CPU_ZERO(&cpuset);
                        CPU_SET(cpu->map[i % cpu->nr], &cpuset);

                        ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
                        if (ret)
                                err(EXIT_FAILURE, "pthread_attr_setaffinity_np");

                        attrp = &thread_attr;
                }

                ret = pthread_create(&w->thread, attrp, workerfn, (void *) w);
                if (ret)
                        err(EXIT_FAILURE, "pthread_create");
        }

        if (!noaffinity)
                pthread_attr_destroy(&thread_attr);

        return ret;
}

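/*
 * Writer loop: keep writing to every worker's eventfds (optionally in
 * shuffled order) so that the workers almost always find ready read
 * events rather than blocking in epoll_wait(2).
 */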
static void *writerfn(void *p)
{
        struct worker *worker = p;
        size_t i, j, iter;
        const uint64_t val = 1;
        ssize_t sz;
        struct timespec ts = { .tv_sec = 0,
                               .tv_nsec = 500 };

        printinfo("starting writer-thread: doing %s writes ...\n",
                  randomize ? "random" : "linear");

        for (iter = 0; !wdone; iter++) {
                if (randomize) {
                        shuffle((void *)worker, nthreads, sizeof(*worker));
                }

                for (i = 0; i < nthreads; i++) {
                        struct worker *w = &worker[i];

                        if (randomize) {
                                shuffle((void *)w->fdmap, nfds, sizeof(int));
                        }

                        for (j = 0; j < nfds; j++) {
                                do {
                                        sz = write(w->fdmap[j], &val, sizeof(val));
                                } while (!wdone && (sz < 0 && errno == EAGAIN));
                        }
                }

                nanosleep(&ts, NULL);
        }

        printinfo("exiting writer-thread (total full-loops: %zu)\n", iter);
        return NULL;
}

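/*
 * qsort(3) comparator, used to restore ascending tid order after
 * --randomize has shuffled the worker array.
 */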
static int cmpworker(const void *p1, const void *p2)
{
        struct worker *w1 = (struct worker *) p1;
        struct worker *w2 = (struct worker *) p2;

        /* tids are small non-negative ints, so the subtraction cannot overflow */
        return w1->tid - w2->tid;
}

int bench_epoll_wait(int argc, const char **argv)
{
        int ret = 0;
        struct sigaction act;
        unsigned int i;
        struct worker *worker = NULL;
        struct perf_cpu_map *cpu;
        pthread_t wthread;
        struct rlimit rl, prevrl;

        argc = parse_options(argc, argv, options, bench_epoll_wait_usage, 0);
        if (argc) {
                usage_with_options(bench_epoll_wait_usage, options);
                exit(EXIT_FAILURE);
        }

        memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        act.sa_flags = SA_SIGINFO; /* we install the three-argument handler */
        sigaction(SIGINT, &act, NULL);

        cpu = perf_cpu_map__new(NULL);
        if (!cpu)
                goto errmem;

        /* a single, main epoll instance */
        if (!multiq) {
                epollfd = epoll_create(1);
                if (epollfd < 0)
                        err(EXIT_FAILURE, "epoll_create");

                /*
                 * Deal with nested epolls, if any.
                 */
                if (nested)
                        nest_epollfd(NULL);
        }

        printinfo("Using %s queue model\n", multiq ? "multi" : "single");
        printinfo("Nesting level(s): %d\n", nested);

        /* default to the number of CPUs and leave one for the writer pthread */
        if (!nthreads)
                nthreads = cpu->nr - 1;

        worker = calloc(nthreads, sizeof(*worker));
        if (!worker)
                goto errmem;

        if (getrlimit(RLIMIT_NOFILE, &prevrl))
                err(EXIT_FAILURE, "getrlimit");
        rl.rlim_cur = rl.rlim_max = nfds * nthreads * 2 + 50;
        printinfo("Setting RLIMIT_NOFILE rlimit from %" PRIu64 " to: %" PRIu64 "\n",
                  (uint64_t)prevrl.rlim_max, (uint64_t)rl.rlim_max);
        if (setrlimit(RLIMIT_NOFILE, &rl) < 0)
                err(EXIT_FAILURE, "setrlimit");

        printf("Run summary [PID %d]: %d threads monitoring%s on "
               "%d file-descriptors for %d secs.\n\n",
               getpid(), nthreads, oneshot ? " (EPOLLONESHOT semantics)" : "", nfds, nsecs);

        init_stats(&throughput_stats);
        pthread_mutex_init(&thread_lock, NULL);
        pthread_cond_init(&thread_parent, NULL);
        pthread_cond_init(&thread_worker, NULL);

        threads_starting = nthreads;

        gettimeofday(&start, NULL);

        do_threads(worker, cpu);

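        /*
         * Wait for every worker to park on the condition variable, then
         * release them all at once so the measurement starts together.
         */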
        pthread_mutex_lock(&thread_lock);
        while (threads_starting)
                pthread_cond_wait(&thread_parent, &thread_lock);
        pthread_cond_broadcast(&thread_worker);
        pthread_mutex_unlock(&thread_lock);

        /*
         * At this point the workers should be blocked waiting for read events
         * to become ready. Launch the writer which will constantly be writing
         * to each thread's fdmap.
         */
        ret = pthread_create(&wthread, NULL, writerfn, (void *) worker);
        if (ret)
                err(EXIT_FAILURE, "pthread_create");

        sleep(nsecs);
        toggle_done(0, NULL, NULL);
        printinfo("main thread: toggling done\n");

        sleep(1); /* meh */
        wdone = true;
        ret = pthread_join(wthread, NULL);
        if (ret)
                err(EXIT_FAILURE, "pthread_join");

        /* cleanup & report results */
        pthread_cond_destroy(&thread_parent);
        pthread_cond_destroy(&thread_worker);
        pthread_mutex_destroy(&thread_lock);

        /* sort the array back before reporting */
        if (randomize)
                qsort(worker, nthreads, sizeof(struct worker), cmpworker);

        for (i = 0; i < nthreads; i++) {
                unsigned long t = worker[i].ops / runtime.tv_sec;

                update_stats(&throughput_stats, t);

                if (nfds == 1)
                        printf("[thread %2d] fdmap: %p [ %04lu ops/sec ]\n",
                               worker[i].tid, &worker[i].fdmap[0], t);
                else
                        printf("[thread %2d] fdmap: %p ... %p [ %04lu ops/sec ]\n",
                               worker[i].tid, &worker[i].fdmap[0],
                               &worker[i].fdmap[nfds - 1], t);
        }

        print_summary();

        close(epollfd);
        return ret;
errmem:
        err(EXIT_FAILURE, "calloc");
}
#endif // HAVE_EVENTFD