qemu/tests/test-coroutine.c
/*
 * Coroutine tests
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "qemu/coroutine_int.h"
#include "qemu/lockable.h"

/*
 * Check that qemu_in_coroutine() works
 */

static void coroutine_fn verify_in_coroutine(void *opaque)
{
    g_assert(qemu_in_coroutine());
}

static void test_in_coroutine(void)
{
    Coroutine *coroutine;

    g_assert(!qemu_in_coroutine());

    coroutine = qemu_coroutine_create(verify_in_coroutine, NULL);
    qemu_coroutine_enter(coroutine);
}

/*
 * Check that qemu_coroutine_self() works
 */

static void coroutine_fn verify_self(void *opaque)
{
    Coroutine **p_co = opaque;
    g_assert(qemu_coroutine_self() == *p_co);
}

static void test_self(void)
{
    Coroutine *coroutine;

    coroutine = qemu_coroutine_create(verify_self, &coroutine);
    qemu_coroutine_enter(coroutine);
}

/*
 * Check that qemu_coroutine_entered() works
 */

static void coroutine_fn verify_entered_step_2(void *opaque)
{
    Coroutine *caller = (Coroutine *)opaque;

    g_assert(qemu_coroutine_entered(caller));
    g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
    qemu_coroutine_yield();

    /* Once more to check it still works after yielding */
    g_assert(qemu_coroutine_entered(caller));
    g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
}

static void coroutine_fn verify_entered_step_1(void *opaque)
{
    Coroutine *self = qemu_coroutine_self();
    Coroutine *coroutine;

    g_assert(qemu_coroutine_entered(self));

    coroutine = qemu_coroutine_create(verify_entered_step_2, self);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
}

static void test_entered(void)
{
    Coroutine *coroutine;

    coroutine = qemu_coroutine_create(verify_entered_step_1, NULL);
    g_assert(!qemu_coroutine_entered(coroutine));
    qemu_coroutine_enter(coroutine);
}

/*
 * Check that coroutines may nest multiple levels
 */

typedef struct {
    unsigned int n_enter;   /* num coroutines entered */
    unsigned int n_return;  /* num coroutines returned */
    unsigned int max;       /* maximum level of nesting */
} NestData;

static void coroutine_fn nest(void *opaque)
{
    NestData *nd = opaque;

    nd->n_enter++;

    if (nd->n_enter < nd->max) {
        Coroutine *child;

        child = qemu_coroutine_create(nest, nd);
        qemu_coroutine_enter(child);
    }

    nd->n_return++;
}

static void test_nesting(void)
{
    Coroutine *root;
    NestData nd = {
        .n_enter  = 0,
        .n_return = 0,
        .max      = 128,
    };

    root = qemu_coroutine_create(nest, &nd);
    qemu_coroutine_enter(root);

    /* Must enter and return from max nesting level */
    g_assert_cmpint(nd.n_enter, ==, nd.max);
    g_assert_cmpint(nd.n_return, ==, nd.max);
}

/*
 * Check that yield/enter transfer control correctly
 */

static void coroutine_fn yield_5_times(void *opaque)
{
    bool *done = opaque;
    int i;

    for (i = 0; i < 5; i++) {
        qemu_coroutine_yield();
    }
    *done = true;
}

static void test_yield(void)
{
    Coroutine *coroutine;
    bool done = false;
    int i = -1; /* one extra time to return from coroutine */

    coroutine = qemu_coroutine_create(yield_5_times, &done);
    while (!done) {
        qemu_coroutine_enter(coroutine);
        i++;
    }
    g_assert_cmpint(i, ==, 5); /* coroutine must yield 5 times */
}

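/*
 * Check that a terminated coroutine's memory is not accessed when a
 * coroutine it had entered is resumed later
 */
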
static void coroutine_fn c2_fn(void *opaque)
{
    qemu_coroutine_yield();
}

static void coroutine_fn c1_fn(void *opaque)
{
    Coroutine *c2 = opaque;
    qemu_coroutine_enter(c2);
}

static void test_no_dangling_access(void)
{
    Coroutine *c1;
    Coroutine *c2;
    Coroutine tmp;

    c2 = qemu_coroutine_create(c2_fn, NULL);
    c1 = qemu_coroutine_create(c1_fn, c2);

    qemu_coroutine_enter(c1);

    /* c1 shouldn't be used any more now; make sure we segfault if it is */
    tmp = *c1;
    memset(c1, 0xff, sizeof(Coroutine));
    qemu_coroutine_enter(c2);

    /* Must restore the coroutine now to avoid corrupted pool */
    *c1 = tmp;
}

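/*
 * Check that CoMutex serializes coroutines, both directly and through the
 * QemuLockable wrapper
 */
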
static bool locked;
static int done;

static void coroutine_fn mutex_fn(void *opaque)
{
    CoMutex *m = opaque;
    qemu_co_mutex_lock(m);
    assert(!locked);
    locked = true;
    qemu_coroutine_yield();
    locked = false;
    qemu_co_mutex_unlock(m);
    done++;
}

static void coroutine_fn lockable_fn(void *opaque)
{
    QemuLockable *x = opaque;
    qemu_lockable_lock(x);
    assert(!locked);
    locked = true;
    qemu_coroutine_yield();
    locked = false;
    qemu_lockable_unlock(x);
    done++;
}

static void do_test_co_mutex(CoroutineEntry *entry, void *opaque)
{
    Coroutine *c1 = qemu_coroutine_create(entry, opaque);
    Coroutine *c2 = qemu_coroutine_create(entry, opaque);

    done = 0;
    qemu_coroutine_enter(c1);
    g_assert(locked);
    qemu_coroutine_enter(c2);

    /* Unlock queues c2.  It is then started automatically when c1 yields or
     * terminates.
     */
    qemu_coroutine_enter(c1);
    g_assert_cmpint(done, ==, 1);
    g_assert(locked);

    qemu_coroutine_enter(c2);
    g_assert_cmpint(done, ==, 2);
    g_assert(!locked);
}

static void test_co_mutex(void)
{
    CoMutex m;

    qemu_co_mutex_init(&m);
    do_test_co_mutex(mutex_fn, &m);
}

static void test_co_mutex_lockable(void)
{
    CoMutex m;
    CoMutex *null_pointer = NULL;

    qemu_co_mutex_init(&m);
    do_test_co_mutex(lockable_fn, QEMU_MAKE_LOCKABLE(&m));

    g_assert(QEMU_MAKE_LOCKABLE(null_pointer) == NULL);
}

/*
 * Check that creation, enter, and return work
 */

static void coroutine_fn set_and_exit(void *opaque)
{
    bool *done = opaque;

    *done = true;
}

static void test_lifecycle(void)
{
    Coroutine *coroutine;
    bool done = false;

    /* Create, enter, and return from coroutine */
    coroutine = qemu_coroutine_create(set_and_exit, &done);
    qemu_coroutine_enter(coroutine);
    g_assert(done); /* expect done to be true (first time) */

    /* Repeat to check that no state affects this test */
    done = false;
    coroutine = qemu_coroutine_create(set_and_exit, &done);
    qemu_coroutine_enter(coroutine);
    g_assert(done); /* expect done to be true (second time) */
}

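/*
 * Check that the caller and the coroutine interleave in the expected order
 * across enter/yield transfers
 */
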
#define RECORD_SIZE 10 /* Leave some room for expansion */
struct coroutine_position {
    int func;
    int state;
};
static struct coroutine_position records[RECORD_SIZE];
static unsigned record_pos;

static void record_push(int func, int state)
{
    struct coroutine_position *cp = &records[record_pos++];
    g_assert_cmpint(record_pos, <, RECORD_SIZE);
    cp->func = func;
    cp->state = state;
}

static void coroutine_fn co_order_test(void *opaque)
{
    record_push(2, 1);
    g_assert(qemu_in_coroutine());
    qemu_coroutine_yield();
    record_push(2, 2);
    g_assert(qemu_in_coroutine());
}

static void do_order_test(void)
{
    Coroutine *co;

    co = qemu_coroutine_create(co_order_test, NULL);
    record_push(1, 1);
    qemu_coroutine_enter(co);
    record_push(1, 2);
    g_assert(!qemu_in_coroutine());
    qemu_coroutine_enter(co);
    record_push(1, 3);
    g_assert(!qemu_in_coroutine());
}

static void test_order(void)
{
    int i;
    const struct coroutine_position expected_pos[] = {
        {1, 1}, {2, 1}, {1, 2}, {2, 2}, {1, 3}
    };
    do_order_test();
    g_assert_cmpint(record_pos, ==, 5);
    for (i = 0; i < record_pos; i++) {
        g_assert_cmpint(records[i].func , ==, expected_pos[i].func );
        g_assert_cmpint(records[i].state, ==, expected_pos[i].state);
    }
}

/*
 * Lifecycle benchmark
 */

static void coroutine_fn empty_coroutine(void *opaque)
{
    /* Do nothing */
}

static void perf_lifecycle(void)
{
    Coroutine *coroutine;
    unsigned int i, max;
    double duration;

    max = 1000000;

    g_test_timer_start();
    for (i = 0; i < max; i++) {
        coroutine = qemu_coroutine_create(empty_coroutine, NULL);
        qemu_coroutine_enter(coroutine);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Lifecycle %u iterations: %f s", max, duration);
}

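/*
 * Nesting benchmark
 */
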
static void perf_nesting(void)
{
    unsigned int i, maxcycles, maxnesting;
    double duration;

    maxcycles = 10000;
    maxnesting = 1000;
    Coroutine *root;

    g_test_timer_start();
    for (i = 0; i < maxcycles; i++) {
        NestData nd = {
            .n_enter  = 0,
            .n_return = 0,
            .max      = maxnesting,
        };
        root = qemu_coroutine_create(nest, &nd);
        qemu_coroutine_enter(root);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Nesting %u iterations of %u depth each: %f s",
        maxcycles, maxnesting, duration);
}

/*
 * Yield benchmark
 */

static void coroutine_fn yield_loop(void *opaque)
{
    unsigned int *counter = opaque;

    while ((*counter) > 0) {
        (*counter)--;
        qemu_coroutine_yield();
    }
}

static void perf_yield(void)
{
    unsigned int i, maxcycles;
    double duration;

    maxcycles = 100000000;
    i = maxcycles;
    Coroutine *coroutine = qemu_coroutine_create(yield_loop, &i);

    g_test_timer_start();
    while (i > 0) {
        qemu_coroutine_enter(coroutine);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Yield %u iterations: %f s", maxcycles, duration);
}

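/*
 * Function call benchmark, a baseline to compare against coroutine
 * enter/yield
 */
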
static __attribute__((noinline)) void dummy(unsigned *i)
{
    (*i)--;
}

static void perf_baseline(void)
{
    unsigned int i, maxcycles;
    double duration;

    maxcycles = 100000000;
    i = maxcycles;

    g_test_timer_start();
    while (i > 0) {
        dummy(&i);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Function call %u iterations: %f s", maxcycles, duration);
}

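/*
 * Coroutine cost benchmark: create, enter, yield, re-enter, and terminate
 * one coroutine per iteration
 */
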
static __attribute__((noinline)) void perf_cost_func(void *opaque)
{
    qemu_coroutine_yield();
}

static void perf_cost(void)
{
    const unsigned long maxcycles = 40000000;
    unsigned long i = 0;
    double duration;
    unsigned long ops;
    Coroutine *co;

    g_test_timer_start();
    while (i++ < maxcycles) {
        co = qemu_coroutine_create(perf_cost_func, &i);
        qemu_coroutine_enter(co);
        qemu_coroutine_enter(co);
    }
    duration = g_test_timer_elapsed();
    ops = (long)(maxcycles / (duration * 1000));

    g_test_message("Run operation %lu iterations %f s, %luK operations/s, "
                   "%luns per coroutine",
                   maxcycles,
                   duration, ops,
                   (unsigned long)(1000000000.0 * duration / maxcycles));
}

int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);

    /* This test assumes there is a freelist and marks freed coroutine memory
     * with a sentinel value.  If there is no freelist this would legitimately
     * crash, so skip it.
     */
    if (CONFIG_COROUTINE_POOL) {
        g_test_add_func("/basic/no-dangling-access", test_no_dangling_access);
    }

    g_test_add_func("/basic/lifecycle", test_lifecycle);
    g_test_add_func("/basic/yield", test_yield);
    g_test_add_func("/basic/nesting", test_nesting);
    g_test_add_func("/basic/self", test_self);
    g_test_add_func("/basic/entered", test_entered);
    g_test_add_func("/basic/in_coroutine", test_in_coroutine);
    g_test_add_func("/basic/order", test_order);
    g_test_add_func("/locking/co-mutex", test_co_mutex);
    g_test_add_func("/locking/co-mutex/lockable", test_co_mutex_lockable);
    if (g_test_perf()) {
        g_test_add_func("/perf/lifecycle", perf_lifecycle);
        g_test_add_func("/perf/nesting", perf_nesting);
        g_test_add_func("/perf/yield", perf_yield);
        g_test_add_func("/perf/function-call", perf_baseline);
        g_test_add_func("/perf/cost", perf_cost);
    }
    return g_test_run();
}