/* qemu/tests/unit/test-coroutine.c */
   1/*
   2 * Coroutine tests
   3 *
   4 * Copyright IBM, Corp. 2011
   5 *
   6 * Authors:
   7 *  Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
   8 *
   9 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
  10 * See the COPYING.LIB file in the top-level directory.
  11 *
  12 */
  13
  14#include "qemu/osdep.h"
  15#include "qemu/coroutine_int.h"
  16
  17/*
  18 * Check that qemu_in_coroutine() works
  19 */
  20
/* Coroutine entry point: assert that coroutine context is detected */
static void coroutine_fn verify_in_coroutine(void *opaque)
{
    g_assert(qemu_in_coroutine());
}
  25
  26static void test_in_coroutine(void)
  27{
  28    Coroutine *coroutine;
  29
  30    g_assert(!qemu_in_coroutine());
  31
  32    coroutine = qemu_coroutine_create(verify_in_coroutine, NULL);
  33    qemu_coroutine_enter(coroutine);
  34}
  35
  36/*
  37 * Check that qemu_coroutine_self() works
  38 */
  39
  40static void coroutine_fn verify_self(void *opaque)
  41{
  42    Coroutine **p_co = opaque;
  43    g_assert(qemu_coroutine_self() == *p_co);
  44}
  45
  46static void test_self(void)
  47{
  48    Coroutine *coroutine;
  49
  50    coroutine = qemu_coroutine_create(verify_self, &coroutine);
  51    qemu_coroutine_enter(coroutine);
  52}
  53
  54/*
  55 * Check that qemu_coroutine_entered() works
  56 */
  57
  58static void coroutine_fn verify_entered_step_2(void *opaque)
  59{
  60    Coroutine *caller = (Coroutine *)opaque;
  61
  62    g_assert(qemu_coroutine_entered(caller));
  63    g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
  64    qemu_coroutine_yield();
  65
  66    /* Once more to check it still works after yielding */
  67    g_assert(qemu_coroutine_entered(caller));
  68    g_assert(qemu_coroutine_entered(qemu_coroutine_self()));
  69}
  70
  71static void coroutine_fn verify_entered_step_1(void *opaque)
  72{
  73    Coroutine *self = qemu_coroutine_self();
  74    Coroutine *coroutine;
  75
  76    g_assert(qemu_coroutine_entered(self));
  77
  78    coroutine = qemu_coroutine_create(verify_entered_step_2, self);
  79    g_assert(!qemu_coroutine_entered(coroutine));
  80    qemu_coroutine_enter(coroutine);
  81    g_assert(!qemu_coroutine_entered(coroutine));
  82    qemu_coroutine_enter(coroutine);
  83}
  84
  85static void test_entered(void)
  86{
  87    Coroutine *coroutine;
  88
  89    coroutine = qemu_coroutine_create(verify_entered_step_1, NULL);
  90    g_assert(!qemu_coroutine_entered(coroutine));
  91    qemu_coroutine_enter(coroutine);
  92}
  93
  94/*
  95 * Check that coroutines may nest multiple levels
  96 */
  97
/* Shared state for the nesting tests: each nested coroutine bumps the
 * counters so the caller can verify every level ran and returned.
 */
typedef struct {
    unsigned int n_enter;   /* num coroutines entered */
    unsigned int n_return;  /* num coroutines returned */
    unsigned int max;       /* maximum level of nesting */
} NestData;
 103
 104static void coroutine_fn nest(void *opaque)
 105{
 106    NestData *nd = opaque;
 107
 108    nd->n_enter++;
 109
 110    if (nd->n_enter < nd->max) {
 111        Coroutine *child;
 112
 113        child = qemu_coroutine_create(nest, nd);
 114        qemu_coroutine_enter(child);
 115    }
 116
 117    nd->n_return++;
 118}
 119
 120static void test_nesting(void)
 121{
 122    Coroutine *root;
 123    NestData nd = {
 124        .n_enter  = 0,
 125        .n_return = 0,
 126        .max      = 128,
 127    };
 128
 129    root = qemu_coroutine_create(nest, &nd);
 130    qemu_coroutine_enter(root);
 131
 132    /* Must enter and return from max nesting level */
 133    g_assert_cmpint(nd.n_enter, ==, nd.max);
 134    g_assert_cmpint(nd.n_return, ==, nd.max);
 135}
 136
 137/*
 138 * Check that yield/enter transfer control correctly
 139 */
 140
 141static void coroutine_fn yield_5_times(void *opaque)
 142{
 143    bool *done = opaque;
 144    int i;
 145
 146    for (i = 0; i < 5; i++) {
 147        qemu_coroutine_yield();
 148    }
 149    *done = true;
 150}
 151
 152static void test_yield(void)
 153{
 154    Coroutine *coroutine;
 155    bool done = false;
 156    int i = -1; /* one extra time to return from coroutine */
 157
 158    coroutine = qemu_coroutine_create(yield_5_times, &done);
 159    while (!done) {
 160        qemu_coroutine_enter(coroutine);
 161        i++;
 162    }
 163    g_assert_cmpint(i, ==, 5); /* coroutine must yield 5 times */
 164}
 165
/* Helper for test_no_dangling_access: yield once, finish on second enter */
static void coroutine_fn c2_fn(void *opaque)
{
    qemu_coroutine_yield();
}
 170
 171static void coroutine_fn c1_fn(void *opaque)
 172{
 173    Coroutine *c2 = opaque;
 174    qemu_coroutine_enter(c2);
 175}
 176
/*
 * A terminated coroutine must not be touched when a coroutine it entered
 * is re-entered later.  c1 enters c2 and then terminates; re-entering c2
 * afterwards must not read c1.  The test poisons c1's memory with 0xff so
 * any such access crashes deterministically instead of silently working.
 */
static void test_no_dangling_access(void)
{
    Coroutine *c1;
    Coroutine *c2;
    Coroutine tmp;

    c2 = qemu_coroutine_create(c2_fn, NULL);
    c1 = qemu_coroutine_create(c1_fn, c2);

    qemu_coroutine_enter(c1);

    /* c1 shouldn't be used any more now; make sure we segfault if it is */
    tmp = *c1;
    memset(c1, 0xff, sizeof(Coroutine));
    qemu_coroutine_enter(c2);

    /* Must restore the coroutine now to avoid corrupted pool */
    *c1 = tmp;
}
 196
/* True while one of the mutex/lockable test coroutines holds the lock */
static bool locked;
/* Number of mutex/lockable test coroutines that have finished */
static int done;
 199
/*
 * Take the CoMutex, flag it as held across a yield, then release it.
 * Two instances share one mutex; the assert(!locked) proves the second
 * instance could not enter the critical section while the first held it.
 */
static void coroutine_fn mutex_fn(void *opaque)
{
    CoMutex *m = opaque;
    qemu_co_mutex_lock(m);
    assert(!locked);
    locked = true;
    qemu_coroutine_yield();
    locked = false;
    qemu_co_mutex_unlock(m);
    done++;
}
 211
/*
 * Same critical-section pattern as mutex_fn, but going through the
 * polymorphic QemuLockable interface instead of the CoMutex API directly.
 */
static void coroutine_fn lockable_fn(void *opaque)
{
    QemuLockable *x = opaque;
    qemu_lockable_lock(x);
    assert(!locked);
    locked = true;
    qemu_coroutine_yield();
    locked = false;
    qemu_lockable_unlock(x);
    done++;
}
 223
/*
 * Drive two identical coroutines contending on the same lock.
 * entry is mutex_fn or lockable_fn; opaque is the lock they share.
 */
static void do_test_co_mutex(CoroutineEntry *entry, void *opaque)
{
    Coroutine *c1 = qemu_coroutine_create(entry, opaque);
    Coroutine *c2 = qemu_coroutine_create(entry, opaque);

    done = 0;
    qemu_coroutine_enter(c1);   /* c1 takes the lock and yields */
    g_assert(locked);
    qemu_coroutine_enter(c2);   /* c2 blocks waiting for the lock */

    /* Unlock queues c2.  It is then started automatically when c1 yields or
     * terminates.
     */
    qemu_coroutine_enter(c1);
    g_assert_cmpint(done, ==, 1);
    g_assert(locked);           /* now c2 holds the lock */

    qemu_coroutine_enter(c2);
    g_assert_cmpint(done, ==, 2);
    g_assert(!locked);
}
 245
/* Exercise CoMutex hand-off between two contending coroutines */
static void test_co_mutex(void)
{
    CoMutex m;

    qemu_co_mutex_init(&m);
    do_test_co_mutex(mutex_fn, &m);
}
 253
 254static void test_co_mutex_lockable(void)
 255{
 256    CoMutex m;
 257    CoMutex *null_pointer = NULL;
 258
 259    qemu_co_mutex_init(&m);
 260    do_test_co_mutex(lockable_fn, QEMU_MAKE_LOCKABLE(&m));
 261
 262    g_assert(QEMU_MAKE_LOCKABLE(null_pointer) == NULL);
 263}
 264
 265static CoRwlock rwlock;
 266
 267/* Test that readers are properly sent back to the queue when upgrading,
 268 * even if they are the sole readers.  The test scenario is as follows:
 269 *
 270 *
 271 * | c1           | c2         |
 272 * |--------------+------------+
 273 * | rdlock       |            |
 274 * | yield        |            |
 275 * |              | wrlock     |
 276 * |              | <queued>   |
 277 * | upgrade      |            |
 278 * | <queued>     | <dequeued> |
 279 * |              | unlock     |
 280 * | <dequeued>   |            |
 281 * | unlock       |            |
 282 */
 283
 284static void coroutine_fn rwlock_yield_upgrade(void *opaque)
 285{
 286    qemu_co_rwlock_rdlock(&rwlock);
 287    qemu_coroutine_yield();
 288
 289    qemu_co_rwlock_upgrade(&rwlock);
 290    qemu_co_rwlock_unlock(&rwlock);
 291
 292    *(bool *)opaque = true;
 293}
 294
 295static void coroutine_fn rwlock_wrlock_yield(void *opaque)
 296{
 297    qemu_co_rwlock_wrlock(&rwlock);
 298    qemu_coroutine_yield();
 299
 300    qemu_co_rwlock_unlock(&rwlock);
 301    *(bool *)opaque = true;
 302}
 303
/* Drive the upgrade scenario described in the table above */
static void test_co_rwlock_upgrade(void)
{
    bool c1_done = false;
    bool c2_done = false;
    Coroutine *c1, *c2;

    qemu_co_rwlock_init(&rwlock);
    c1 = qemu_coroutine_create(rwlock_yield_upgrade, &c1_done);
    c2 = qemu_coroutine_create(rwlock_wrlock_yield, &c2_done);

    qemu_coroutine_enter(c1);   /* c1 takes the read lock and yields */
    qemu_coroutine_enter(c2);   /* c2 queues for the write lock */

    /* c1 now should go to sleep.  */
    qemu_coroutine_enter(c1);   /* c1 upgrades and is queued behind c2 */
    g_assert(!c1_done);

    qemu_coroutine_enter(c2);   /* c2 unlocks, waking c1 to finish */
    g_assert(c1_done);
    g_assert(c2_done);
}
 325
 326static void coroutine_fn rwlock_rdlock_yield(void *opaque)
 327{
 328    qemu_co_rwlock_rdlock(&rwlock);
 329    qemu_coroutine_yield();
 330
 331    qemu_co_rwlock_unlock(&rwlock);
 332    qemu_coroutine_yield();
 333
 334    *(bool *)opaque = true;
 335}
 336
 337static void coroutine_fn rwlock_wrlock_downgrade(void *opaque)
 338{
 339    qemu_co_rwlock_wrlock(&rwlock);
 340
 341    qemu_co_rwlock_downgrade(&rwlock);
 342    qemu_co_rwlock_unlock(&rwlock);
 343    *(bool *)opaque = true;
 344}
 345
 346static void coroutine_fn rwlock_rdlock(void *opaque)
 347{
 348    qemu_co_rwlock_rdlock(&rwlock);
 349
 350    qemu_co_rwlock_unlock(&rwlock);
 351    *(bool *)opaque = true;
 352}
 353
 354static void coroutine_fn rwlock_wrlock(void *opaque)
 355{
 356    qemu_co_rwlock_wrlock(&rwlock);
 357
 358    qemu_co_rwlock_unlock(&rwlock);
 359    *(bool *)opaque = true;
 360}
 361
 362/*
 363 * Check that downgrading a reader-writer lock does not cause a hang.
 364 *
 365 * Four coroutines are used to produce a situation where there are
 366 * both reader and writer hopefuls waiting to acquire an rwlock that
 367 * is held by a reader.
 368 *
 369 * The correct sequence of operations we aim to provoke can be
 370 * represented as:
 371 *
 372 * | c1     | c2         | c3         | c4         |
 373 * |--------+------------+------------+------------|
 374 * | rdlock |            |            |            |
 375 * | yield  |            |            |            |
 376 * |        | wrlock     |            |            |
 377 * |        | <queued>   |            |            |
 378 * |        |            | rdlock     |            |
 379 * |        |            | <queued>   |            |
 380 * |        |            |            | wrlock     |
 381 * |        |            |            | <queued>   |
 382 * | unlock |            |            |            |
 383 * | yield  |            |            |            |
 384 * |        | <dequeued> |            |            |
 385 * |        | downgrade  |            |            |
 386 * |        |            | <dequeued> |            |
 387 * |        |            | unlock     |            |
 388 * |        | ...        |            |            |
 389 * |        | unlock     |            |            |
 390 * |        |            |            | <dequeued> |
 391 * |        |            |            | unlock     |
 392 */
/* Drive the downgrade scenario described in the table above */
static void test_co_rwlock_downgrade(void)
{
    bool c1_done = false;
    bool c2_done = false;
    bool c3_done = false;
    bool c4_done = false;
    Coroutine *c1, *c2, *c3, *c4;

    qemu_co_rwlock_init(&rwlock);

    c1 = qemu_coroutine_create(rwlock_rdlock_yield, &c1_done);
    c2 = qemu_coroutine_create(rwlock_wrlock_downgrade, &c2_done);
    c3 = qemu_coroutine_create(rwlock_rdlock, &c3_done);
    c4 = qemu_coroutine_create(rwlock_wrlock, &c4_done);

    qemu_coroutine_enter(c1);   /* c1 takes the read lock and yields */
    qemu_coroutine_enter(c2);   /* c2 queues for the write lock */
    qemu_coroutine_enter(c3);   /* c3 queues for the read lock */
    qemu_coroutine_enter(c4);   /* c4 queues for the write lock */

    /* c1 unlocks; c2, c3 and c4 must all run to completion in turn */
    qemu_coroutine_enter(c1);

    g_assert(c2_done);
    g_assert(c3_done);
    g_assert(c4_done);

    /* c1 finishes after its second yield */
    qemu_coroutine_enter(c1);

    g_assert(c1_done);
}
 423
 424/*
 425 * Check that creation, enter, and return work
 426 */
 427
 428static void coroutine_fn set_and_exit(void *opaque)
 429{
 430    bool *done = opaque;
 431
 432    *done = true;
 433}
 434
 435static void test_lifecycle(void)
 436{
 437    Coroutine *coroutine;
 438    bool done = false;
 439
 440    /* Create, enter, and return from coroutine */
 441    coroutine = qemu_coroutine_create(set_and_exit, &done);
 442    qemu_coroutine_enter(coroutine);
 443    g_assert(done); /* expect done to be true (first time) */
 444
 445    /* Repeat to check that no state affects this test */
 446    done = false;
 447    coroutine = qemu_coroutine_create(set_and_exit, &done);
 448    qemu_coroutine_enter(coroutine);
 449    g_assert(done); /* expect done to be true (second time) */
 450}
 451
 452
#define RECORD_SIZE 10 /* Leave some room for expansion */
/* One entry in the execution-order log: which function, which step */
struct coroutine_position {
    int func;
    int state;
};
/* Log of record_push() calls, checked against the expected sequence */
static struct coroutine_position records[RECORD_SIZE];
/* Next free slot in records[] */
static unsigned record_pos;
 460
 461static void record_push(int func, int state)
 462{
 463    struct coroutine_position *cp = &records[record_pos++];
 464    g_assert_cmpint(record_pos, <, RECORD_SIZE);
 465    cp->func = func;
 466    cp->state = state;
 467}
 468
/* Coroutine side of the ordering test: log (2,1), yield, then log (2,2),
 * checking coroutine context at each step.
 */
static void coroutine_fn co_order_test(void *opaque)
{
    record_push(2, 1);
    g_assert(qemu_in_coroutine());
    qemu_coroutine_yield();
    record_push(2, 2);
    g_assert(qemu_in_coroutine());
}
 477
/* Caller side of the ordering test: interleave record_push() with two
 * enters of co_order_test, checking we are outside coroutine context.
 */
static void do_order_test(void)
{
    Coroutine *co;

    co = qemu_coroutine_create(co_order_test, NULL);
    record_push(1, 1);
    qemu_coroutine_enter(co);   /* runs co_order_test up to its yield */
    record_push(1, 2);
    g_assert(!qemu_in_coroutine());
    qemu_coroutine_enter(co);   /* runs co_order_test to completion */
    record_push(1, 3);
    g_assert(!qemu_in_coroutine());
}
 491
 492static void test_order(void)
 493{
 494    int i;
 495    const struct coroutine_position expected_pos[] = {
 496        {1, 1,}, {2, 1}, {1, 2}, {2, 2}, {1, 3}
 497    };
 498    do_order_test();
 499    g_assert_cmpint(record_pos, ==, 5);
 500    for (i = 0; i < record_pos; i++) {
 501        g_assert_cmpint(records[i].func , ==, expected_pos[i].func );
 502        g_assert_cmpint(records[i].state, ==, expected_pos[i].state);
 503    }
 504}
 505/*
 506 * Lifecycle benchmark
 507 */
 508
/* Minimal coroutine body used by the lifecycle benchmark */
static void coroutine_fn empty_coroutine(void *opaque)
{
    /* Do nothing */
}
 513
 514static void perf_lifecycle(void)
 515{
 516    Coroutine *coroutine;
 517    unsigned int i, max;
 518    double duration;
 519
 520    max = 1000000;
 521
 522    g_test_timer_start();
 523    for (i = 0; i < max; i++) {
 524        coroutine = qemu_coroutine_create(empty_coroutine, NULL);
 525        qemu_coroutine_enter(coroutine);
 526    }
 527    duration = g_test_timer_elapsed();
 528
 529    g_test_message("Lifecycle %u iterations: %f s", max, duration);
 530}
 531
 532static void perf_nesting(void)
 533{
 534    unsigned int i, maxcycles, maxnesting;
 535    double duration;
 536
 537    maxcycles = 10000;
 538    maxnesting = 1000;
 539    Coroutine *root;
 540
 541    g_test_timer_start();
 542    for (i = 0; i < maxcycles; i++) {
 543        NestData nd = {
 544            .n_enter  = 0,
 545            .n_return = 0,
 546            .max      = maxnesting,
 547        };
 548        root = qemu_coroutine_create(nest, &nd);
 549        qemu_coroutine_enter(root);
 550    }
 551    duration = g_test_timer_elapsed();
 552
 553    g_test_message("Nesting %u iterations of %u depth each: %f s",
 554        maxcycles, maxnesting, duration);
 555}
 556
 557/*
 558 * Yield benchmark
 559 */
 560
 561static void coroutine_fn yield_loop(void *opaque)
 562{
 563    unsigned int *counter = opaque;
 564
 565    while ((*counter) > 0) {
 566        (*counter)--;
 567        qemu_coroutine_yield();
 568    }
 569}
 570
 571static void perf_yield(void)
 572{
 573    unsigned int i, maxcycles;
 574    double duration;
 575
 576    maxcycles = 100000000;
 577    i = maxcycles;
 578    Coroutine *coroutine = qemu_coroutine_create(yield_loop, &i);
 579
 580    g_test_timer_start();
 581    while (i > 0) {
 582        qemu_coroutine_enter(coroutine);
 583    }
 584    duration = g_test_timer_elapsed();
 585
 586    g_test_message("Yield %u iterations: %f s", maxcycles, duration);
 587}
 588
 589static __attribute__((noinline)) void dummy(unsigned *i)
 590{
 591    (*i)--;
 592}
 593
 594static void perf_baseline(void)
 595{
 596    unsigned int i, maxcycles;
 597    double duration;
 598
 599    maxcycles = 100000000;
 600    i = maxcycles;
 601
 602    g_test_timer_start();
 603    while (i > 0) {
 604        dummy(&i);
 605    }
 606    duration = g_test_timer_elapsed();
 607
 608    g_test_message("Function call %u iterations: %f s", maxcycles, duration);
 609}
 610
/* Coroutine body for perf_cost: yield once so each cycle needs two enters */
static __attribute__((noinline)) void coroutine_fn perf_cost_func(void *opaque)
{
    qemu_coroutine_yield();
}
 615
/*
 * Benchmark: per-coroutine cost of create plus two enters (one to the
 * first yield, one to termination), reported in ops/s and ns/coroutine.
 */
static void perf_cost(void)
{
    const unsigned long maxcycles = 40000000;
    unsigned long i = 0;
    double duration;
    unsigned long ops;
    Coroutine *co;

    g_test_timer_start();
    while (i++ < maxcycles) {
        co = qemu_coroutine_create(perf_cost_func, &i);
        qemu_coroutine_enter(co);   /* run to the yield */
        qemu_coroutine_enter(co);   /* run to completion */
    }
    duration = g_test_timer_elapsed();
    ops = (long)(maxcycles / (duration * 1000));

    g_test_message("Run operation %lu iterations %f s, %luK operations/s, "
                   "%luns per coroutine",
                   maxcycles,
                   duration, ops,
                   (unsigned long)(1000000000.0 * duration / maxcycles));
}
 639
/* Register all coroutine tests and benchmarks with the glib test harness */
int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);

    /* This test assumes there is a freelist and marks freed coroutine memory
     * with a sentinel value.  If there is no freelist this would legitimately
     * crash, so skip it.
     */
    if (CONFIG_COROUTINE_POOL) {
        g_test_add_func("/basic/no-dangling-access", test_no_dangling_access);
    }

    g_test_add_func("/basic/lifecycle", test_lifecycle);
    g_test_add_func("/basic/yield", test_yield);
    g_test_add_func("/basic/nesting", test_nesting);
    g_test_add_func("/basic/self", test_self);
    g_test_add_func("/basic/entered", test_entered);
    g_test_add_func("/basic/in_coroutine", test_in_coroutine);
    g_test_add_func("/basic/order", test_order);
    g_test_add_func("/locking/co-mutex", test_co_mutex);
    g_test_add_func("/locking/co-mutex/lockable", test_co_mutex_lockable);
    g_test_add_func("/locking/co-rwlock/upgrade", test_co_rwlock_upgrade);
    g_test_add_func("/locking/co-rwlock/downgrade", test_co_rwlock_downgrade);
    /* Benchmarks run only when the harness is invoked in performance mode */
    if (g_test_perf()) {
        g_test_add_func("/perf/lifecycle", perf_lifecycle);
        g_test_add_func("/perf/nesting", perf_nesting);
        g_test_add_func("/perf/yield", perf_yield);
        g_test_add_func("/perf/function-call", perf_baseline);
        g_test_add_func("/perf/cost", perf_cost);
    }
    return g_test_run();
}
 672