dpdk/app/test/test_ticketlock.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Arm Limited
 */

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_per_lcore.h>
#include <rte_ticketlock.h>

#include "test.h"

/*
 * Ticketlock test
 * ===============
 *
 * - There is a global ticketlock and a table of ticketlocks (one per lcore).
 *
 * - The test function takes all of these locks and launches the
 *   ``test_ticketlock_per_core()`` function on each core (except the main
 *   core).
 *
 *   - The function takes the global lock, displays something, then releases
 *     the global lock.
 *   - The function takes the per-lcore lock, displays something, then
 *     releases the per-lcore lock.
 *
 * - The main function unlocks the per-lcore locks sequentially and
 *   waits between each unlock. This triggers the display of a message
 *   for each core, in the correct order. The autotest script checks that
 *   this order is correct.
 *
 * - A load test is carried out, with all cores attempting to lock a single
 *   lock multiple times.
 */

static rte_ticketlock_t tl, tl_try;
static rte_ticketlock_t tl_tab[RTE_MAX_LCORE];
static rte_ticketlock_recursive_t tlr;
static unsigned int count;

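/* Set by the main lcore to release workers spinning in load_loop_fn(). */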
static rte_atomic32_t synchro;

static int
test_ticketlock_per_core(__rte_unused void *arg)
{
        rte_ticketlock_lock(&tl);
        printf("Global lock taken on core %u\n", rte_lcore_id());
        rte_ticketlock_unlock(&tl);

        rte_ticketlock_lock(&tl_tab[rte_lcore_id()]);
        printf("Hello from core %u !\n", rte_lcore_id());
        rte_ticketlock_unlock(&tl_tab[rte_lcore_id()]);

        return 0;
}

static int
test_ticketlock_recursive_per_core(__rte_unused void *arg)
{
        unsigned int id = rte_lcore_id();

        rte_ticketlock_recursive_lock(&tlr);
        printf("Global recursive lock taken on core %u - count = %d\n",
               id, tlr.count);
        rte_ticketlock_recursive_lock(&tlr);
        printf("Global recursive lock taken on core %u - count = %d\n",
               id, tlr.count);
        rte_ticketlock_recursive_lock(&tlr);
        printf("Global recursive lock taken on core %u - count = %d\n",
               id, tlr.count);

        printf("Hello from within recursive locks from core %u !\n", id);

        rte_ticketlock_recursive_unlock(&tlr);
        printf("Global recursive lock released on core %u - count = %d\n",
               id, tlr.count);
        rte_ticketlock_recursive_unlock(&tlr);
        printf("Global recursive lock released on core %u - count = %d\n",
               id, tlr.count);
        rte_ticketlock_recursive_unlock(&tlr);
        printf("Global recursive lock released on core %u - count = %d\n",
               id, tlr.count);

        return 0;
}

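/* Counters and per-lcore timings used by the perf test below. */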
static rte_ticketlock_t lk = RTE_TICKETLOCK_INITIALIZER;
static uint64_t lcount __rte_cache_aligned;
static uint64_t lcore_count[RTE_MAX_LCORE] __rte_cache_aligned;
static uint64_t time_cost[RTE_MAX_LCORE];

#define MAX_LOOP 10000

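/*
 * Increment the shared and per-lcore counters MAX_LOOP times, with or
 * without taking the ticketlock around each increment, and record how
 * long the loop took on this lcore.
 */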
static int
load_loop_fn(void *func_param)
{
        uint64_t time_diff = 0, begin;
        uint64_t hz = rte_get_timer_hz();
        const int use_lock = *(int *)func_param;
        const unsigned int lcore = rte_lcore_id();

        /* worker lcores wait for the synchro flag set by the main lcore */
        if (lcore != rte_get_main_lcore())
                while (rte_atomic32_read(&synchro) == 0)
                        ;

        begin = rte_rdtsc_precise();
        while (lcore_count[lcore] < MAX_LOOP) {
                if (use_lock)
                        rte_ticketlock_lock(&lk);
                lcore_count[lcore]++;
                lcount++;
                if (use_lock)
                        rte_ticketlock_unlock(&lk);
        }
        time_diff = rte_rdtsc_precise() - begin;
        time_cost[lcore] = time_diff * 1000000 / hz;
        return 0;
}

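/*
 * Ticketlock performance test: run load_loop_fn() without the lock and
 * with the lock on a single core, then with the lock on all cores, and
 * report the time spent per core.
 */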
static int
test_ticketlock_perf(void)
{
        unsigned int i;
        uint64_t tcount = 0;
        uint64_t total_time = 0;
        int lock = 0;
        const unsigned int lcore = rte_lcore_id();

        printf("\nTest with no lock on single core...\n");
        load_loop_fn(&lock);
        printf("Core [%u] cost time = %"PRIu64" us\n", lcore, time_cost[lcore]);
        memset(lcore_count, 0, sizeof(lcore_count));
        memset(time_cost, 0, sizeof(time_cost));

        printf("\nTest with lock on single core...\n");
        lock = 1;
        load_loop_fn(&lock);
        printf("Core [%u] cost time = %"PRIu64" us\n", lcore, time_cost[lcore]);
        memset(lcore_count, 0, sizeof(lcore_count));
        memset(time_cost, 0, sizeof(time_cost));

        lcount = 0;
        printf("\nTest with lock on %u cores...\n", rte_lcore_count());

        /* Clear synchro and start workers */
        rte_atomic32_set(&synchro, 0);
        rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);

        /* start synchro and launch test on main */
        rte_atomic32_set(&synchro, 1);
        load_loop_fn(&lock);

        rte_eal_mp_wait_lcore();

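        /* Sum per-lcore counts and times; the total must match lcount. */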
        RTE_LCORE_FOREACH(i) {
                printf("Core [%u] cost time = %"PRIu64" us\n", i, time_cost[i]);
                tcount += lcore_count[i];
                total_time += time_cost[i];
        }

        if (tcount != lcount)
                return -1;

        printf("Total cost time = %"PRIu64" us\n", total_time);

        return 0;
}

/*
 * Use rte_ticketlock_trylock() to try to take a ticketlock object.
 * If the lock cannot be taken, the call returns immediately and the
 * "count" variable is incremented by one. The final value of "count"
 * is checked later as the test result.
 */
static int
test_ticketlock_try(__rte_unused void *arg)
{
        if (rte_ticketlock_trylock(&tl_try) == 0) {
                rte_ticketlock_lock(&tl);
                count++;
                rte_ticketlock_unlock(&tl);
        }

        return 0;
}

/*
 * Test rte_eal_get_lcore_state() in addition to ticketlocks
 * as we have "waiting" then "running" lcores.
 */
static int
test_ticketlock(void)
{
        int ret = 0;
        int i;

        /* worker cores should be waiting: print it */
        RTE_LCORE_FOREACH_WORKER(i) {
                printf("lcore %d state: %d\n", i,
                       (int) rte_eal_get_lcore_state(i));
        }

        rte_ticketlock_init(&tl);
        rte_ticketlock_init(&tl_try);
        rte_ticketlock_recursive_init(&tlr);
        RTE_LCORE_FOREACH_WORKER(i) {
                rte_ticketlock_init(&tl_tab[i]);
        }

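        /*
         * Take the global lock, then take each per-lcore lock just before
         * launching the worker that will contend for it.
         */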
        rte_ticketlock_lock(&tl);

        RTE_LCORE_FOREACH_WORKER(i) {
                rte_ticketlock_lock(&tl_tab[i]);
                rte_eal_remote_launch(test_ticketlock_per_core, NULL, i);
        }

        /* worker cores should be busy: print it */
        RTE_LCORE_FOREACH_WORKER(i) {
                printf("lcore %d state: %d\n", i,
                       (int) rte_eal_get_lcore_state(i));
        }
        rte_ticketlock_unlock(&tl);

        RTE_LCORE_FOREACH_WORKER(i) {
                rte_ticketlock_unlock(&tl_tab[i]);
                rte_delay_ms(10);
        }

        rte_eal_mp_wait_lcore();

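        /*
         * Recursive ticketlock test: the main lcore takes the lock, checks
         * that trylock succeeds on a lock it already owns, then has each
         * worker take and release the lock three times.
         */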
        rte_ticketlock_recursive_lock(&tlr);

        /*
         * Try to acquire a lock that we already own
         */
        if (!rte_ticketlock_recursive_trylock(&tlr)) {
                printf("rte_ticketlock_recursive_trylock failed on a lock that "
                       "we already own\n");
                ret = -1;
        } else
                rte_ticketlock_recursive_unlock(&tlr);

        RTE_LCORE_FOREACH_WORKER(i) {
                rte_eal_remote_launch(test_ticketlock_recursive_per_core,
                                        NULL, i);
        }
        rte_ticketlock_recursive_unlock(&tlr);
        rte_eal_mp_wait_lcore();

        /*
         * Test that try-locking a locked object returns immediately.
         * Lock the ticketlock object first, then launch all the worker
         * lcores to trylock the same object. Each worker should fail to
         * take the lock, return immediately and increment "count"
         * (initialized to zero) by one.
         * If "count" finally equals the number of worker lcores, the
         * behavior of try-locking a locked ticketlock object is correct.
         */
        if (rte_ticketlock_trylock(&tl_try) == 0)
                return -1;

        count = 0;
        RTE_LCORE_FOREACH_WORKER(i) {
                rte_eal_remote_launch(test_ticketlock_try, NULL, i);
        }
        rte_eal_mp_wait_lcore();
        rte_ticketlock_unlock(&tl_try);
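        /* All workers are done; the global lock must be free again. */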
        if (rte_ticketlock_is_locked(&tl)) {
                printf("ticketlock is locked but it should not be\n");
                return -1;
        }
        rte_ticketlock_lock(&tl);
        if (count != (rte_lcore_count() - 1))
                ret = -1;

        rte_ticketlock_unlock(&tl);

        /*
         * Test if it can trylock recursively.
         * Use rte_ticketlock_recursive_trylock() to check if it can lock
         * a ticketlock object recursively. Here it will try to lock a
         * ticketlock object twice.
         */
        if (rte_ticketlock_recursive_trylock(&tlr) == 0) {
                printf("The first ticketlock_recursive_trylock failed "
                       "but it should be able to succeed\n");
                return -1;
        }
        if (rte_ticketlock_recursive_trylock(&tlr) == 0) {
                printf("The second ticketlock_recursive_trylock failed "
                       "but it should be able to succeed\n");
                return -1;
        }
        rte_ticketlock_recursive_unlock(&tlr);
        rte_ticketlock_recursive_unlock(&tlr);

        if (test_ticketlock_perf() < 0)
                return -1;

        return ret;
}

REGISTER_TEST_COMMAND(ticketlock_autotest, test_ticketlock);