dpdk/lib/eal/common/rte_service.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <inttypes.h>
#include <string.h>

#include <rte_service.h>
#include <rte_service_component.h>

#include <rte_lcore.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_spinlock.h>

#include "eal_private.h"

#define RTE_SERVICE_NUM_MAX 64

#define SERVICE_F_REGISTERED    (1 << 0)
#define SERVICE_F_STATS_ENABLED (1 << 1)
#define SERVICE_F_START_CHECK   (1 << 2)

/* runstates for services and lcores, denoting if they are active or not */
#define RUNSTATE_STOPPED 0
#define RUNSTATE_RUNNING 1

/* internal representation of a service */
struct rte_service_spec_impl {
        /* public part of the struct */
        struct rte_service_spec spec;

        /* spin lock that when set indicates a service core is currently
         * running this service callback. When not set, a core may take the
         * lock and then run the service callback.
         */
        rte_spinlock_t execute_lock;

        /* API set/get-able variables */
        int8_t app_runstate;
        int8_t comp_runstate;
        uint8_t internal_flags;

        /* per service statistics */
        /* Indicates how many cores the service is mapped to run on.
         * It does not indicate the number of cores the service is running
         * on currently.
         */
        uint32_t num_mapped_cores;
        uint64_t calls;
        uint64_t cycles_spent;
} __rte_cache_aligned;

/* the internal values of a service core */
struct core_state {
  59        /* map of services IDs are run on this core */
  60        uint64_t service_mask;
  61        uint8_t runstate; /* running or stopped */
  62        uint8_t thread_active; /* indicates when thread is in service_run() */
  63        uint8_t is_service_core; /* set if core is currently a service core */
  64        uint8_t service_active_on_lcore[RTE_SERVICE_NUM_MAX];
  65        uint64_t loops;
  66        uint64_t calls_per_service[RTE_SERVICE_NUM_MAX];
  67} __rte_cache_aligned;
  68
  69static uint32_t rte_service_count;
  70static struct rte_service_spec_impl *rte_services;
  71static struct core_state *lcore_states;
  72static uint32_t rte_service_library_initialized;
  73
  74int32_t
  75rte_service_init(void)
  76{
  77        if (rte_service_library_initialized) {
  78                RTE_LOG(NOTICE, EAL,
  79                        "service library init() called, init flag %d\n",
  80                        rte_service_library_initialized);
  81                return -EALREADY;
  82        }
  83
  84        rte_services = rte_calloc("rte_services", RTE_SERVICE_NUM_MAX,
  85                        sizeof(struct rte_service_spec_impl),
  86                        RTE_CACHE_LINE_SIZE);
  87        if (!rte_services) {
  88                RTE_LOG(ERR, EAL, "error allocating rte services array\n");
  89                goto fail_mem;
  90        }
  91
  92        lcore_states = rte_calloc("rte_service_core_states", RTE_MAX_LCORE,
  93                        sizeof(struct core_state), RTE_CACHE_LINE_SIZE);
  94        if (!lcore_states) {
  95                RTE_LOG(ERR, EAL, "error allocating core states array\n");
  96                goto fail_mem;
  97        }
  98
  99        int i;
 100        int count = 0;
 101        struct rte_config *cfg = rte_eal_get_configuration();
 102        for (i = 0; i < RTE_MAX_LCORE; i++) {
 103                if (lcore_config[i].core_role == ROLE_SERVICE) {
 104                        if ((unsigned int)i == cfg->main_lcore)
 105                                continue;
 106                        rte_service_lcore_add(i);
 107                        count++;
 108                }
 109        }
 110
 111        rte_service_library_initialized = 1;
 112        return 0;
 113fail_mem:
 114        rte_free(rte_services);
 115        rte_free(lcore_states);
 116        return -ENOMEM;
 117}
 118
 119void
 120rte_service_finalize(void)
 121{
 122        if (!rte_service_library_initialized)
 123                return;
 124
 125        rte_service_lcore_reset_all();
 126        rte_eal_mp_wait_lcore();
 127
 128        rte_free(rte_services);
 129        rte_free(lcore_states);
 130
 131        rte_service_library_initialized = 0;
 132}
 133
/* Returns 1 if the service is registered and has not been unregistered.
 * Returns 0 if the service was never registered, or has been unregistered.
 */
static inline int
service_valid(uint32_t id)
{
        return !!(rte_services[id].internal_flags & SERVICE_F_REGISTERED);
}

static struct rte_service_spec_impl *
service_get(uint32_t id)
{
        return &rte_services[id];
}

/* validate ID and retrieve service pointer, or return error value */
#define SERVICE_VALID_GET_OR_ERR_RET(id, service, retval) do {          \
        if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))            \
                return retval;                                          \
        service = &rte_services[id];                                    \
} while (0)
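
/* Note: the macro above returns from the *calling* function when the id is
 * out of range or not registered, which is why callers declare the service
 * pointer first and then invoke the macro with the error value they want
 * returned on failure.
 */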

/* Returns 1 if statistics should be collected for the service.
 * Returns 0 if statistics should not be collected for the service.
 */
static inline int
service_stats_enabled(struct rte_service_spec_impl *impl)
{
        return !!(impl->internal_flags & SERVICE_F_STATS_ENABLED);
}

static inline int
service_mt_safe(struct rte_service_spec_impl *s)
{
        return !!(s->spec.capabilities & RTE_SERVICE_CAP_MT_SAFE);
}

int32_t
rte_service_set_stats_enable(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

        if (enabled)
                s->internal_flags |= SERVICE_F_STATS_ENABLED;
        else
                s->internal_flags &= ~(SERVICE_F_STATS_ENABLED);

        return 0;
}

int32_t
rte_service_set_runstate_mapped_check(uint32_t id, int32_t enabled)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);

        if (enabled)
                s->internal_flags |= SERVICE_F_START_CHECK;
        else
                s->internal_flags &= ~(SERVICE_F_START_CHECK);

        return 0;
}

uint32_t
rte_service_get_count(void)
{
        return rte_service_count;
}

int32_t
rte_service_get_by_name(const char *name, uint32_t *service_id)
{
        if (!service_id)
                return -EINVAL;

        int i;
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (service_valid(i) &&
                                strcmp(name, rte_services[i].spec.name) == 0) {
                        *service_id = i;
                        return 0;
                }
        }

        return -ENODEV;
}

const char *
rte_service_get_name(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, 0);
        return s->spec.name;
}

int32_t
rte_service_probe_capability(uint32_t id, uint32_t capability)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
        return !!(s->spec.capabilities & capability);
}

int32_t
rte_service_component_register(const struct rte_service_spec *spec,
                               uint32_t *id_ptr)
{
        uint32_t i;
        int32_t free_slot = -1;

        if (spec->callback == NULL || strlen(spec->name) == 0)
                return -EINVAL;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i)) {
                        free_slot = i;
                        break;
                }
        }

        if ((free_slot < 0) || (i == RTE_SERVICE_NUM_MAX))
                return -ENOSPC;

        struct rte_service_spec_impl *s = &rte_services[free_slot];
        s->spec = *spec;
        s->internal_flags |= SERVICE_F_REGISTERED | SERVICE_F_START_CHECK;

        rte_service_count++;

        if (id_ptr)
                *id_ptr = free_slot;

        return 0;
}
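
/* Example (illustrative sketch, not part of this file): a component would
 * typically fill in an rte_service_spec and register it roughly as below.
 * The names my_service_cb and my_ctx are hypothetical.
 *
 *     static int32_t my_service_cb(void *userdata) { ... return 0; }
 *
 *     struct rte_service_spec spec = { 0 };
 *     snprintf(spec.name, sizeof(spec.name), "my_service");
 *     spec.callback = my_service_cb;
 *     spec.callback_userdata = &my_ctx;
 *     uint32_t id;
 *     if (rte_service_component_register(&spec, &id) == 0)
 *             rte_service_component_runstate_set(id, 1);
 */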

int32_t
rte_service_component_unregister(uint32_t id)
{
        uint32_t i;
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        rte_service_count--;

        s->internal_flags &= ~(SERVICE_F_REGISTERED);

        /* clear the run-bit in all cores */
        for (i = 0; i < RTE_MAX_LCORE; i++)
                lcore_states[i].service_mask &= ~(UINT64_C(1) << id);

        memset(&rte_services[id], 0, sizeof(struct rte_service_spec_impl));

        return 0;
}

int32_t
rte_service_component_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        /* comp_runstate acts as the guard variable. Use store-release
         * memory order. This synchronizes with the load-acquires in
         * service_run() and rte_service_runstate_get().
         */
        if (runstate)
                __atomic_store_n(&s->comp_runstate, RUNSTATE_RUNNING,
                        __ATOMIC_RELEASE);
        else
                __atomic_store_n(&s->comp_runstate, RUNSTATE_STOPPED,
                        __ATOMIC_RELEASE);

        return 0;
}

int32_t
rte_service_runstate_set(uint32_t id, uint32_t runstate)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        /* app_runstate acts as the guard variable. Use store-release
         * memory order. This synchronizes with the load-acquires in
         * service_run() and rte_service_runstate_get().
         */
        if (runstate)
                __atomic_store_n(&s->app_runstate, RUNSTATE_RUNNING,
                        __ATOMIC_RELEASE);
        else
                __atomic_store_n(&s->app_runstate, RUNSTATE_STOPPED,
                        __ATOMIC_RELEASE);

        return 0;
}

int32_t
rte_service_runstate_get(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        /* comp_runstate and app_runstate act as the guard variables.
         * Use load-acquire memory order. This synchronizes with
         * store-release in service state set functions.
         */
        if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_RUNNING &&
            __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_RUNNING) {
                int check_disabled = !(s->internal_flags &
                        SERVICE_F_START_CHECK);
                int lcore_mapped = (__atomic_load_n(&s->num_mapped_cores,
                        __ATOMIC_RELAXED) > 0);

                return (check_disabled | lcore_mapped);
        } else
                return 0;

}

static inline void
service_runner_do_callback(struct rte_service_spec_impl *s,
                           struct core_state *cs, uint32_t service_idx)
{
        void *userdata = s->spec.callback_userdata;

        if (service_stats_enabled(s)) {
                uint64_t start = rte_rdtsc();
                s->spec.callback(userdata);
                uint64_t end = rte_rdtsc();
                s->cycles_spent += end - start;
                cs->calls_per_service[service_idx]++;
                s->calls++;
        } else
                s->spec.callback(userdata);
}


/* Expects the service 's' is valid. */
static int32_t
service_run(uint32_t i, struct core_state *cs, uint64_t service_mask,
            struct rte_service_spec_impl *s, uint32_t serialize_mt_unsafe)
{
        if (!s)
                return -EINVAL;

        /* comp_runstate and app_runstate act as the guard variables.
         * Use load-acquire memory order. This synchronizes with
         * store-release in service state set functions.
         */
        if (__atomic_load_n(&s->comp_runstate, __ATOMIC_ACQUIRE) !=
                        RUNSTATE_RUNNING ||
            __atomic_load_n(&s->app_runstate, __ATOMIC_ACQUIRE) !=
                        RUNSTATE_RUNNING ||
            !(service_mask & (UINT64_C(1) << i))) {
                cs->service_active_on_lcore[i] = 0;
                return -ENOEXEC;
        }

        cs->service_active_on_lcore[i] = 1;

        if ((service_mt_safe(s) == 0) && (serialize_mt_unsafe == 1)) {
                if (!rte_spinlock_trylock(&s->execute_lock))
                        return -EBUSY;

                service_runner_do_callback(s, cs, i);
                rte_spinlock_unlock(&s->execute_lock);
        } else
                service_runner_do_callback(s, cs, i);

        return 0;
}
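
/* Note on the lock handling above: for services without the MT_SAFE
 * capability, callers that request serialization contend on execute_lock via
 * trylock, so a core that loses the race returns -EBUSY rather than blocking.
 * MT-safe services, or callers passing serialize_mt_unsafe == 0, invoke the
 * callback directly with no lock taken.
 */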

int32_t
rte_service_may_be_active(uint32_t id)
{
        uint32_t ids[RTE_MAX_LCORE] = {0};
        int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);
        int i;

        if (id >= RTE_SERVICE_NUM_MAX || !service_valid(id))
                return -EINVAL;

        for (i = 0; i < lcore_count; i++) {
                if (lcore_states[ids[i]].service_active_on_lcore[id])
                        return 1;
        }

        return 0;
}

int32_t
rte_service_run_iter_on_app_lcore(uint32_t id, uint32_t serialize_mt_unsafe)
{
        struct core_state *cs = &lcore_states[rte_lcore_id()];
        struct rte_service_spec_impl *s;

        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        /* Increment num_mapped_cores to reflect that this core is
         * now mapped and capable of running the service.
         */
        __atomic_add_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);

        int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);

        __atomic_sub_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);

        return ret;
}
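
/* Example (illustrative sketch, not part of this file): an application lcore
 * that is not a service core can drive a service from its own poll loop:
 *
 *     while (!quit) {
 *             // returns -EBUSY if another core holds the execute lock
 *             rte_service_run_iter_on_app_lcore(id, 1);
 *             // ... other per-iteration work ...
 *     }
 *
 * The variables quit and id are hypothetical.
 */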

static int32_t
service_runner_func(void *arg)
{
        RTE_SET_USED(arg);
        uint32_t i;
        const int lcore = rte_lcore_id();
        struct core_state *cs = &lcore_states[lcore];

        __atomic_store_n(&cs->thread_active, 1, __ATOMIC_SEQ_CST);

        /* runstate acts as the guard variable. Use load-acquire
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
        while (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_RUNNING) {
                const uint64_t service_mask = cs->service_mask;

                for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                        if (!service_valid(i))
                                continue;
                        /* return value ignored as no change to code flow */
                        service_run(i, cs, service_mask, service_get(i), 1);
                }

                cs->loops++;
        }

        /* Use SEQ CST memory ordering to avoid any re-ordering around
         * this store, ensuring that once this store is visible, the service
         * lcore thread really is done in service cores code.
         */
        __atomic_store_n(&cs->thread_active, 0, __ATOMIC_SEQ_CST);
        return 0;
}

int32_t
rte_service_lcore_may_be_active(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core)
                return -EINVAL;

        /* Load thread_active using ACQUIRE to avoid instructions dependent on
         * the result being re-ordered before this load completes.
         */
        return __atomic_load_n(&lcore_states[lcore].thread_active,
                               __ATOMIC_ACQUIRE);
}

int32_t
rte_service_lcore_count(void)
{
        int32_t count = 0;
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++)
                count += lcore_states[i].is_service_core;
        return count;
}

int32_t
rte_service_lcore_list(uint32_t array[], uint32_t n)
{
        uint32_t count = rte_service_lcore_count();
        if (count > n)
                return -ENOMEM;

        if (!array)
                return -EINVAL;

        uint32_t i;
        uint32_t idx = 0;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                struct core_state *cs = &lcore_states[i];
                if (cs->is_service_core) {
                        array[idx] = i;
                        idx++;
                }
        }

        return count;
}

int32_t
rte_service_lcore_count_services(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        return __builtin_popcountll(cs->service_mask);
}

int32_t
rte_service_start_with_defaults(void)
{
        /* create a default mapping from cores to services, then start the
         * services to make them transparent to unaware applications.
         */
        uint32_t i;
        int ret;
        uint32_t count = rte_service_get_count();

        int32_t lcore_iter = 0;
        uint32_t ids[RTE_MAX_LCORE] = {0};
        int32_t lcore_count = rte_service_lcore_list(ids, RTE_MAX_LCORE);

        if (lcore_count == 0)
                return -ENOTSUP;

        for (i = 0; (int)i < lcore_count; i++)
                rte_service_lcore_start(ids[i]);

        for (i = 0; i < count; i++) {
                /* do 1:1 core mapping here, with each service getting
                 * assigned a single core by default. Multiple services
                 * multiplex onto a single core, or map 1:1 if there is the
                 * same number of services as service cores.
                 */
                ret = rte_service_map_lcore_set(i, ids[lcore_iter], 1);
                if (ret)
                        return -ENODEV;

                lcore_iter++;
                if (lcore_iter >= lcore_count)
                        lcore_iter = 0;

                ret = rte_service_runstate_set(i, 1);
                if (ret)
                        return -ENOEXEC;
        }

        return 0;
}
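
/* Example (illustrative sketch, not part of this file): an application that
 * launched EAL with service cores (e.g. via the EAL service coremask option)
 * can hand all registered services to those cores with a single call:
 *
 *     if (rte_service_start_with_defaults() != 0)
 *             rte_exit(EXIT_FAILURE, "failed to start services\n");
 */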

static int32_t
service_update(uint32_t sid, uint32_t lcore, uint32_t *set, uint32_t *enabled)
{
        /* validate ID, or return error value */
        if (sid >= RTE_SERVICE_NUM_MAX || !service_valid(sid) ||
            lcore >= RTE_MAX_LCORE || !lcore_states[lcore].is_service_core)
                return -EINVAL;

        uint64_t sid_mask = UINT64_C(1) << sid;
        if (set) {
                uint64_t lcore_mapped = lcore_states[lcore].service_mask &
                        sid_mask;

                if (*set && !lcore_mapped) {
                        lcore_states[lcore].service_mask |= sid_mask;
                        __atomic_add_fetch(&rte_services[sid].num_mapped_cores,
                                1, __ATOMIC_RELAXED);
                }
                if (!*set && lcore_mapped) {
                        lcore_states[lcore].service_mask &= ~(sid_mask);
                        __atomic_sub_fetch(&rte_services[sid].num_mapped_cores,
                                1, __ATOMIC_RELAXED);
                }
        }

        if (enabled)
                *enabled = !!(lcore_states[lcore].service_mask & (sid_mask));

        return 0;
}

int32_t
rte_service_map_lcore_set(uint32_t id, uint32_t lcore, uint32_t enabled)
{
        uint32_t on = enabled > 0;
        return service_update(id, lcore, &on, 0);
}

int32_t
rte_service_map_lcore_get(uint32_t id, uint32_t lcore)
{
        uint32_t enabled;
        int ret = service_update(id, lcore, 0, &enabled);
        if (ret == 0)
                return enabled;
        return ret;
}

static void
set_lcore_state(uint32_t lcore, int32_t state)
{
        /* mark core state in hugepage backed config */
        struct rte_config *cfg = rte_eal_get_configuration();
        cfg->lcore_role[lcore] = state;

        /* mark state in process local lcore_config */
        lcore_config[lcore].core_role = state;

        /* update per-lcore optimized state tracking */
        lcore_states[lcore].is_service_core = (state == ROLE_SERVICE);
}

int32_t
rte_service_lcore_reset_all(void)
{
        /* loop over cores, reset all to mask 0 */
        uint32_t i;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_states[i].is_service_core) {
                        lcore_states[i].service_mask = 0;
                        set_lcore_state(i, ROLE_RTE);
                        /* runstate acts as the guard variable. Use
                         * store-release memory order here to synchronize
                         * with load-acquire in runstate read functions.
                         */
                        __atomic_store_n(&lcore_states[i].runstate,
                                RUNSTATE_STOPPED, __ATOMIC_RELEASE);
                }
        }
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++)
                __atomic_store_n(&rte_services[i].num_mapped_cores, 0,
                        __ATOMIC_RELAXED);

        return 0;
}

int32_t
rte_service_lcore_add(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;
        if (lcore_states[lcore].is_service_core)
                return -EALREADY;

        set_lcore_state(lcore, ROLE_SERVICE);

        /* ensure that after adding a core the mask and state are defaults */
        lcore_states[lcore].service_mask = 0;
        /* Use store-release memory order here to synchronize with
         * load-acquire in runstate read functions.
         */
        __atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
                __ATOMIC_RELEASE);

        return rte_eal_wait_lcore(lcore);
}

int32_t
rte_service_lcore_del(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        /* runstate acts as the guard variable. Use load-acquire
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
        if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) !=
                        RUNSTATE_STOPPED)
                return -EBUSY;

        set_lcore_state(lcore, ROLE_RTE);

        rte_smp_wmb();
        return 0;
}

int32_t
rte_service_lcore_start(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        struct core_state *cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -EINVAL;

        /* runstate acts as the guard variable. Use load-acquire
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
        if (__atomic_load_n(&cs->runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_RUNNING)
                return -EALREADY;

        /* set the core to run state first, and then launch, otherwise it will
         * return immediately as runstate keeps it in the service poll loop.
         */
        /* Use store-release memory order here to synchronize with
         * load-acquire in runstate read functions.
         */
        __atomic_store_n(&cs->runstate, RUNSTATE_RUNNING, __ATOMIC_RELEASE);

        int ret = rte_eal_remote_launch(service_runner_func, 0, lcore);
        /* returns -EBUSY if the core is already launched, 0 on success */
        return ret;
}
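
/* Example (illustrative sketch, not part of this file): manual setup of a
 * single service core, assuming lcore_id and service_id are valid values
 * obtained elsewhere:
 *
 *     rte_service_lcore_add(lcore_id);
 *     rte_service_map_lcore_set(service_id, lcore_id, 1);
 *     rte_service_runstate_set(service_id, 1);
 *     rte_service_lcore_start(lcore_id);
 */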

int32_t
rte_service_lcore_stop(uint32_t lcore)
{
        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        /* runstate acts as the guard variable. Use load-acquire
         * memory order here to synchronize with store-release
         * in runstate update functions.
         */
        if (__atomic_load_n(&lcore_states[lcore].runstate, __ATOMIC_ACQUIRE) ==
                        RUNSTATE_STOPPED)
                return -EALREADY;

        uint32_t i;
        struct core_state *cs = &lcore_states[lcore];
        uint64_t service_mask = cs->service_mask;

        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                int32_t enabled = service_mask & (UINT64_C(1) << i);
                int32_t service_running = rte_service_runstate_get(i);
                int32_t only_core = (1 ==
                        __atomic_load_n(&rte_services[i].num_mapped_cores,
                                __ATOMIC_RELAXED));

                /* Switch off this core for all services, to ensure that future
                 * calls to may_be_active() know this core is switched off.
                 */
                cs->service_active_on_lcore[i] = 0;

                /* if the core is mapped, and the service is running, and this
                 * is the only core that is mapped, the service would cease to
                 * run if this core stopped, so fail instead.
                 */
                if (enabled && service_running && only_core)
                        return -EBUSY;
        }

        /* Use store-release memory order here to synchronize with
         * load-acquire in runstate read functions.
         */
        __atomic_store_n(&lcore_states[lcore].runstate, RUNSTATE_STOPPED,
                __ATOMIC_RELEASE);

        return 0;
}

int32_t
rte_service_attr_get(uint32_t id, uint32_t attr_id, uint64_t *attr_value)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        if (!attr_value)
                return -EINVAL;

        switch (attr_id) {
        case RTE_SERVICE_ATTR_CYCLES:
                *attr_value = s->cycles_spent;
                return 0;
        case RTE_SERVICE_ATTR_CALL_COUNT:
                *attr_value = s->calls;
                return 0;
        default:
                return -EINVAL;
        }
}
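
/* Example (illustrative sketch, not part of this file): reading the call and
 * cycle counters for a service; statistics must have been enabled first via
 * rte_service_set_stats_enable(id, 1).
 *
 *     uint64_t calls = 0, cycles = 0;
 *     rte_service_attr_get(id, RTE_SERVICE_ATTR_CALL_COUNT, &calls);
 *     rte_service_attr_get(id, RTE_SERVICE_ATTR_CYCLES, &cycles);
 */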

int32_t
rte_service_lcore_attr_get(uint32_t lcore, uint32_t attr_id,
                           uint64_t *attr_value)
{
        struct core_state *cs;

        if (lcore >= RTE_MAX_LCORE || !attr_value)
                return -EINVAL;

        cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        switch (attr_id) {
        case RTE_SERVICE_LCORE_ATTR_LOOPS:
                *attr_value = cs->loops;
                return 0;
        default:
                return -EINVAL;
        }
}

int32_t
rte_service_attr_reset_all(uint32_t id)
{
        struct rte_service_spec_impl *s;
        SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);

        s->cycles_spent = 0;
        s->calls = 0;
        return 0;
}

int32_t
rte_service_lcore_attr_reset_all(uint32_t lcore)
{
        struct core_state *cs;

        if (lcore >= RTE_MAX_LCORE)
                return -EINVAL;

        cs = &lcore_states[lcore];
        if (!cs->is_service_core)
                return -ENOTSUP;

        cs->loops = 0;

        return 0;
}

static void
service_dump_one(FILE *f, struct rte_service_spec_impl *s)
{
        /* avoid divide by zero */
        int calls = 1;

        if (s->calls != 0)
                calls = s->calls;
        fprintf(f, "  %s: stats %d\tcalls %"PRIu64"\tcycles %"
                        PRIu64"\tavg: %"PRIu64"\n",
                        s->spec.name, service_stats_enabled(s), s->calls,
                        s->cycles_spent, s->cycles_spent / calls);
}

static void
service_dump_calls_per_lcore(FILE *f, uint32_t lcore)
{
        uint32_t i;
        struct core_state *cs = &lcore_states[lcore];

        fprintf(f, "%02d\t", lcore);
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                fprintf(f, "%"PRIu64"\t", cs->calls_per_service[i]);
        }
        fprintf(f, "\n");
}

int32_t
rte_service_dump(FILE *f, uint32_t id)
{
        uint32_t i;
        int print_one = (id != UINT32_MAX);

        /* print only the specified service */
        if (print_one) {
                struct rte_service_spec_impl *s;
                SERVICE_VALID_GET_OR_ERR_RET(id, s, -EINVAL);
                fprintf(f, "Service %s Summary\n", s->spec.name);
                service_dump_one(f, s);
                return 0;
        }

        /* print all services, as UINT32_MAX was passed as id */
        fprintf(f, "Services Summary\n");
        for (i = 0; i < RTE_SERVICE_NUM_MAX; i++) {
                if (!service_valid(i))
                        continue;
                service_dump_one(f, &rte_services[i]);
        }

        fprintf(f, "Service Cores Summary\n");
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                if (lcore_config[i].core_role != ROLE_SERVICE)
                        continue;

                service_dump_calls_per_lcore(f, i);
        }

        return 0;
}
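/* Usage note (illustrative, not part of this file): passing UINT32_MAX as the
 * id dumps every registered service plus the per-lcore call counts, e.g.
 * rte_service_dump(stdout, UINT32_MAX).
 */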