dpdk/lib/eventdev/rte_event_timer_adapter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017-2018 Intel Corporation.
 * All rights reserved.
 */

#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <stdbool.h>

#include <rte_memzone.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_common.h>
#include <rte_timer.h>
#include <rte_service_component.h>
#include <rte_telemetry.h>

#include "event_timer_adapter_pmd.h"
#include "eventdev_pmd.h"
#include "rte_event_timer_adapter.h"
#include "rte_eventdev.h"
#include "eventdev_trace.h"

#define DATA_MZ_NAME_MAX_LEN 64
#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"

RTE_LOG_REGISTER_SUFFIX(evtim_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_buffer_logtype, adapter.timer, NOTICE);
RTE_LOG_REGISTER_SUFFIX(evtim_svc_logtype, adapter.timer.svc, NOTICE);

static struct rte_event_timer_adapter *adapters;

static const struct event_timer_adapter_ops swtim_ops;

#define EVTIM_LOG(level, logtype, ...) \
        rte_log(RTE_LOG_ ## level, logtype, \
                RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
                        "\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))

#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
#define EVTIM_LOG_DBG(...) \
        EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
#define EVTIM_BUF_LOG_DBG(...) \
        EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
#define EVTIM_SVC_LOG_DBG(...) \
        EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
#else
#define EVTIM_LOG_DBG(...) (void)0
#define EVTIM_BUF_LOG_DBG(...) (void)0
#define EVTIM_SVC_LOG_DBG(...) (void)0
#endif

/* Argument-validation helpers invoked throughout this file; reconstructed
 * here, consistent with how they are used below, so the file is
 * self-contained.
 */
#define ADAPTER_VALID_OR_ERR_RET(adapter, retval) do { \
        if (adapter == NULL || !adapter->allocated) \
                return retval; \
} while (0)

#define FUNC_PTR_OR_ERR_RET(func, errval) do { \
        if ((func) == NULL) \
                return errval; \
} while (0)

#define FUNC_PTR_OR_NULL_RET_WITH_ERRNO(func, errval) do { \
        if ((func) == NULL) { \
                rte_errno = errval; \
                return NULL; \
        } \
} while (0)

static int
default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
                     void *conf_arg)
{
        struct rte_event_timer_adapter *adapter;
        struct rte_eventdev *dev;
        struct rte_event_dev_config dev_conf;
        struct rte_event_port_conf *port_conf, def_port_conf = {0};
        int started;
        uint8_t port_id;
        uint8_t dev_id;
        int ret;

        RTE_SET_USED(event_dev_id);

        adapter = &adapters[id];
        dev = &rte_eventdevs[adapter->data->event_dev_id];
        dev_id = dev->data->dev_id;
        dev_conf = dev->data->dev_conf;

        started = dev->data->dev_started;
        if (started)
                rte_event_dev_stop(dev_id);

        port_id = dev_conf.nb_event_ports;
        dev_conf.nb_event_ports += 1;
        ret = rte_event_dev_configure(dev_id, &dev_conf);
        if (ret < 0) {
                EVTIM_LOG_ERR("failed to configure event dev %u", dev_id);
                if (started)
                        if (rte_event_dev_start(dev_id))
                                return -EIO;

                return ret;
        }

        if (conf_arg != NULL)
                port_conf = conf_arg;
        else {
                port_conf = &def_port_conf;
                ret = rte_event_port_default_conf_get(dev_id, port_id,
                                                      port_conf);
                if (ret < 0)
                        return ret;
        }

        ret = rte_event_port_setup(dev_id, port_id, port_conf);
        if (ret < 0) {
                EVTIM_LOG_ERR("failed to setup event port %u on event dev %u",
                              port_id, dev_id);
                return ret;
        }

        *event_port_id = port_id;

        if (started)
                ret = rte_event_dev_start(dev_id);

        return ret;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
{
        return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
                                                  NULL);
}
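
/* Illustrative application-side usage of the create API (a minimal sketch,
 * not part of this library; field values are arbitrary examples and
 * handle_create_error() is a hypothetical handler):
 *
 *     struct rte_event_timer_adapter_conf conf = {
 *             .event_dev_id = dev_id,
 *             .timer_adapter_id = 0,
 *             .socket_id = rte_socket_id(),
 *             .clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
 *             .timer_tick_ns = 10 * 1000 * 1000,          // 10 ms tick
 *             .max_tmo_ns = 180ULL * 1000 * 1000 * 1000,  // 3 min max
 *             .nb_timers = 40000,
 *             .flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
 *     };
 *     struct rte_event_timer_adapter *adapter =
 *             rte_event_timer_adapter_create(&conf);
 *     if (adapter == NULL)
 *             handle_create_error(rte_errno);
 */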

struct rte_event_timer_adapter *
rte_event_timer_adapter_create_ext(
                const struct rte_event_timer_adapter_conf *conf,
                rte_event_timer_adapter_port_conf_cb_t conf_cb,
                void *conf_arg)
{
        uint16_t adapter_id;
        struct rte_event_timer_adapter *adapter;
        const struct rte_memzone *mz;
        char mz_name[DATA_MZ_NAME_MAX_LEN];
        int n, ret;
        struct rte_eventdev *dev;

        if (adapters == NULL) {
                adapters = rte_zmalloc("Eventdev",
                                       sizeof(struct rte_event_timer_adapter) *
                                               RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
                                       RTE_CACHE_LINE_SIZE);
                if (adapters == NULL) {
                        rte_errno = ENOMEM;
                        return NULL;
                }
        }

        if (conf == NULL) {
                rte_errno = EINVAL;
                return NULL;
        }

        /* Check eventdev ID */
        if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
                rte_errno = EINVAL;
                return NULL;
        }
        dev = &rte_eventdevs[conf->event_dev_id];

        adapter_id = conf->timer_adapter_id;

        /* Check that adapter_id is in range */
        if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
                rte_errno = EINVAL;
                return NULL;
        }

        /* Check adapter ID not already allocated */
        adapter = &adapters[adapter_id];
        if (adapter->allocated) {
                rte_errno = EEXIST;
                return NULL;
        }

        /* Create shared data area. */
        n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
        if (n >= (int)sizeof(mz_name)) {
                rte_errno = EINVAL;
                return NULL;
        }
        mz = rte_memzone_reserve(mz_name,
                                 sizeof(struct rte_event_timer_adapter_data),
                                 conf->socket_id, 0);
        if (mz == NULL)
                /* rte_errno set by rte_memzone_reserve */
                return NULL;

        adapter->data = mz->addr;
        memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));

        adapter->data->mz = mz;
        adapter->data->event_dev_id = conf->event_dev_id;
        adapter->data->id = adapter_id;
        adapter->data->socket_id = conf->socket_id;
        adapter->data->conf = *conf;  /* copy conf structure */

        /* Query eventdev PMD for timer adapter capabilities and ops */
        ret = dev->dev_ops->timer_adapter_caps_get(dev,
                                                   adapter->data->conf.flags,
                                                   &adapter->data->caps,
                                                   &adapter->ops);
        if (ret < 0) {
                rte_errno = -ret;
                goto free_memzone;
        }

        if (!(adapter->data->caps &
              RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
                FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, EINVAL);
                ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
                              &adapter->data->event_port_id, conf_arg);
                if (ret < 0) {
                        rte_errno = -ret;
                        goto free_memzone;
                }
        }

        /* If eventdev PMD did not provide ops, use default software
         * implementation.
         */
        if (adapter->ops == NULL)
                adapter->ops = &swtim_ops;

        /* Allow driver to do some setup */
        FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, ENOTSUP);
        ret = adapter->ops->init(adapter);
        if (ret < 0) {
                rte_errno = -ret;
                goto free_memzone;
        }

        /* Set fast-path function pointers */
        adapter->arm_burst = adapter->ops->arm_burst;
        adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
        adapter->cancel_burst = adapter->ops->cancel_burst;

        adapter->allocated = 1;

        rte_eventdev_trace_timer_adapter_create(adapter_id, adapter, conf,
                conf_cb);
        return adapter;

free_memzone:
        rte_memzone_free(adapter->data->mz);
        return NULL;
}

int
rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
                struct rte_event_timer_adapter_info *adapter_info)
{
        ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

        if (adapter->ops->get_info)
                /* let driver set values it knows */
                adapter->ops->get_info(adapter, adapter_info);

        /* Set common values */
        adapter_info->conf = adapter->data->conf;
        adapter_info->event_dev_port_id = adapter->data->event_port_id;
        adapter_info->caps = adapter->data->caps;

        return 0;
}

int
rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
{
        int ret;

        ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
        FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);

        if (adapter->data->started) {
                EVTIM_LOG_ERR("event timer adapter %"PRIu8" already started",
                              adapter->data->id);
                return -EALREADY;
        }

        ret = adapter->ops->start(adapter);
        if (ret < 0)
                return ret;

        adapter->data->started = 1;
        rte_eventdev_trace_timer_adapter_start(adapter);
        return 0;
}

int
rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
{
        int ret;

        ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
        FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);

        if (adapter->data->started == 0) {
                EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
                              adapter->data->id);
                return 0;
        }

        ret = adapter->ops->stop(adapter);
        if (ret < 0)
                return ret;

        adapter->data->started = 0;
        rte_eventdev_trace_timer_adapter_stop(adapter);
        return 0;
}

struct rte_event_timer_adapter *
rte_event_timer_adapter_lookup(uint16_t adapter_id)
{
        char name[DATA_MZ_NAME_MAX_LEN];
        const struct rte_memzone *mz;
        struct rte_event_timer_adapter_data *data;
        struct rte_event_timer_adapter *adapter;
        int ret;
        struct rte_eventdev *dev;

        if (adapters == NULL) {
                adapters = rte_zmalloc("Eventdev",
                                       sizeof(struct rte_event_timer_adapter) *
                                               RTE_EVENT_TIMER_ADAPTER_NUM_MAX,
                                       RTE_CACHE_LINE_SIZE);
                if (adapters == NULL) {
                        rte_errno = ENOMEM;
                        return NULL;
                }
        }

        if (adapters[adapter_id].allocated)
                return &adapters[adapter_id]; /* Adapter is already loaded */

        snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
        mz = rte_memzone_lookup(name);
        if (mz == NULL) {
                rte_errno = ENOENT;
                return NULL;
        }

        data = mz->addr;

        adapter = &adapters[data->id];
        adapter->data = data;

        dev = &rte_eventdevs[adapter->data->event_dev_id];

        /* Query eventdev PMD for timer adapter capabilities and ops */
        ret = dev->dev_ops->timer_adapter_caps_get(dev,
                                                   adapter->data->conf.flags,
                                                   &adapter->data->caps,
                                                   &adapter->ops);
        if (ret < 0) {
                rte_errno = EINVAL;
                return NULL;
        }

        /* If eventdev PMD did not provide ops, use default software
         * implementation.
         */
        if (adapter->ops == NULL)
                adapter->ops = &swtim_ops;

        /* Set fast-path function pointers */
        adapter->arm_burst = adapter->ops->arm_burst;
        adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
        adapter->cancel_burst = adapter->ops->cancel_burst;

        adapter->allocated = 1;

        return adapter;
}

int
rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
{
        int i, ret;

        ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
        FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);

        if (adapter->data->started == 1) {
                EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
                              "before freeing", adapter->data->id);
                return -EBUSY;
        }

        /* free impl priv data */
        ret = adapter->ops->uninit(adapter);
        if (ret < 0)
                return ret;

        /* free shared data area */
        ret = rte_memzone_free(adapter->data->mz);
        if (ret < 0)
                return ret;

        adapter->data = NULL;
        adapter->allocated = 0;

        ret = 0;
        for (i = 0; i < RTE_EVENT_TIMER_ADAPTER_NUM_MAX; i++)
                if (adapters[i].allocated)
                        ret = adapters[i].allocated;

        if (!ret) {
                rte_free(adapters);
                adapters = NULL;
        }

        rte_eventdev_trace_timer_adapter_free(adapter);
        return 0;
}

int
rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
                                       uint32_t *service_id)
{
        ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);

        if (adapter->data->service_inited && service_id != NULL)
                *service_id = adapter->data->service_id;

        return adapter->data->service_inited ? 0 : -ESRCH;
}
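
/* Wiring the software adapter's service to a service core (a minimal
 * application-side sketch; `slcore` is an assumed lcore id reserved as a
 * service core).  Note that swtim_start() below requires exactly one
 * mapped lcore.
 *
 *     uint32_t service_id;
 *     if (rte_event_timer_adapter_service_id_get(adapter, &service_id) == 0) {
 *             rte_service_lcore_add(slcore);
 *             rte_service_map_lcore_set(service_id, slcore, 1);
 *             rte_service_runstate_set(service_id, 1);
 *             rte_service_lcore_start(slcore);
 *     }
 */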

int
rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
                                  struct rte_event_timer_adapter_stats *stats)
{
        ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
        FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
        if (stats == NULL)
                return -EINVAL;

        return adapter->ops->stats_get(adapter, stats);
}

int
rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
{
        ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
        FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
        return adapter->ops->stats_reset(adapter);
}

/*
 * Software event timer adapter buffer helper functions
 */

#define NSECPERSEC 1E9

/* Optimizations used to index into the buffer require that the buffer size
 * be a power of 2.
 */
#define EVENT_BUFFER_SZ 4096
#define EVENT_BUFFER_BATCHSZ 32
#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)

#define EXP_TIM_BUF_SZ 128

struct event_buffer {
        size_t head;
        size_t tail;
        struct rte_event events[EVENT_BUFFER_SZ];
} __rte_cache_aligned;

static inline bool
event_buffer_full(struct event_buffer *bufp)
{
        return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
}

static inline bool
event_buffer_batch_ready(struct event_buffer *bufp)
{
        return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
}

static void
event_buffer_init(struct event_buffer *bufp)
{
        bufp->head = bufp->tail = 0;
        memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
}

static int
event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
{
        size_t head_idx;
        struct rte_event *buf_eventp;

        if (event_buffer_full(bufp))
                return -1;

        /* Instead of modulus, bitwise AND with mask to get head_idx. */
        head_idx = bufp->head & EVENT_BUFFER_MASK;
        buf_eventp = &bufp->events[head_idx];
        rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));

        /* Wrap automatically when overflow occurs. */
        bufp->head++;

        return 0;
}
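
/* Example of the free-running index arithmetic above: with
 * EVENT_BUFFER_SZ = 4096, a head counter of 4099 masks to slot 3, and the
 * fill level (head - tail) remains correct across wraparound since both
 * counters only ever increase; event_buffer_full() bounds their difference
 * to EVENT_BUFFER_SZ.
 */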

static void
event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
                   uint16_t *nb_events_flushed,
                   uint16_t *nb_events_inv)
{
        struct rte_event *events = bufp->events;
        size_t head_idx, tail_idx;
        uint16_t n = 0;

        /* Instead of modulus, bitwise AND with mask to get index. */
        head_idx = bufp->head & EVENT_BUFFER_MASK;
        tail_idx = bufp->tail & EVENT_BUFFER_MASK;

        RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);

        /* Determine the largest contiguous run we can attempt to enqueue to the
         * event device.
         */
        if (head_idx > tail_idx)
                n = head_idx - tail_idx;
        else if (head_idx < tail_idx)
                n = EVENT_BUFFER_SZ - tail_idx;
        else if (event_buffer_full(bufp))
                n = EVENT_BUFFER_SZ - tail_idx;
        else {
                *nb_events_flushed = 0;
                return;
        }

        n = RTE_MIN(EVENT_BUFFER_BATCHSZ, n);
        *nb_events_inv = 0;

        *nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
                                                     &events[tail_idx], n);
        if (*nb_events_flushed != n) {
                if (rte_errno == EINVAL) {
                        EVTIM_LOG_ERR("failed to enqueue invalid event - "
                                      "dropping it");
                        (*nb_events_inv)++;
                } else if (rte_errno == ENOSPC)
                        rte_pause();
        }

        if (*nb_events_flushed > 0)
                EVTIM_BUF_LOG_DBG("enqueued %"PRIu16" timer events to event "
                                  "device", *nb_events_flushed);

        bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
}
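
/* Note on the flush above: the tail advances past invalid events as well
 * as successfully enqueued ones, so a single bad event cannot permanently
 * stall the buffer.  At most one batch-sized contiguous run is attempted
 * per call; anything left over is retried on a later flush.
 */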

/*
 * Software event timer adapter implementation
 */
struct swtim {
        /* Identifier of service executing timer management logic. */
        uint32_t service_id;
        /* The cycle count at which the adapter should next tick */
        uint64_t next_tick_cycles;
        /* The tick resolution used by adapter instance. May have been
         * adjusted from what user requested
         */
        uint64_t timer_tick_ns;
        /* Maximum timeout in nanoseconds allowed by adapter instance. */
        uint64_t max_tmo_ns;
        /* Buffered timer expiry events to be enqueued to an event device. */
        struct event_buffer buffer;
        /* Statistics */
        struct rte_event_timer_adapter_stats stats;
        /* Mempool of timer objects */
        struct rte_mempool *tim_pool;
        /* Back pointer for convenience */
        struct rte_event_timer_adapter *adapter;
        /* Identifier of timer data instance */
        uint32_t timer_data_id;
        /* Track which cores have actually armed a timer */
        struct {
                uint16_t v;
        } __rte_cache_aligned in_use[RTE_MAX_LCORE];
        /* Track which cores' timer lists should be polled */
        unsigned int poll_lcores[RTE_MAX_LCORE];
        /* The number of lists that should be polled */
        int n_poll_lcores;
        /* Timers which have expired and can be returned to a mempool */
        struct rte_timer *expired_timers[EXP_TIM_BUF_SZ];
        /* The number of timers that can be returned to a mempool */
        size_t n_expired_timers;
};

static inline struct swtim *
swtim_pmd_priv(const struct rte_event_timer_adapter *adapter)
{
        return adapter->data->adapter_priv;
}

static void
swtim_callback(struct rte_timer *tim)
{
        struct rte_event_timer *evtim = tim->arg;
        struct rte_event_timer_adapter *adapter;
        unsigned int lcore = rte_lcore_id();
        struct swtim *sw;
        uint16_t nb_evs_flushed = 0;
        uint16_t nb_evs_invalid = 0;
        uint64_t opaque;
        int ret;
        int n_lcores;

        opaque = evtim->impl_opaque[1];
        adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
        sw = swtim_pmd_priv(adapter);

        ret = event_buffer_add(&sw->buffer, &evtim->ev);
        if (ret < 0) {
                /* If event buffer is full, put timer back in list with
                 * immediate expiry value, so that we process it again on the
                 * next iteration.
                 */
                ret = rte_timer_alt_reset(sw->timer_data_id, tim, 0, SINGLE,
                                          lcore, NULL, evtim);
                if (ret < 0) {
                        EVTIM_LOG_DBG("event buffer full, failed to reset "
                                      "timer with immediate expiry value");
                } else {
                        sw->stats.evtim_retry_count++;
                        EVTIM_LOG_DBG("event buffer full, resetting rte_timer "
                                      "with immediate expiry value");
                }

                if (unlikely(sw->in_use[lcore].v == 0)) {
                        sw->in_use[lcore].v = 1;
                        n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
                                                     __ATOMIC_RELAXED);
                        __atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
                                        __ATOMIC_RELAXED);
                }
        } else {
                EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");

                /* Empty the buffer here, if necessary, to free older expired
                 * timers only
                 */
                if (unlikely(sw->n_expired_timers == EXP_TIM_BUF_SZ)) {
                        rte_mempool_put_bulk(sw->tim_pool,
                                             (void **)sw->expired_timers,
                                             sw->n_expired_timers);
                        sw->n_expired_timers = 0;
                }

                sw->expired_timers[sw->n_expired_timers++] = tim;
                sw->stats.evtim_exp_count++;

                __atomic_store_n(&evtim->state, RTE_EVENT_TIMER_NOT_ARMED,
                                __ATOMIC_RELEASE);
        }

        if (event_buffer_batch_ready(&sw->buffer)) {
                event_buffer_flush(&sw->buffer,
                                   adapter->data->event_dev_id,
                                   adapter->data->event_port_id,
                                   &nb_evs_flushed,
                                   &nb_evs_invalid);

                sw->stats.ev_enq_count += nb_evs_flushed;
                sw->stats.ev_inv_count += nb_evs_invalid;
        }
}

static __rte_always_inline uint64_t
get_timeout_cycles(struct rte_event_timer *evtim,
                   const struct rte_event_timer_adapter *adapter)
{
        struct swtim *sw = swtim_pmd_priv(adapter);
        uint64_t timeout_ns = evtim->timeout_ticks * sw->timer_tick_ns;
        return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
}
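
/* Worked example for the conversion above: with a 1 ms tick
 * (timer_tick_ns = 1000000) and timeout_ticks = 100, timeout_ns is
 * 100000000 (100 ms); on a hypothetical 2 GHz timer, that yields
 * 100000000 * 2000000000 / 1E9 = 200000000 cycles.
 */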

/* This function returns true if one or more (adapter) ticks have occurred since
 * the last time it was called.
 */
static inline bool
swtim_did_tick(struct swtim *sw)
{
        uint64_t cycles_per_adapter_tick, start_cycles;
        uint64_t *next_tick_cyclesp;

        next_tick_cyclesp = &sw->next_tick_cycles;
        cycles_per_adapter_tick = sw->timer_tick_ns *
                        (rte_get_timer_hz() / NSECPERSEC);
        start_cycles = rte_get_timer_cycles();

        /* Note: initially, *next_tick_cyclesp == 0, so the clause below will
         * execute, and set things going.
         */

        if (start_cycles >= *next_tick_cyclesp) {
                /* Snap the current cycle count to the preceding adapter tick
                 * boundary.
                 */
                start_cycles -= start_cycles % cycles_per_adapter_tick;
                *next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;

                return true;
        }

        return false;
}
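
/* Example: with a 10 ms tick at a hypothetical 1 GHz timer
 * (cycles_per_adapter_tick = 10000000), a first call at cycle 25000000
 * snaps start_cycles down to 20000000 and sets the next boundary at
 * 30000000; subsequent calls return false until cycle 30000000.
 */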

/* Check that event timer timeout value is in range */
static __rte_always_inline int
check_timeout(struct rte_event_timer *evtim,
              const struct rte_event_timer_adapter *adapter)
{
        uint64_t tmo_nsec;
        struct swtim *sw = swtim_pmd_priv(adapter);

        tmo_nsec = evtim->timeout_ticks * sw->timer_tick_ns;
        if (tmo_nsec > sw->max_tmo_ns)
                return -1;
        if (tmo_nsec < sw->timer_tick_ns)
                return -2;

        return 0;
}

/* Check that event timer event queue sched type matches destination event queue
 * sched type
 */
static __rte_always_inline int
check_destination_event_queue(struct rte_event_timer *evtim,
                              const struct rte_event_timer_adapter *adapter)
{
        int ret;
        uint32_t sched_type;

        ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
                                       evtim->ev.queue_id,
                                       RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
                                       &sched_type);

        if ((ret == 0 && evtim->ev.sched_type == sched_type) ||
            ret == -EOVERFLOW)
                return 0;

        return -1;
}
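
/* Note: rte_event_queue_attr_get() returns -EOVERFLOW for the schedule
 * type attribute when the destination queue was configured with
 * RTE_EVENT_QUEUE_CFG_ALL_TYPES; any sched type is then valid for that
 * queue, which is why that return value is accepted above.
 */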

static int
swtim_service_func(void *arg)
{
        struct rte_event_timer_adapter *adapter = arg;
        struct swtim *sw = swtim_pmd_priv(adapter);
        uint16_t nb_evs_flushed = 0;
        uint16_t nb_evs_invalid = 0;

        if (swtim_did_tick(sw)) {
                rte_timer_alt_manage(sw->timer_data_id,
                                     sw->poll_lcores,
                                     sw->n_poll_lcores,
                                     swtim_callback);

                /* Return expired timer objects back to mempool */
                rte_mempool_put_bulk(sw->tim_pool, (void **)sw->expired_timers,
                                     sw->n_expired_timers);
                sw->n_expired_timers = 0;

                event_buffer_flush(&sw->buffer,
                                   adapter->data->event_dev_id,
                                   adapter->data->event_port_id,
                                   &nb_evs_flushed,
                                   &nb_evs_invalid);

                sw->stats.ev_enq_count += nb_evs_flushed;
                sw->stats.ev_inv_count += nb_evs_invalid;
                sw->stats.adapter_tick_count++;
        }

        rte_event_maintain(adapter->data->event_dev_id,
                           adapter->data->event_port_id, 0);

        return 0;
}

/* The adapter initialization function rounds the mempool size up to the next
 * power of 2, so we can take the difference between that value and what the
 * user requested, and use the space for caches.  This avoids a scenario where a
 * user can't arm the number of timers the adapter was configured with because
 * mempool objects have been lost to caches.
 *
 * nb_actual should always be a power of 2, so we can iterate over the powers
 * of 2 to see what the largest cache size we can use is.
 */
static int
compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
{
        int i;
        int size;
        int cache_size = 0;

        for (i = 0;; i++) {
                size = 1 << i;

                if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
                    size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
                    size <= nb_actual / 1.5)
                        cache_size = size;
                else
                        break;
        }

        return cache_size;
}
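
/* Worked example (assuming RTE_MAX_LCORE = 128 and
 * RTE_MEMPOOL_CACHE_MAX_SIZE = 512): a request for 40000 timers is rounded
 * up to nb_actual = 65536, leaving 25536 slack objects.  The loop stops at
 * size = 256 because 128 * 256 = 32768 exceeds that slack, so a per-lcore
 * cache of 128 objects is selected.
 */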

static int
swtim_init(struct rte_event_timer_adapter *adapter)
{
        int i, ret;
        struct swtim *sw;
        unsigned int flags;
        struct rte_service_spec service;

        /* Allocate storage for private data area */
#define SWTIM_NAMESIZE 32
        char swtim_name[SWTIM_NAMESIZE];
        snprintf(swtim_name, SWTIM_NAMESIZE, "swtim_%"PRIu8,
                        adapter->data->id);
        sw = rte_zmalloc_socket(swtim_name, sizeof(*sw), RTE_CACHE_LINE_SIZE,
                        adapter->data->socket_id);
        if (sw == NULL) {
                EVTIM_LOG_ERR("failed to allocate space for private data");
                rte_errno = ENOMEM;
                return -1;
        }

        /* Connect storage to adapter instance */
        adapter->data->adapter_priv = sw;
        sw->adapter = adapter;

        sw->timer_tick_ns = adapter->data->conf.timer_tick_ns;
        sw->max_tmo_ns = adapter->data->conf.max_tmo_ns;

        /* Create a timer pool */
        char pool_name[SWTIM_NAMESIZE];
        snprintf(pool_name, SWTIM_NAMESIZE, "swtim_pool_%"PRIu8,
                 adapter->data->id);
        /* Optimal mempool size is a power of 2 minus one */
        uint64_t nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
        int pool_size = nb_timers - 1;
        int cache_size = compute_msg_mempool_cache_size(
                                adapter->data->conf.nb_timers, nb_timers);
        flags = 0; /* pool is multi-producer, multi-consumer */
        sw->tim_pool = rte_mempool_create(pool_name, pool_size,
                        sizeof(struct rte_timer), cache_size, 0, NULL, NULL,
                        NULL, NULL, adapter->data->socket_id, flags);
        if (sw->tim_pool == NULL) {
                EVTIM_LOG_ERR("failed to create timer object mempool");
                rte_errno = ENOMEM;
                goto free_alloc;
        }

        /* Initialize the variables that track in-use timer lists */
        for (i = 0; i < RTE_MAX_LCORE; i++)
                sw->in_use[i].v = 0;

        /* Initialize the timer subsystem and allocate timer data instance */
        ret = rte_timer_subsystem_init();
        if (ret < 0) {
                if (ret != -EALREADY) {
                        EVTIM_LOG_ERR("failed to initialize timer subsystem");
                        rte_errno = -ret;
                        goto free_mempool;
                }
        }

        ret = rte_timer_data_alloc(&sw->timer_data_id);
        if (ret < 0) {
                EVTIM_LOG_ERR("failed to allocate timer data instance");
                rte_errno = -ret;
                goto free_mempool;
        }

        /* Initialize timer event buffer */
        event_buffer_init(&sw->buffer);

        sw->adapter = adapter;

        /* Register a service component to run adapter logic */
        memset(&service, 0, sizeof(service));
        snprintf(service.name, RTE_SERVICE_NAME_MAX,
                 "swtim_svc_%"PRIu8, adapter->data->id);
        service.socket_id = adapter->data->socket_id;
        service.callback = swtim_service_func;
        service.callback_userdata = adapter;
        service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
        ret = rte_service_component_register(&service, &sw->service_id);
        if (ret < 0) {
                EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
                              ": err = %d", service.name, sw->service_id,
                              ret);

                rte_errno = ENOSPC;
                goto free_mempool;
        }

        EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
                      sw->service_id);

        adapter->data->service_id = sw->service_id;
        adapter->data->service_inited = 1;

        return 0;
free_mempool:
        rte_mempool_free(sw->tim_pool);
free_alloc:
        rte_free(sw);
        return -1;
}

static void
swtim_free_tim(struct rte_timer *tim, void *arg)
{
        struct swtim *sw = arg;

        rte_mempool_put(sw->tim_pool, tim);
}

/* Traverse the list of outstanding timers and put them back in the mempool
 * before freeing the adapter to avoid leaking the memory.
 */
static int
swtim_uninit(struct rte_event_timer_adapter *adapter)
{
        int ret;
        struct swtim *sw = swtim_pmd_priv(adapter);

        /* Free outstanding timers */
        rte_timer_stop_all(sw->timer_data_id,
                           sw->poll_lcores,
                           sw->n_poll_lcores,
                           swtim_free_tim,
                           sw);

        ret = rte_service_component_unregister(sw->service_id);
        if (ret < 0) {
                EVTIM_LOG_ERR("failed to unregister service component");
                return ret;
        }

        rte_mempool_free(sw->tim_pool);
        rte_free(sw);
        adapter->data->adapter_priv = NULL;

        return 0;
}

static inline int32_t
get_mapped_count_for_service(uint32_t service_id)
{
        int32_t core_count, i, mapped_count = 0;
        uint32_t lcore_arr[RTE_MAX_LCORE];

        core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);

        for (i = 0; i < core_count; i++)
                if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
                        mapped_count++;

        return mapped_count;
}

static int
swtim_start(const struct rte_event_timer_adapter *adapter)
{
        int mapped_count;
        struct swtim *sw = swtim_pmd_priv(adapter);

        /* Mapping the service to more than one service core can introduce
         * delays while one thread is waiting to acquire a lock, so only allow
         * one core to be mapped to the service.
         *
         * Note: the service could be modified such that it spreads cores to
         * poll over multiple service instances.
         */
        mapped_count = get_mapped_count_for_service(sw->service_id);

        if (mapped_count != 1)
                return mapped_count < 1 ? -ENOENT : -ENOTSUP;

        return rte_service_component_runstate_set(sw->service_id, 1);
}

static int
swtim_stop(const struct rte_event_timer_adapter *adapter)
{
        int ret;
        struct swtim *sw = swtim_pmd_priv(adapter);

        ret = rte_service_component_runstate_set(sw->service_id, 0);
        if (ret < 0)
                return ret;

        /* Wait for the service to complete its final iteration */
        while (rte_service_may_be_active(sw->service_id))
                rte_pause();

        return 0;
}

static void
swtim_get_info(const struct rte_event_timer_adapter *adapter,
                struct rte_event_timer_adapter_info *adapter_info)
{
        struct swtim *sw = swtim_pmd_priv(adapter);
        adapter_info->min_resolution_ns = sw->timer_tick_ns;
        adapter_info->max_tmo_ns = sw->max_tmo_ns;
}

static int
swtim_stats_get(const struct rte_event_timer_adapter *adapter,
                struct rte_event_timer_adapter_stats *stats)
{
        struct swtim *sw = swtim_pmd_priv(adapter);
        *stats = sw->stats; /* structure copy */
        return 0;
}

static int
swtim_stats_reset(const struct rte_event_timer_adapter *adapter)
{
        struct swtim *sw = swtim_pmd_priv(adapter);
        memset(&sw->stats, 0, sizeof(sw->stats));
        return 0;
}

static uint16_t
__swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
                struct rte_event_timer **evtims,
                uint16_t nb_evtims)
{
        int i, ret;
        struct swtim *sw = swtim_pmd_priv(adapter);
        uint32_t lcore_id = rte_lcore_id();
        struct rte_timer *tim, *tims[nb_evtims];
        uint64_t cycles;
        int n_lcores;
        /* Timer list for this lcore is not in use. */
        uint16_t exp_state = 0;
        enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
        /* Check that the service is running. */
        if (rte_service_runstate_get(adapter->data->service_id) != 1) {
                rte_errno = EINVAL;
                return 0;
        }
#endif

        /* Adjust lcore_id if this is a non-EAL thread; arbitrarily use the
         * timer list of the highest lcore for such threads.
         */
        if (lcore_id == LCORE_ID_ANY)
                lcore_id = RTE_MAX_LCORE - 1;

        /* If this is the first time an event timer is being armed on this
         * lcore, mark the lcore as "in use"; this causes the service
         * function to process the timer list that corresponds to this lcore.
         * The atomic compare-and-swap prevents a race on the in_use flag
         * between multiple non-EAL threads.
         */
        if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
                        &exp_state, 1, 0,
                        __ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
                EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
                              lcore_id);
                n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
                                             __ATOMIC_RELAXED);
                __atomic_store_n(&sw->poll_lcores[n_lcores], lcore_id,
                                __ATOMIC_RELAXED);
        }

        ret = rte_mempool_get_bulk(sw->tim_pool, (void **)tims,
                                   nb_evtims);
        if (ret < 0) {
                rte_errno = ENOSPC;
                return 0;
        }

        for (i = 0; i < nb_evtims; i++) {
                n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
                if (n_state == RTE_EVENT_TIMER_ARMED) {
                        rte_errno = EALREADY;
                        break;
                } else if (!(n_state == RTE_EVENT_TIMER_NOT_ARMED ||
                             n_state == RTE_EVENT_TIMER_CANCELED)) {
                        rte_errno = EINVAL;
                        break;
                }

                ret = check_timeout(evtims[i], adapter);
                if (unlikely(ret == -1)) {
                        __atomic_store_n(&evtims[i]->state,
                                        RTE_EVENT_TIMER_ERROR_TOOLATE,
                                        __ATOMIC_RELAXED);
                        rte_errno = EINVAL;
                        break;
                } else if (unlikely(ret == -2)) {
                        __atomic_store_n(&evtims[i]->state,
                                        RTE_EVENT_TIMER_ERROR_TOOEARLY,
                                        __ATOMIC_RELAXED);
                        rte_errno = EINVAL;
                        break;
                }

                if (unlikely(check_destination_event_queue(evtims[i],
                                                           adapter) < 0)) {
                        __atomic_store_n(&evtims[i]->state,
                                        RTE_EVENT_TIMER_ERROR,
                                        __ATOMIC_RELAXED);
                        rte_errno = EINVAL;
                        break;
                }

                tim = tims[i];
                rte_timer_init(tim);

                evtims[i]->impl_opaque[0] = (uintptr_t)tim;
                evtims[i]->impl_opaque[1] = (uintptr_t)adapter;

                cycles = get_timeout_cycles(evtims[i], adapter);
                ret = rte_timer_alt_reset(sw->timer_data_id, tim, cycles,
                                          SINGLE, lcore_id, NULL, evtims[i]);
                if (ret < 0) {
                        /* tim was in RUNNING or CONFIG state */
                        __atomic_store_n(&evtims[i]->state,
                                        RTE_EVENT_TIMER_ERROR,
                                        __ATOMIC_RELEASE);
                        break;
                }

                EVTIM_LOG_DBG("armed an event timer");
                /* RELEASE ordering guarantees that the adapter-specific data
                 * written above is observed before this state update.
                 */
                __atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_ARMED,
                                __ATOMIC_RELEASE);
        }

        if (i < nb_evtims)
                rte_mempool_put_bulk(sw->tim_pool,
                                     (void **)&tims[i], nb_evtims - i);

        return i;
}
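
/* Illustrative arming sequence (application side, a minimal sketch;
 * target_queue and handle_arm_error() are assumptions):
 *
 *     struct rte_event_timer evtim = {
 *             .ev.op = RTE_EVENT_OP_NEW,
 *             .ev.queue_id = target_queue,
 *             .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *             .ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *             .ev.event_type = RTE_EVENT_TYPE_TIMER,
 *             .state = RTE_EVENT_TIMER_NOT_ARMED,
 *             .timeout_ticks = 30,   // expire 30 adapter ticks from now
 *     };
 *     struct rte_event_timer *evtims[] = { &evtim };
 *     if (rte_event_timer_arm_burst(adapter, evtims, 1) != 1)
 *             handle_arm_error(rte_errno);
 */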

static uint16_t
swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
                struct rte_event_timer **evtims,
                uint16_t nb_evtims)
{
        return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static uint16_t
swtim_cancel_burst(const struct rte_event_timer_adapter *adapter,
                   struct rte_event_timer **evtims,
                   uint16_t nb_evtims)
{
        int i, ret;
        struct rte_timer *timp;
        uint64_t opaque;
        struct swtim *sw = swtim_pmd_priv(adapter);
        enum rte_event_timer_state n_state;

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
        /* Check that the service is running. */
        if (rte_service_runstate_get(adapter->data->service_id) != 1) {
                rte_errno = EINVAL;
                return 0;
        }
#endif

        for (i = 0; i < nb_evtims; i++) {
                /* Don't modify the event timer state in these cases */
                /* ACQUIRE ordering guarantees that the implementation-specific
                 * opaque data is read only under the correct state.
                 */
                n_state = __atomic_load_n(&evtims[i]->state, __ATOMIC_ACQUIRE);
                if (n_state == RTE_EVENT_TIMER_CANCELED) {
                        rte_errno = EALREADY;
                        break;
                } else if (n_state != RTE_EVENT_TIMER_ARMED) {
                        rte_errno = EINVAL;
                        break;
                }

                opaque = evtims[i]->impl_opaque[0];
                timp = (struct rte_timer *)(uintptr_t)opaque;
                RTE_ASSERT(timp != NULL);

                ret = rte_timer_alt_stop(sw->timer_data_id, timp);
                if (ret < 0) {
                        /* Timer is running or being configured */
                        rte_errno = EAGAIN;
                        break;
                }

                rte_mempool_put(sw->tim_pool, (void *)timp);

                /* The RELEASE ordering here pairs with the ACQUIRE load above
                 * so that the state update is observed consistently across
                 * threads.
                 */
                __atomic_store_n(&evtims[i]->state, RTE_EVENT_TIMER_CANCELED,
                                __ATOMIC_RELEASE);
        }

        return i;
}
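
/* Cancellation mirrors arming (a minimal sketch): rte_errno distinguishes
 * an already-canceled timer (EALREADY) from one whose callback is currently
 * running (EAGAIN), which may simply be retried or allowed to expire.
 *
 *     if (rte_event_timer_cancel_burst(adapter, evtims, 1) != 1 &&
 *         rte_errno == EAGAIN)
 *             ; // timer is firing right now; retry or let it expire
 */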

static uint16_t
swtim_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
                         struct rte_event_timer **evtims,
                         uint64_t timeout_ticks,
                         uint16_t nb_evtims)
{
        int i;

        for (i = 0; i < nb_evtims; i++)
                evtims[i]->timeout_ticks = timeout_ticks;

        return __swtim_arm_burst(adapter, evtims, nb_evtims);
}

static const struct event_timer_adapter_ops swtim_ops = {
        .init = swtim_init,
        .uninit = swtim_uninit,
        .start = swtim_start,
        .stop = swtim_stop,
        .get_info = swtim_get_info,
        .stats_get = swtim_stats_get,
        .stats_reset = swtim_stats_reset,
        .arm_burst = swtim_arm_burst,
        .arm_tmo_tick_burst = swtim_arm_tmo_tick_burst,
        .cancel_burst = swtim_cancel_burst,
};

static int
handle_ta_info(const char *cmd __rte_unused, const char *params,
                struct rte_tel_data *d)
{
        struct rte_event_timer_adapter_info adapter_info;
        struct rte_event_timer_adapter *adapter;
        uint16_t adapter_id;
        int ret;

        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
                return -1;

        adapter_id = atoi(params);

        if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
                EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
                return -EINVAL;
        }

        adapter = &adapters[adapter_id];

        ret = rte_event_timer_adapter_get_info(adapter, &adapter_info);
        if (ret < 0) {
                EVTIM_LOG_ERR("Failed to get info for timer adapter id %u", adapter_id);
                return ret;
        }

        rte_tel_data_start_dict(d);
        rte_tel_data_add_dict_u64(d, "timer_adapter_id", adapter_id);
        rte_tel_data_add_dict_u64(d, "min_resolution_ns", adapter_info.min_resolution_ns);
        rte_tel_data_add_dict_u64(d, "max_tmo_ns", adapter_info.max_tmo_ns);
        rte_tel_data_add_dict_u64(d, "event_dev_id", adapter_info.conf.event_dev_id);
        rte_tel_data_add_dict_u64(d, "socket_id", adapter_info.conf.socket_id);
        rte_tel_data_add_dict_u64(d, "clk_src", adapter_info.conf.clk_src);
        rte_tel_data_add_dict_u64(d, "timer_tick_ns", adapter_info.conf.timer_tick_ns);
        rte_tel_data_add_dict_u64(d, "nb_timers", adapter_info.conf.nb_timers);
        rte_tel_data_add_dict_u64(d, "flags", adapter_info.conf.flags);

        return 0;
}

static int
handle_ta_stats(const char *cmd __rte_unused, const char *params,
                struct rte_tel_data *d)
{
        struct rte_event_timer_adapter_stats stats;
        struct rte_event_timer_adapter *adapter;
        uint16_t adapter_id;
        int ret;

        if (params == NULL || strlen(params) == 0 || !isdigit(*params))
                return -1;

        adapter_id = atoi(params);

        if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
                EVTIM_LOG_ERR("Invalid timer adapter id %u", adapter_id);
                return -EINVAL;
        }

        adapter = &adapters[adapter_id];

        ret = rte_event_timer_adapter_stats_get(adapter, &stats);
        if (ret < 0) {
                EVTIM_LOG_ERR("Failed to get stats for timer adapter id %u", adapter_id);
                return ret;
        }

        rte_tel_data_start_dict(d);
        rte_tel_data_add_dict_u64(d, "timer_adapter_id", adapter_id);
        rte_tel_data_add_dict_u64(d, "evtim_exp_count", stats.evtim_exp_count);
        rte_tel_data_add_dict_u64(d, "ev_enq_count", stats.ev_enq_count);
        rte_tel_data_add_dict_u64(d, "ev_inv_count", stats.ev_inv_count);
        rte_tel_data_add_dict_u64(d, "evtim_retry_count", stats.evtim_retry_count);
        rte_tel_data_add_dict_u64(d, "adapter_tick_count", stats.adapter_tick_count);

        return 0;
}

RTE_INIT(ta_init_telemetry)
{
        rte_telemetry_register_cmd("/eventdev/ta_info",
                handle_ta_info,
                "Returns Timer adapter info. Parameter: Timer adapter id");

        rte_telemetry_register_cmd("/eventdev/ta_stats",
                handle_ta_stats,
                "Returns Timer adapter stats. Parameter: Timer adapter id");
}
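
/* Example query via the standard telemetry client (a sketch; the exact
 * reply formatting depends on the telemetry library version):
 *
 *     $ ./usertools/dpdk-telemetry.py
 *     --> /eventdev/ta_info,0
 *     {"/eventdev/ta_info": {"timer_adapter_id": 0, ...}}
 */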