dpdk/drivers/event/cnxk/cn10k_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

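/*
 * The Rx dequeue and Tx adapter enqueue fastpath handlers are specialized
 * per offload-flag combination. These helpers index a seven-dimensional
 * lookup table, one boolean per flag, to select the variant built for
 * exactly the offloads that are enabled.
 */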
#define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                           \
        (deq_op = deq_ops[!!(dev->rx_offloads & NIX_RX_OFFLOAD_SECURITY_F)]    \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_VLAN_STRIP_F)]  \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_TSTAMP_F)]      \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_MARK_UPDATE_F)] \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_CHECKSUM_F)]    \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_PTYPE_F)]       \
                         [!!(dev->rx_offloads & NIX_RX_OFFLOAD_RSS_F)])

#define CN10K_SET_EVDEV_ENQ_OP(dev, enq_op, enq_ops)                           \
        (enq_op =                                                              \
                 enq_ops[!!(dev->tx_offloads & NIX_TX_OFFLOAD_SECURITY_F)]     \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSTAMP_F)]       \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_TSO_F)]          \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_MBUF_NOFF_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_VLAN_QINQ_F)]    \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)] \
                        [!!(dev->tx_offloads & NIX_TX_OFFLOAD_L3_L4_CSUM_F)])

static uint32_t
cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)
{
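        /*
         * BIT(16) is the GET_WORK WAIT bit (the flush path below sets the
         * same bit in its request); the additional bits per GW mode are
         * presumed to enable prefetch and prefetch-with-WFE behaviour.
         */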
        uint32_t wdata = BIT(16) | 1;

        switch (dev->gw_mode) {
        case CN10K_GW_MODE_NONE:
        default:
                break;
        case CN10K_GW_MODE_PREF:
                wdata |= BIT(19);
                break;
        case CN10K_GW_MODE_PREF_WFE:
                wdata |= BIT(20) | BIT(19);
                break;
        }

        return wdata;
}

static void *
cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws;

        /* Allocate event port memory */
        ws = rte_zmalloc("cn10k_ws",
                         sizeof(struct cn10k_sso_hws) + RTE_CACHE_LINE_SIZE,
                         RTE_CACHE_LINE_SIZE);
        if (ws == NULL) {
                plt_err("Failed to alloc memory for port=%d", port_id);
                return NULL;
        }

        /* First cache line is reserved for cookie */
        ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
        ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
        ws->tx_base = ws->base;
        ws->hws_id = port_id;
        ws->swtag_req = 0;
        ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
        ws->lmt_base = dev->sso.lmt_base;

        return ws;
}

static int
cn10k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = port;

        return roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
}

static int
cn10k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = port;

        return roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
}

static void
cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t grp_base)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = hws;
        uint64_t val;

        ws->grp_base = grp_base;
        ws->fc_mem = (uint64_t *)dev->fc_iova;
        ws->xaq_lmt = dev->xaq_lmt;

        /* Set get_work timeout for HWS */
        val = NSEC2USEC(dev->deq_tmo_ns) - 1;
        plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
}

static void
cn10k_sso_hws_release(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = hws;
        int i;

        for (i = 0; i < dev->nb_event_queues; i++)
                roc_sso_hws_unlink(&dev->sso, ws->hws_id, (uint16_t *)&i, 1);
        memset(ws, 0, sizeof(*ws));
}

static void
cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                           cnxk_handle_event_t fn, void *arg)
{
        struct cn10k_sso_hws *ws = hws;
        uint64_t cq_ds_cnt = 1;
        uint64_t aq_cnt = 1;
        uint64_t ds_cnt = 1;
        struct rte_event ev;
        uint64_t val, req;

        plt_write64(0, base + SSO_LF_GGRP_QCTL);

        plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
        req = queue_id;     /* GGRP ID */
        req |= BIT_ULL(18); /* Grouped */
        req |= BIT_ULL(16); /* WAIT */

        aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
        ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
        cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
        cq_ds_cnt &= 0x3FFF3FFF0000;

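        /* Drain events until the group's AQ, misc and CQ/DS counts reach
         * zero.
         */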
        while (aq_cnt || cq_ds_cnt || ds_cnt) {
                plt_write64(req, ws->base + SSOW_LF_GWS_OP_GET_WORK0);
                cn10k_sso_hws_get_work_empty(ws, &ev);
                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
                if (ev.sched_type != SSO_TT_EMPTY)
                        cnxk_sso_hws_swtag_flush(
                                ws->base + SSOW_LF_GWS_WQE0,
                                ws->base + SSOW_LF_GWS_OP_SWTAG_FLUSH);
                do {
                        val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
                } while (val & BIT_ULL(56));
                aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
                ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
                cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x3FFF3FFF0000;
        }

        plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
        rte_mb();
}

static void
cn10k_sso_hws_reset(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = hws;
        uintptr_t base = ws->base;
        uint64_t pend_state;
        union {
                __uint128_t wdata;
                uint64_t u64[2];
        } gw;
        uint8_t pend_tt;

        plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
        /* Wait till getwork/swtp/waitw/desched completes. */
        do {
                pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
        } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
                               BIT_ULL(56) | BIT_ULL(54)));
        pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
        if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
                        cnxk_sso_hws_swtag_untag(base +
                                                 SSOW_LF_GWS_OP_SWTAG_UNTAG);
                plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
        }

        /* Wait for desched to complete. */
        do {
                pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
        } while (pend_state & BIT_ULL(58));

        switch (dev->gw_mode) {
        case CN10K_GW_MODE_PREF:
                while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
                        ;
                break;
        case CN10K_GW_MODE_PREF_WFE:
                while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) &
                       SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
                        continue;
                plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
                break;
        case CN10K_GW_MODE_NONE:
        default:
                break;
        }

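        /*
         * If an event was prefetched into the GWS, pull it in with a final
         * GET_WORK and untag/desched it so nothing stays held by this HWS.
         */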
        if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_PRF_WQE0)) !=
            SSO_TT_EMPTY) {
                plt_write64(BIT_ULL(16) | 1,
                            ws->base + SSOW_LF_GWS_OP_GET_WORK0);
                do {
                        roc_load_pair(gw.u64[0], gw.u64[1],
                                      ws->base + SSOW_LF_GWS_WQE0);
                } while (gw.u64[0] & BIT_ULL(63));
                pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
                if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                        if (pend_tt == SSO_TT_ATOMIC ||
                            pend_tt == SSO_TT_ORDERED)
                                cnxk_sso_hws_swtag_untag(
                                        base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
                        plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
                }
        }

        plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
        rte_mb();
}

static void
cn10k_sso_set_rsrc(void *arg)
{
        struct cnxk_sso_evdev *dev = arg;

        dev->max_event_ports = dev->sso.max_hws;
        dev->max_event_queues =
                dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
                              RTE_EVENT_MAX_QUEUES_PER_DEV :
                              dev->sso.max_hwgrp;
}

static int
cn10k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
        struct cnxk_sso_evdev *dev = arg;

        return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

static int
cn10k_sso_updt_tx_adptr_data(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        if (dev->tx_adptr_data == NULL)
                return 0;

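        /*
         * Grow each port's allocation (cookie + HWS) so the flattened
         * [port][queue] Tx adapter table fits right after the HWS struct,
         * then copy the current table into it.
         */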
        for (i = 0; i < dev->nb_event_ports; i++) {
                struct cn10k_sso_hws *ws = event_dev->data->ports[i];
                void *ws_cookie;

                ws_cookie = cnxk_sso_hws_get_cookie(ws);
                ws_cookie = rte_realloc_socket(
                        ws_cookie,
                        sizeof(struct cnxk_sso_hws_cookie) +
                                sizeof(struct cn10k_sso_hws) +
                                (sizeof(uint64_t) * (dev->max_port_id + 1) *
                                 RTE_MAX_QUEUES_PER_PORT),
                        RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
                if (ws_cookie == NULL)
                        return -ENOMEM;
                ws = RTE_PTR_ADD(ws_cookie, sizeof(struct cnxk_sso_hws_cookie));
                memcpy(&ws->tx_adptr_data, dev->tx_adptr_data,
                       sizeof(uint64_t) * (dev->max_port_id + 1) *
                               RTE_MAX_QUEUES_PER_PORT);
                event_dev->data->ports[i] = ws;
        }

        return 0;
}

static void
cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 293        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
 294        const event_dequeue_t sso_hws_deq[2][2][2][2][2][2][2] = {
 295#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                            \
 296        [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_##name,
 297                NIX_RX_FASTPATH_MODES
 298#undef R
 299        };
 300
 301        const event_dequeue_burst_t sso_hws_deq_burst[2][2][2][2][2][2][2] = {
 302#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 303        [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_burst_##name,
 304                NIX_RX_FASTPATH_MODES
 305#undef R
 306        };
 307
 308        const event_dequeue_t sso_hws_deq_tmo[2][2][2][2][2][2][2] = {
 309#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 310        [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_##name,
 311                NIX_RX_FASTPATH_MODES
 312#undef R
 313        };
 314
 315        const event_dequeue_burst_t
 316                sso_hws_deq_tmo_burst[2][2][2][2][2][2][2] = {
 317#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 318        [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_burst_##name,
 319                NIX_RX_FASTPATH_MODES
 320#undef R
 321        };
 322
 323        const event_dequeue_t sso_hws_deq_ca[2][2][2][2][2][2][2] = {
 324#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 325        [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_##name,
 326                NIX_RX_FASTPATH_MODES
 327#undef R
 328        };
 329
 330        const event_dequeue_burst_t
 331                sso_hws_deq_ca_burst[2][2][2][2][2][2][2] = {
 332#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 333        [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_burst_##name,
 334                NIX_RX_FASTPATH_MODES
 335#undef R
 336        };
 337
 338        const event_dequeue_t sso_hws_deq_seg[2][2][2][2][2][2][2] = {
 339#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 340        [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_##name,
 341                NIX_RX_FASTPATH_MODES
 342#undef R
 343        };
 344
 345        const event_dequeue_burst_t
 346                sso_hws_deq_seg_burst[2][2][2][2][2][2][2] = {
 347#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 348        [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_seg_burst_##name,
 349                NIX_RX_FASTPATH_MODES
 350#undef R
 351        };
 352
 353        const event_dequeue_t sso_hws_deq_tmo_seg[2][2][2][2][2][2][2] = {
 354#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 355        [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_##name,
 356                NIX_RX_FASTPATH_MODES
 357#undef R
 358        };
 359
 360        const event_dequeue_burst_t
 361                sso_hws_deq_tmo_seg_burst[2][2][2][2][2][2][2] = {
 362#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 363        [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_tmo_seg_burst_##name,
 364                        NIX_RX_FASTPATH_MODES
 365#undef R
 366                };
 367
 368        const event_dequeue_t sso_hws_deq_ca_seg[2][2][2][2][2][2][2] = {
 369#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 370        [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_seg_##name,
 371                NIX_RX_FASTPATH_MODES
 372#undef R
 373        };
 374
 375        const event_dequeue_burst_t
 376                sso_hws_deq_ca_seg_burst[2][2][2][2][2][2][2] = {
 377#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)                             \
 378        [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_deq_ca_seg_burst_##name,
 379                        NIX_RX_FASTPATH_MODES
 380#undef R
 381        };
 382
 383        /* Tx modes */
 384        const event_tx_adapter_enqueue_t
 385                sso_hws_tx_adptr_enq[2][2][2][2][2][2][2] = {
 386#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 387        [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_##name,
 388                        NIX_TX_FASTPATH_MODES
 389#undef T
 390                };
 391
 392        const event_tx_adapter_enqueue_t
 393                sso_hws_tx_adptr_enq_seg[2][2][2][2][2][2][2] = {
 394#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)                         \
 395        [f6][f5][f4][f3][f2][f1][f0] = cn10k_sso_hws_tx_adptr_enq_seg_##name,
 396                        NIX_TX_FASTPATH_MODES
 397#undef T
 398                };
 399
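        /*
         * Single-event enqueue ops are offload-independent; the dequeue and
         * Tx adapter enqueue ops come from the tables above, with timeout
         * (tmo) and crypto adapter (ca) variants overriding the defaults
         * when those features are enabled.
         */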
        event_dev->enqueue = cn10k_sso_hws_enq;
        event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
        if (dev->rx_offloads & NIX_RX_MULTI_SEG_F) {
                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                       sso_hws_deq_seg);
                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                       sso_hws_deq_seg_burst);
                if (dev->is_timeout_deq) {
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                               sso_hws_deq_tmo_seg);
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_tmo_seg_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                               sso_hws_deq_ca_seg);
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_ca_seg_burst);
                }
        } else {
                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue, sso_hws_deq);
                CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                       sso_hws_deq_burst);
                if (dev->is_timeout_deq) {
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                               sso_hws_deq_tmo);
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_tmo_burst);
                }
                if (dev->is_ca_internal_port) {
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue,
                                               sso_hws_deq_ca);
                        CN10K_SET_EVDEV_DEQ_OP(dev, event_dev->dequeue_burst,
                                               sso_hws_deq_ca_burst);
                }
        }
        event_dev->ca_enqueue = cn10k_sso_hws_ca_enq;

        if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
                CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                       sso_hws_tx_adptr_enq_seg);
        else
                CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue,
                                       sso_hws_tx_adptr_enq);

        event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
}

static void
cn10k_sso_info_get(struct rte_eventdev *event_dev,
                   struct rte_event_dev_info *dev_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN10K_PMD);
        cnxk_sso_info_get(dev, dev_info);
}

static int
cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        rc = cnxk_sso_dev_validate(event_dev);
        if (rc < 0) {
                plt_err("Invalid event device configuration");
                return -EINVAL;
        }

        rc = cn10k_sso_rsrc_init(dev, dev->nb_event_ports,
                                 dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to initialize SSO resources");
                return -ENODEV;
        }

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        rc = cnxk_setup_event_ports(event_dev, cn10k_sso_init_hws_mem,
                                    cn10k_sso_hws_setup);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        /* Restore any prior port-queue mapping. */
        cnxk_sso_restore_links(event_dev, cn10k_sso_hws_link);

        dev->configured = 1;
        rte_mb();

        return 0;
cnxk_rsrc_fini:
        roc_sso_rsrc_fini(&dev->sso);
        dev->nb_event_ports = 0;
        return rc;
}

static int
cn10k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                     const struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(port_conf);
        return cnxk_sso_port_setup(event_dev, port_id, cn10k_sso_hws_setup);
}

static void
cn10k_sso_port_release(void *port)
{
        struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
        struct cnxk_sso_evdev *dev;

        if (port == NULL)
                return;

        dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
        if (!gws_cookie->configured)
                goto free;

        cn10k_sso_hws_release(dev, port);
        memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
        rte_free(gws_cookie);
}

static int
cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
                    const uint8_t queues[], const uint8_t priorities[],
                    uint16_t nb_links)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_links];
        uint16_t link;

        RTE_SET_USED(priorities);
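        /* SSO HWGRP ids are 16-bit; widen the 8-bit eventdev queue ids. */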
        for (link = 0; link < nb_links; link++)
                hwgrp_ids[link] = queues[link];
        nb_links = cn10k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

        return (int)nb_links;
}

static int
cn10k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                      uint8_t queues[], uint16_t nb_unlinks)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_unlinks];
        uint16_t unlink;

        for (unlink = 0; unlink < nb_unlinks; unlink++)
                hwgrp_ids[unlink] = queues[unlink];
        nb_unlinks = cn10k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

        return (int)nb_unlinks;
}

static int
cn10k_sso_start(struct rte_eventdev *event_dev)
{
        int rc;

        rc = cn10k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;

        rc = cnxk_sso_start(event_dev, cn10k_sso_hws_reset,
                            cn10k_sso_hws_flush_events);
        if (rc < 0)
                return rc;
        cn10k_sso_fp_fns_set(event_dev);

        return rc;
}

static void
cn10k_sso_stop(struct rte_eventdev *event_dev)
{
        cnxk_sso_stop(event_dev, cn10k_sso_hws_reset,
                      cn10k_sso_hws_flush_events);
}

static int
cn10k_sso_close(struct rte_eventdev *event_dev)
{
        return cnxk_sso_close(event_dev, cn10k_sso_hws_unlink);
}

static int
cn10k_sso_selftest(void)
{
        return cnxk_sso_selftest(RTE_STR(event_cn10k));
}

static int
cn10k_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int rc;

        RTE_SET_USED(event_dev);
        rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 9);
        if (rc)
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR;

        return 0;
}

static void
cn10k_sso_set_priv_mem(const struct rte_eventdev *event_dev, void *lookup_mem,
                       void *tstmp_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                struct cn10k_sso_hws *ws = event_dev->data->ports[i];
                ws->lookup_mem = lookup_mem;
                ws->tstamp = tstmp_info;
        }
}

static int
cn10k_sso_rx_adapter_queue_add(
        const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
        int32_t rx_queue_id,
        const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct cn10k_eth_rxq *rxq;
        void *lookup_mem;
        void *tstmp_info;
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
        if (rc)
                return -EINVAL;

        rc = cnxk_sso_rx_adapter_queue_add(event_dev, eth_dev, rx_queue_id,
                                           queue_conf);
        if (rc)
                return -EINVAL;
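        /*
         * lookup_mem and the timestamp info are per-port rather than
         * per-queue, so Rx queue 0 stands in for whichever queue was added.
         */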
        rxq = eth_dev->data->rx_queues[0];
        lookup_mem = rxq->lookup_mem;
        tstmp_info = rxq->tstamp;
        cn10k_sso_set_priv_mem(event_dev, lookup_mem, tstmp_info);
        cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn10k_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                               const struct rte_eth_dev *eth_dev,
                               int32_t rx_queue_id)
{
        int rc;

        rc = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
        if (rc)
                return -EINVAL;

        return cnxk_sso_rx_adapter_queue_del(event_dev, eth_dev, rx_queue_id);
}

static int
cn10k_sso_rx_adapter_vector_limits(
        const struct rte_eventdev *dev, const struct rte_eth_dev *eth_dev,
        struct rte_event_eth_rx_adapter_vector_limits *limits)
{
        struct cnxk_eth_dev *cnxk_eth_dev;
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
        if (ret)
                return -ENOTSUP;

        cnxk_eth_dev = cnxk_eth_pmd_priv(eth_dev);
        limits->log2_sz = true;
        limits->min_sz = 1 << ROC_NIX_VWQE_MIN_SIZE_LOG2;
        limits->max_sz = 1 << ROC_NIX_VWQE_MAX_SIZE_LOG2;
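        /*
         * The VWQE aggregation interval appears to be programmed in 100 ns
         * steps, hence the scaling below; the maximum is a 9-bit multiple
         * of the minimum.
         */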
        limits->min_timeout_ns =
                (roc_nix_get_vwqe_interval(&cnxk_eth_dev->nix) + 1) * 100;
        limits->max_timeout_ns = BITMASK_ULL(8, 0) * limits->min_timeout_ns;

        return 0;
}

static int
cn10k_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
                              const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_cn10k", 8);
        if (ret)
                *caps = 0;
        else
                *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT |
                        RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;

        return 0;
}

static int
cn10k_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                               const struct rte_eth_dev *eth_dev,
                               int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_add(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        rc = cn10k_sso_updt_tx_adptr_data(event_dev);
        if (rc < 0)
                return rc;
        cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

static int
cn10k_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
                               const struct rte_eth_dev *eth_dev,
                               int32_t tx_queue_id)
{
        int rc;

        RTE_SET_USED(id);
        rc = cnxk_sso_tx_adapter_queue_del(event_dev, eth_dev, tx_queue_id);
        if (rc < 0)
                return rc;
        return cn10k_sso_updt_tx_adptr_data(event_dev);
}

static int
cn10k_crypto_adapter_caps_get(const struct rte_eventdev *event_dev,
                              const struct rte_cryptodev *cdev, uint32_t *caps)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

        *caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
                RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;

        return 0;
}

static int
cn10k_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
                            const struct rte_cryptodev *cdev,
                            int32_t queue_pair_id,
                            const struct rte_event *event)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(event);

        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

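        /*
         * Mark the crypto adapter as using an internal port and reselect
         * the fastpath ops so the _ca_ dequeue variants take effect.
         */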
        dev->is_ca_internal_port = 1;
        cn10k_sso_fp_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return cnxk_crypto_adapter_qp_add(event_dev, cdev, queue_pair_id);
}

static int
cn10k_crypto_adapter_qp_del(const struct rte_eventdev *event_dev,
                            const struct rte_cryptodev *cdev,
                            int32_t queue_pair_id)
{
        CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k");
        CNXK_VALID_DEV_OR_ERR_RET(cdev->device, "crypto_cn10k");

        return cnxk_crypto_adapter_qp_del(cdev, queue_pair_id);
}

static struct eventdev_ops cn10k_sso_dev_ops = {
        .dev_infos_get = cn10k_sso_info_get,
        .dev_configure = cn10k_sso_dev_configure,
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn10k_sso_port_setup,
        .port_release = cn10k_sso_port_release,
        .port_link = cn10k_sso_port_link,
        .port_unlink = cn10k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,

        .eth_rx_adapter_caps_get = cn10k_sso_rx_adapter_caps_get,
        .eth_rx_adapter_queue_add = cn10k_sso_rx_adapter_queue_add,
        .eth_rx_adapter_queue_del = cn10k_sso_rx_adapter_queue_del,
        .eth_rx_adapter_start = cnxk_sso_rx_adapter_start,
        .eth_rx_adapter_stop = cnxk_sso_rx_adapter_stop,

        .eth_rx_adapter_vector_limits_get = cn10k_sso_rx_adapter_vector_limits,

        .eth_tx_adapter_caps_get = cn10k_sso_tx_adapter_caps_get,
        .eth_tx_adapter_queue_add = cn10k_sso_tx_adapter_queue_add,
        .eth_tx_adapter_queue_del = cn10k_sso_tx_adapter_queue_del,

        .timer_adapter_caps_get = cnxk_tim_caps_get,

        .crypto_adapter_caps_get = cn10k_crypto_adapter_caps_get,
        .crypto_adapter_queue_pair_add = cn10k_crypto_adapter_qp_add,
        .crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,

        .dump = cnxk_sso_dump,
        .dev_start = cn10k_sso_start,
        .dev_stop = cn10k_sso_stop,
        .dev_close = cn10k_sso_close,
        .dev_selftest = cn10k_sso_selftest,
};

static int
cn10k_sso_init(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        if (RTE_CACHE_LINE_SIZE != 64) {
                plt_err("Driver not compiled for CN10K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc < 0) {
                plt_err("Failed to initialize platform model");
                return rc;
        }

        event_dev->dev_ops = &cn10k_sso_dev_ops;
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                cn10k_sso_fp_fns_set(event_dev);
                return 0;
        }

        rc = cnxk_sso_init(event_dev);
        if (rc < 0)
                return rc;

        cn10k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
        if (!dev->max_event_ports || !dev->max_event_queues) {
                plt_err("Not enough eventdev resource queues=%d ports=%d",
                        dev->max_event_queues, dev->max_event_ports);
                cnxk_sso_fini(event_dev);
                return -ENODEV;
        }

        plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
                    event_dev->data->name, dev->max_event_queues,
                    dev->max_event_ports);

        return 0;
}

static int
cn10k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe(pci_drv, pci_dev,
                                       sizeof(struct cnxk_sso_evdev),
                                       cn10k_sso_init);
}

static const struct rte_pci_id cn10k_pci_sso_map[] = {
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CNF10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn10k_pci_sso = {
        .id_table = cn10k_pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = cn10k_sso_probe,
        .remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn10k, cn10k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CNXK_SSO_FORCE_BP "=1"
                              CN10K_SSO_GW_MODE "=<int>"
                              CNXK_TIM_DISABLE_NPA "=1"
                              CNXK_TIM_CHNK_SLOTS "=<int>"
                              CNXK_TIM_RINGS_LMT "=<int>"
                              CNXK_TIM_STATS_ENA "=1");