dpdk/drivers/event/octeontx2/otx2_evdev_adptr.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019-2021 Marvell.
 */

#include "otx2_evdev.h"

#define NIX_RQ_AURA_THRESH(x) (((x)*95) / 100)

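/* Report Rx adapter capabilities: octeontx2 ethdevs support internal-port
 * event delivery with multiple event queues; any other ethdev falls back to
 * the SW Rx adapter capabilities.
 */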
int
otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int rc;

        RTE_SET_USED(event_dev);
        rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
        if (rc)
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT |
                        RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ;

        return 0;
}

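/* Switch an ethdev Rx queue from CQ (poll mode) delivery to SSO (event)
 * delivery: the CQ context is disabled first, then the RQ context is
 * rewritten so received packets are handed to the given SSO group with the
 * requested tag type, carrying the event type and ethdev port id in the tag.
 */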
static inline int
sso_rxq_enable(struct otx2_eth_dev *dev, uint16_t qid, uint8_t tt, uint8_t ggrp,
               uint16_t eth_port_id)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_aq_enq_req *aq;
        int rc;

        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = qid;
        aq->ctype = NIX_AQ_CTYPE_CQ;
        aq->op = NIX_AQ_INSTOP_WRITE;

        aq->cq.ena = 0;
        aq->cq.caching = 0;

        otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
        aq->cq_mask.ena = ~(aq->cq_mask.ena);
        aq->cq_mask.caching = ~(aq->cq_mask.caching);

        rc = otx2_mbox_process(mbox);
        if (rc < 0) {
                otx2_err("Failed to disable cq context");
                goto fail;
        }

        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = qid;
        aq->ctype = NIX_AQ_CTYPE_RQ;
        aq->op = NIX_AQ_INSTOP_WRITE;

        aq->rq.sso_ena = 1;
        aq->rq.sso_tt = tt;
        aq->rq.sso_grp = ggrp;
        aq->rq.ena_wqwd = 1;
        /* Mbuf header generation:
         * > FIRST_SKIP is a superset of WQE_SKIP; don't modify FIRST_SKIP as
         * it already accounts for mbuf size, headroom and private area.
         * > Using WQE_SKIP we can directly assign
         *              mbuf = wqe - sizeof(struct rte_mbuf);
         * so that the mbuf header will not hold unpredictable values, while
         * headroom and private data start at the beginning of wqe_data.
         */
        aq->rq.wqe_skip = 1;
        aq->rq.wqe_caching = 1;
        aq->rq.spb_ena = 0;
        aq->rq.flow_tagw = 20; /* 20-bits */

        /* Flow tag calculation:
         *
         * rq_tag <31:24> = good/bad_tag<7:0>;
         * rq_tag  <23:0> = [ltag]
         *
         * flow_tag_mask<31:0> = (1 << flow_tagw) - 1; i.e. <19:0> set
         * tag<31:0> = (~flow_tag_mask & rq_tag) | (flow_tag_mask & flow_tag);
         *
         * Setup:
         * ltag<23:0> = (eth_port_id & 0xF) << 20;
         * good/bad_tag<7:0> =
         *      ((eth_port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4);
         *
         * TAG<31:0> on getwork = <31:28>(RTE_EVENT_TYPE_ETHDEV) |
         *                              <27:20>(eth_port_id) | <19:0>[TAG]
         */

        aq->rq.ltag = (eth_port_id & 0xF) << 20;
        aq->rq.good_utag = ((eth_port_id >> 4) & 0xF) |
                                (RTE_EVENT_TYPE_ETHDEV << 4);
        aq->rq.bad_utag = aq->rq.good_utag;

        aq->rq.ena = 0;          /* Don't enable RQ yet */
        aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
        aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */

        otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
        /* Mask the bits to write. */
        aq->rq_mask.sso_ena      = ~(aq->rq_mask.sso_ena);
        aq->rq_mask.sso_tt       = ~(aq->rq_mask.sso_tt);
        aq->rq_mask.sso_grp      = ~(aq->rq_mask.sso_grp);
        aq->rq_mask.ena_wqwd     = ~(aq->rq_mask.ena_wqwd);
        aq->rq_mask.wqe_skip     = ~(aq->rq_mask.wqe_skip);
        aq->rq_mask.wqe_caching  = ~(aq->rq_mask.wqe_caching);
        aq->rq_mask.spb_ena      = ~(aq->rq_mask.spb_ena);
        aq->rq_mask.flow_tagw    = ~(aq->rq_mask.flow_tagw);
        aq->rq_mask.ltag         = ~(aq->rq_mask.ltag);
        aq->rq_mask.good_utag    = ~(aq->rq_mask.good_utag);
        aq->rq_mask.bad_utag     = ~(aq->rq_mask.bad_utag);
        aq->rq_mask.ena          = ~(aq->rq_mask.ena);
        aq->rq_mask.pb_caching   = ~(aq->rq_mask.pb_caching);
        aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);

        rc = otx2_mbox_process(mbox);
        if (rc < 0) {
                otx2_err("Failed to init rx adapter context");
                goto fail;
        }

        return 0;
fail:
        return rc;
}

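/* Undo sso_rxq_enable(): re-enable the CQ context and restore the RQ to its
 * default CQ-based (non-SSO) delivery configuration.
 */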
static inline int
sso_rxq_disable(struct otx2_eth_dev *dev, uint16_t qid)
{
        struct otx2_mbox *mbox = dev->mbox;
        struct nix_aq_enq_req *aq;
        int rc;

        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = qid;
        aq->ctype = NIX_AQ_CTYPE_CQ;
        aq->op = NIX_AQ_INSTOP_WRITE;

        aq->cq.ena = 1;
        aq->cq.caching = 1;

        otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
        aq->cq_mask.ena = ~(aq->cq_mask.ena);
        aq->cq_mask.caching = ~(aq->cq_mask.caching);

        rc = otx2_mbox_process(mbox);
        if (rc < 0) {
                otx2_err("Failed to enable cq context");
                goto fail;
        }

        aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
        aq->qidx = qid;
        aq->ctype = NIX_AQ_CTYPE_RQ;
        aq->op = NIX_AQ_INSTOP_WRITE;

        aq->rq.sso_ena = 0;
        aq->rq.sso_tt = SSO_TT_UNTAGGED;
        aq->rq.sso_grp = 0;
        aq->rq.ena_wqwd = 0;
        aq->rq.wqe_caching = 0;
        aq->rq.wqe_skip = 0;
        aq->rq.spb_ena = 0;
        aq->rq.flow_tagw = 0x20; /* Restore 32-bit flow tag width */
        aq->rq.ltag = 0;
        aq->rq.good_utag = 0;
        aq->rq.bad_utag = 0;
        aq->rq.ena = 1;
        aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
        aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */

        otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
        /* Mask the bits to write. */
        aq->rq_mask.sso_ena      = ~(aq->rq_mask.sso_ena);
        aq->rq_mask.sso_tt       = ~(aq->rq_mask.sso_tt);
        aq->rq_mask.sso_grp      = ~(aq->rq_mask.sso_grp);
        aq->rq_mask.ena_wqwd     = ~(aq->rq_mask.ena_wqwd);
        aq->rq_mask.wqe_caching  = ~(aq->rq_mask.wqe_caching);
        aq->rq_mask.wqe_skip     = ~(aq->rq_mask.wqe_skip);
        aq->rq_mask.spb_ena      = ~(aq->rq_mask.spb_ena);
        aq->rq_mask.flow_tagw    = ~(aq->rq_mask.flow_tagw);
        aq->rq_mask.ltag         = ~(aq->rq_mask.ltag);
        aq->rq_mask.good_utag    = ~(aq->rq_mask.good_utag);
        aq->rq_mask.bad_utag     = ~(aq->rq_mask.bad_utag);
        aq->rq_mask.ena          = ~(aq->rq_mask.ena);
        aq->rq_mask.pb_caching   = ~(aq->rq_mask.pb_caching);
        aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);

        rc = otx2_mbox_process(mbox);
        if (rc < 0) {
                otx2_err("Failed to clear rx adapter context");
                goto fail;
        }

        return 0;
fail:
        return rc;
}

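/* Track how many in-flight event entries (XAE) the SSO must provision for a
 * producer: each distinct Rx mempool contributes its size once, and each
 * timer ring contributes its timer count (re-adjusted when a ring is
 * resized). The bookkeeping arrays are grown with rte_realloc(); if that
 * fails, the XAE count is still bumped so the XAE pool is not under-sized.
 */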
void
sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
{
        int i;

        switch (event_type) {
        case RTE_EVENT_TYPE_ETHDEV:
        {
                struct otx2_eth_rxq *rxq = data;
                uint64_t *old_ptr;

                for (i = 0; i < dev->rx_adptr_pool_cnt; i++) {
                        if ((uint64_t)rxq->pool == dev->rx_adptr_pools[i])
                                return;
                }

                dev->rx_adptr_pool_cnt++;
                old_ptr = dev->rx_adptr_pools;
                dev->rx_adptr_pools = rte_realloc(dev->rx_adptr_pools,
                                                  sizeof(uint64_t) *
                                                  dev->rx_adptr_pool_cnt, 0);
                if (dev->rx_adptr_pools == NULL) {
                        dev->adptr_xae_cnt += rxq->pool->size;
                        dev->rx_adptr_pools = old_ptr;
                        dev->rx_adptr_pool_cnt--;
                        return;
                }
                dev->rx_adptr_pools[dev->rx_adptr_pool_cnt - 1] =
                        (uint64_t)rxq->pool;

                dev->adptr_xae_cnt += rxq->pool->size;
                break;
        }
        case RTE_EVENT_TYPE_TIMER:
        {
                struct otx2_tim_ring *timr = data;
                uint16_t *old_ring_ptr;
                uint64_t *old_sz_ptr;

                for (i = 0; i < dev->tim_adptr_ring_cnt; i++) {
                        if (timr->ring_id != dev->timer_adptr_rings[i])
                                continue;
                        if (timr->nb_timers == dev->timer_adptr_sz[i])
                                return;
                        dev->adptr_xae_cnt -= dev->timer_adptr_sz[i];
                        dev->adptr_xae_cnt += timr->nb_timers;
                        dev->timer_adptr_sz[i] = timr->nb_timers;

                        return;
                }

                dev->tim_adptr_ring_cnt++;
                old_ring_ptr = dev->timer_adptr_rings;
                old_sz_ptr = dev->timer_adptr_sz;

                dev->timer_adptr_rings = rte_realloc(dev->timer_adptr_rings,
                                                     sizeof(uint16_t) *
                                                     dev->tim_adptr_ring_cnt,
                                                     0);
                if (dev->timer_adptr_rings == NULL) {
                        dev->adptr_xae_cnt += timr->nb_timers;
                        dev->timer_adptr_rings = old_ring_ptr;
                        dev->tim_adptr_ring_cnt--;
                        return;
                }

                dev->timer_adptr_sz = rte_realloc(dev->timer_adptr_sz,
                                                  sizeof(uint64_t) *
                                                  dev->tim_adptr_ring_cnt,
                                                  0);

                if (dev->timer_adptr_sz == NULL) {
                        dev->adptr_xae_cnt += timr->nb_timers;
                        dev->timer_adptr_sz = old_sz_ptr;
                        dev->tim_adptr_ring_cnt--;
                        return;
                }

                dev->timer_adptr_rings[dev->tim_adptr_ring_cnt - 1] =
                        timr->ring_id;
                dev->timer_adptr_sz[dev->tim_adptr_ring_cnt - 1] =
                        timr->nb_timers;

                dev->adptr_xae_cnt += timr->nb_timers;
                break;
        }
        default:
                break;
        }
}

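/* Share the ethdev Rx lookup memory with every event port (single or dual
 * workslot) so the event Rx path uses the same lookup tables as the ethdev.
 */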
static inline void
sso_updt_lookup_mem(const struct rte_eventdev *event_dev, void *lookup_mem)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *ws = event_dev->data->ports[i];

                        ws->lookup_mem = lookup_mem;
                } else {
                        struct otx2_ssogws *ws = event_dev->data->ports[i];

                        ws->lookup_mem = lookup_mem;
                }
        }
}

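/* Configure NIX backpressure on the aura backing an Rx queue's mempool: read
 * the aura context and, depending on its current state, either leave it
 * as-is, clear backpressure when the programmed BPID does not match this
 * port's (unless force_rx_bp is set), or program the NIX0 BPID along with a
 * NIX_RQ_AURA_THRESH() based threshold and set bp_ena according to 'ena'.
 */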
static inline void
sso_cfg_nix_mp_bpid(struct otx2_sso_evdev *dev,
                    struct otx2_eth_dev *otx2_eth_dev, struct otx2_eth_rxq *rxq,
                    uint8_t ena)
{
        struct otx2_fc_info *fc = &otx2_eth_dev->fc_info;
        struct npa_aq_enq_req *req;
        struct npa_aq_enq_rsp *rsp;
        struct otx2_npa_lf *lf;
        struct otx2_mbox *mbox;
        uint32_t limit;
        int rc;

        if (otx2_dev_is_sdp(otx2_eth_dev))
                return;

        lf = otx2_npa_lf_obj_get();
        if (!lf)
                return;
        mbox = lf->mbox;

        req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
        if (req == NULL)
                return;

        req->aura_id = npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
        req->ctype = NPA_AQ_CTYPE_AURA;
        req->op = NPA_AQ_INSTOP_READ;

        rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
        if (rc)
                return;

        limit = rsp->aura.limit;
        /* BP is already enabled. */
        if (rsp->aura.bp_ena) {
                /* If BP ids don't match, disable BP. */
                if ((rsp->aura.nix0_bpid != fc->bpid[0]) && !dev->force_rx_bp) {
                        req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
                        if (req == NULL)
                                return;

                        req->aura_id =
                                npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
                        req->ctype = NPA_AQ_CTYPE_AURA;
                        req->op = NPA_AQ_INSTOP_WRITE;

                        req->aura.bp_ena = 0;
                        req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);

                        otx2_mbox_process(mbox);
                }
                return;
        }

        /* BP was previously enabled but is now disabled; skip. */
        if (rsp->aura.bp)
                return;

        req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
        if (req == NULL)
                return;

        req->aura_id = npa_lf_aura_handle_to_aura(rxq->pool->pool_id);
        req->ctype = NPA_AQ_CTYPE_AURA;
        req->op = NPA_AQ_INSTOP_WRITE;

        if (ena) {
                req->aura.nix0_bpid = fc->bpid[0];
                req->aura_mask.nix0_bpid = ~(req->aura_mask.nix0_bpid);
                req->aura.bp = NIX_RQ_AURA_THRESH(
                        limit > 128 ? 256 : limit); /* 95% of size */
                req->aura_mask.bp = ~(req->aura_mask.bp);
        }

        req->aura.bp_ena = !!ena;
        req->aura_mask.bp_ena = ~(req->aura_mask.bp_ena);

        otx2_mbox_process(mbox);
}

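/* Bind one Rx queue (or all queues when rx_queue_id < 0) to the Rx adapter:
 * update the XAE count and mempool backpressure, resize the XAE pool if
 * required and switch the queue(s) to SSO delivery using the scheduling type
 * and event queue supplied in queue_conf.
 */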
int
otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id,
                const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        uint16_t port = eth_dev->data->port_id;
        struct otx2_eth_rxq *rxq;
        int i, rc;

        rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
        if (rc)
                return -EINVAL;

        if (rx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                        rxq = eth_dev->data->rx_queues[i];
                        sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
                        sso_cfg_nix_mp_bpid(dev, otx2_eth_dev, rxq, true);
                        rc = sso_xae_reconfigure(
                                (struct rte_eventdev *)(uintptr_t)event_dev);
                        rc |= sso_rxq_enable(otx2_eth_dev, i,
                                             queue_conf->ev.sched_type,
                                             queue_conf->ev.queue_id, port);
                }
                rxq = eth_dev->data->rx_queues[0];
                sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
        } else {
                rxq = eth_dev->data->rx_queues[rx_queue_id];
                sso_updt_xae_cnt(dev, rxq, RTE_EVENT_TYPE_ETHDEV);
                sso_cfg_nix_mp_bpid(dev, otx2_eth_dev, rxq, true);
                rc = sso_xae_reconfigure((struct rte_eventdev *)
                                         (uintptr_t)event_dev);
                rc |= sso_rxq_enable(otx2_eth_dev, (uint16_t)rx_queue_id,
                                     queue_conf->ev.sched_type,
                                     queue_conf->ev.queue_id, port);
                sso_updt_lookup_mem(event_dev, rxq->lookup_mem);
        }

        if (rc < 0) {
                otx2_err("Failed to configure Rx adapter port=%d, q=%d", port,
                         queue_conf->ev.queue_id);
                return rc;
        }

        dev->rx_offloads |= otx2_eth_dev->rx_offload_flags;
        dev->tstamp = &otx2_eth_dev->tstamp;
        sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

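/* Unbind one Rx queue (or all queues when rx_queue_id < 0) from the Rx
 * adapter: restore CQ-based delivery and drop the mempool backpressure
 * configuration.
 */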
int
otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t rx_queue_id)
{
        struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        int i, rc;

        rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
        if (rc)
                return -EINVAL;

        if (rx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                        rc = sso_rxq_disable(otx2_eth_dev, i);
                        sso_cfg_nix_mp_bpid(dev, otx2_eth_dev,
                                            eth_dev->data->rx_queues[i], false);
                }
        } else {
                rc = sso_rxq_disable(otx2_eth_dev, (uint16_t)rx_queue_id);
                sso_cfg_nix_mp_bpid(dev, otx2_eth_dev,
                                    eth_dev->data->rx_queues[rx_queue_id],
                                    false);
        }

        if (rc < 0)
                otx2_err("Failed to clear Rx adapter config port=%d, q=%d",
                         eth_dev->data->port_id, rx_queue_id);

        return rc;
}

int
otx2_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
                          const struct rte_eth_dev *eth_dev)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(eth_dev);

        return 0;
}

int
otx2_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
                         const struct rte_eth_dev *eth_dev)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(eth_dev);

        return 0;
}

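/* Tx adapter capabilities: internal-port transmission for octeontx2 ethdevs,
 * no capabilities otherwise.
 */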
int
otx2_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
        int ret;

        RTE_SET_USED(dev);
        ret = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
        if (ret)
                *caps = 0;
        else
                *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;

        return 0;
}

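/* Rewrite the limit of the aura backing a Tx queue's SQB pool: the Tx adapter
 * sets it to OTX2_SSO_SQB_LIMIT while a queue is bound and restores the
 * queue's nb_sqb_bufs when it is removed.
 */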
static int
sso_sqb_aura_limit_edit(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
{
        struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
        struct npa_aq_enq_req *aura_req;

        aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
        aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
        aura_req->ctype = NPA_AQ_CTYPE_AURA;
        aura_req->op = NPA_AQ_INSTOP_WRITE;

        aura_req->aura.limit = nb_sqb_bufs;
        aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);

        return otx2_mbox_process(npa_lf->mbox);
}

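/* Grow every event port's Tx adapter data so it holds a
 * [eth_port_id][tx_queue_id] -> txq pointer table, placed right after the
 * cookie cache line and sized for the highest ethdev port id seen so far
 * with RTE_MAX_QUEUES_PER_PORT queues per port.
 */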
static int
sso_add_tx_queue_data(const struct rte_eventdev *event_dev,
                      uint16_t eth_port_id, uint16_t tx_queue_id,
                      struct otx2_eth_txq *txq)
{
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < event_dev->data->nb_ports; i++) {
                dev->max_port_id = RTE_MAX(dev->max_port_id, eth_port_id);
                if (dev->dual_ws) {
                        struct otx2_ssogws_dual *old_dws;
                        struct otx2_ssogws_dual *dws;

                        old_dws = event_dev->data->ports[i];
                        dws = rte_realloc_socket(ssogws_get_cookie(old_dws),
                                                 sizeof(struct otx2_ssogws_dual)
                                                 + RTE_CACHE_LINE_SIZE +
                                                 (sizeof(uint64_t) *
                                                    (dev->max_port_id + 1) *
                                                    RTE_MAX_QUEUES_PER_PORT),
                                                 RTE_CACHE_LINE_SIZE,
                                                 event_dev->data->socket_id);
                        if (dws == NULL)
                                return -ENOMEM;

                        /* First cache line is reserved for cookie */
                        dws = (struct otx2_ssogws_dual *)
                                ((uint8_t *)dws + RTE_CACHE_LINE_SIZE);

                        ((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
                         )&dws->tx_adptr_data)[eth_port_id][tx_queue_id] =
                                (uint64_t)txq;
                        event_dev->data->ports[i] = dws;
                } else {
                        struct otx2_ssogws *old_ws;
                        struct otx2_ssogws *ws;

                        old_ws = event_dev->data->ports[i];
                        ws = rte_realloc_socket(ssogws_get_cookie(old_ws),
                                                sizeof(struct otx2_ssogws) +
                                                RTE_CACHE_LINE_SIZE +
                                                (sizeof(uint64_t) *
                                                 (dev->max_port_id + 1) *
                                                 RTE_MAX_QUEUES_PER_PORT),
                                                RTE_CACHE_LINE_SIZE,
                                                event_dev->data->socket_id);
                        if (ws == NULL)
                                return -ENOMEM;

                        /* First cache line is reserved for cookie */
                        ws = (struct otx2_ssogws *)
                                ((uint8_t *)ws + RTE_CACHE_LINE_SIZE);

                        ((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
                         )&ws->tx_adptr_data)[eth_port_id][tx_queue_id] =
                                (uint64_t)txq;
                        event_dev->data->ports[i] = ws;
                }
        }

        return 0;
}

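/* Bind one Tx queue (or all queues when tx_queue_id < 0) to the Tx adapter:
 * cap the SQB aura limit and record the txq pointer in every event port's
 * Tx adapter table.
 */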
int
otx2_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
        struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
        struct otx2_eth_txq *txq;
        int i, ret;

        RTE_SET_USED(id);
        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        txq = eth_dev->data->tx_queues[i];
                        sso_sqb_aura_limit_edit(txq->sqb_pool,
                                        OTX2_SSO_SQB_LIMIT);
                        ret = sso_add_tx_queue_data(event_dev,
                                                    eth_dev->data->port_id, i,
                                                    txq);
                        if (ret < 0)
                                return ret;
                }
        } else {
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sso_sqb_aura_limit_edit(txq->sqb_pool, OTX2_SSO_SQB_LIMIT);
                ret = sso_add_tx_queue_data(event_dev, eth_dev->data->port_id,
                                            tx_queue_id, txq);
                if (ret < 0)
                        return ret;
        }

        dev->tx_offloads |= otx2_eth_dev->tx_offload_flags;
        sso_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)event_dev);

        return 0;
}

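/* Unbind one Tx queue (or all queues when tx_queue_id < 0) from the Tx
 * adapter: restore the original SQB aura limit.
 */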
int
otx2_sso_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *event_dev,
                              const struct rte_eth_dev *eth_dev,
                              int32_t tx_queue_id)
{
        struct otx2_eth_txq *txq;
        int i;

        RTE_SET_USED(id);
        RTE_SET_USED(event_dev);
        if (tx_queue_id < 0) {
                for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                        txq = eth_dev->data->tx_queues[i];
                        sso_sqb_aura_limit_edit(txq->sqb_pool,
                                                txq->nb_sqb_bufs);
                }
        } else {
                txq = eth_dev->data->tx_queues[tx_queue_id];
                sso_sqb_aura_limit_edit(txq->sqb_pool, txq->nb_sqb_bufs);
        }

        return 0;
}