dpdk/drivers/net/ionic/ionic_rxtx.c
/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                struct rte_eth_txq_info *qinfo)
{
        struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
        struct ionic_queue *q = &txq->qcq.q;

        qinfo->nb_desc = q->num_descs;
        qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
        qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

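/*
 * Free transmitted mbufs by servicing the Tx completion ring.
 *
 * The device marks each completion with a color bit that flips every
 * time the ring wraps (mirrored here by cq->done_color), so an entry is
 * valid only while its color matches.  Completions are coalesced: each
 * carries the queue index of the newest descriptor it covers, so only
 * the last comp_index observed needs to be drained.
 */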
static __rte_always_inline void
ionic_tx_flush(struct ionic_tx_qcq *txq)
{
        struct ionic_cq *cq = &txq->qcq.cq;
        struct ionic_queue *q = &txq->qcq.q;
        struct rte_mbuf *txm, *next;
        struct ionic_txq_comp *cq_desc_base = cq->base;
        struct ionic_txq_comp *cq_desc;
        void **info;
        uint32_t comp_index = (uint32_t)-1;

        cq_desc = &cq_desc_base[cq->tail_idx];
        while (color_match(cq_desc->color, cq->done_color)) {
                cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

                /* Prefetch the next 4 descriptors (not really useful here) */
                if ((cq->tail_idx & 0x3) == 0)
                        rte_prefetch0(&cq_desc_base[cq->tail_idx]);

                if (cq->tail_idx == 0)
                        cq->done_color = !cq->done_color;

                comp_index = cq_desc->comp_index;

                cq_desc = &cq_desc_base[cq->tail_idx];
        }

        if (comp_index != (uint32_t)-1) {
                while (q->tail_idx != comp_index) {
                        info = IONIC_INFO_PTR(q, q->tail_idx);

                        q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

                        /* Prefetch the next 4 descriptors */
                        if ((q->tail_idx & 0x3) == 0)
                                /* q desc info */
                                rte_prefetch0(&q->info[q->tail_idx]);

                        /*
                         * Note: you can just use rte_pktmbuf_free,
                         * but this loop is faster
                         */
                        txm = info[0];
                        while (txm != NULL) {
                                next = txm->next;
                                rte_pktmbuf_free_seg(txm);
                                txm = next;
                        }
                }
        }
}

void __rte_cold
ionic_dev_tx_queue_release(void *tx_queue)
{
        struct ionic_tx_qcq *txq = tx_queue;
        struct ionic_tx_stats *stats = &txq->stats;

        IONIC_PRINT_CALL();

        IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
                txq->qcq.q.index, stats->packets, stats->tso);

        ionic_lif_txq_deinit(txq);

        ionic_qcq_free(&txq->qcq);
}

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
        struct ionic_tx_qcq *txq;

        IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

        txq = eth_dev->data->tx_queues[tx_queue_id];

        eth_dev->data->tx_queue_state[tx_queue_id] =
                RTE_ETH_QUEUE_STATE_STOPPED;
        /*
         * Note: it would be better to post a NOP Tx descriptor and wait
         * for its completion before disabling the Tx queue
         */

        ionic_qcq_disable(&txq->qcq);

        ionic_tx_flush(txq);

        return 0;
}

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
                uint16_t nb_desc, uint32_t socket_id,
                const struct rte_eth_txconf *tx_conf)
{
        struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
        struct ionic_tx_qcq *txq;
        uint64_t offloads;
        int err;

        if (tx_queue_id >= lif->ntxqcqs) {
                IONIC_PRINT(DEBUG, "Queue index %u not available "
                        "(max %u queues)",
                        tx_queue_id, lif->ntxqcqs);
                return -EINVAL;
        }

        offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
        IONIC_PRINT(DEBUG,
                "Configuring skt %u TX queue %u with %u buffers, offloads %jx",
                socket_id, tx_queue_id, nb_desc, offloads);

        /* Validate number of transmit descriptors */
        if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
                return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

        /* Free memory prior to re-allocation if needed... */
        if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
                void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
                ionic_dev_tx_queue_release(tx_queue);
                eth_dev->data->tx_queues[tx_queue_id] = NULL;
        }

        eth_dev->data->tx_queue_state[tx_queue_id] =
                RTE_ETH_QUEUE_STATE_STOPPED;

        err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
        if (err) {
                IONIC_PRINT(DEBUG, "Queue allocation failure");
                return -EINVAL;
        }

        /* Do not start queue with rte_eth_dev_start() */
        if (tx_conf->tx_deferred_start)
                txq->flags |= IONIC_QCQ_F_DEFERRED;

        /* Convert the offload flags into queue flags */
        if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
                txq->flags |= IONIC_QCQ_F_CSUM_L3;
        if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
                txq->flags |= IONIC_QCQ_F_CSUM_TCP;
        if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
                txq->flags |= IONIC_QCQ_F_CSUM_UDP;

        eth_dev->data->tx_queues[tx_queue_id] = txq;

        return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
        uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
        struct ionic_tx_qcq *txq;
        int err;

        if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
                IONIC_PRINT(DEBUG, "TX queue %u already started",
                        tx_queue_id);
                return 0;
        }

        txq = eth_dev->data->tx_queues[tx_queue_id];

        IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
                tx_queue_id, txq->qcq.q.num_descs);

        if (!(txq->flags & IONIC_QCQ_F_INITED)) {
                err = ionic_lif_txq_init(txq);
                if (err)
                        return err;
        } else {
                ionic_qcq_enable(&txq->qcq);
        }

        tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return 0;
}

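/*
 * Seed the TCP checksum field ahead of TSO.  Both helpers below zero
 * the checksum fields and recompute the TCP checksum using
 * rte_ipv4_udptcp_cksum()/rte_ipv6_udptcp_cksum(); the device is then
 * expected to adjust the field for each segment it emits (see the note
 * in ionic_tx_tso()).
 */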
static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
        struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm,
                struct rte_ether_hdr *);
        char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
        struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
                (l3_hdr + txm->l3_len);

        if (txm->ol_flags & PKT_TX_IP_CKSUM) {
                struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
                ipv4_hdr->hdr_checksum = 0;
                tcp_hdr->cksum = 0;
                tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
        } else {
                struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
                tcp_hdr->cksum = 0;
                tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
        }
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
        struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm,
                struct rte_ether_hdr *);
        char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
                txm->outer_l3_len + txm->l2_len;
        struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
                (l3_hdr + txm->l3_len);

        if (txm->ol_flags & PKT_TX_IPV4) {
                struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
                ipv4_hdr->hdr_checksum = 0;
                tcp_hdr->cksum = 0;
                tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
        } else {
                struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
                tcp_hdr->cksum = 0;
                tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
        }
}

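/*
 * Write a single TSO descriptor.  The SOT/EOT flags mark the first and
 * last descriptors generated from one packet, and the mbuf pointer is
 * recorded in the queue info array only when the EOT descriptor is
 * posted, for release by the completion handler.
 */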
static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
                struct rte_mbuf *txm,
                rte_iova_t addr, uint8_t nsge, uint16_t len,
                uint32_t hdrlen, uint32_t mss,
                bool encap,
                uint16_t vlan_tci, bool has_vlan,
                bool start, bool done)
{
        void **info;
        uint8_t flags = 0;

        flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
        flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
        flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
        flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

        desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
                flags, nsge, addr);
        desc->len = len;
        desc->vlan_tci = vlan_tci;
        desc->hdr_len = hdrlen;
        desc->mss = mss;

        if (done) {
                info = IONIC_INFO_PTR(q, q->head_idx);
                info[0] = txm;
        }

        q->head_idx = Q_NEXT_TO_POST(q, 1);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
        struct ionic_queue *q = &txq->qcq.q;
        struct ionic_txq_desc *desc_base = q->base;
        struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
        struct ionic_txq_desc *desc = &desc_base[q->head_idx];
        struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

        *elem = sg_desc->elems;
        return desc;
}

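/*
 * Segment a TSO packet into hardware descriptors.  The first loop chops
 * the head mbuf's data into per-segment pieces (hdrlen + mss bytes for
 * the first, mss thereafter), one main descriptor each; the second loop
 * walks the chained mbufs, appending SG elements to the current
 * descriptor until the in-flight piece (frag_left) is full, then starts
 * a new descriptor.
 */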
static int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
        struct ionic_queue *q = &txq->qcq.q;
        struct ionic_tx_stats *stats = &txq->stats;
        struct ionic_txq_desc *desc;
        struct ionic_txq_sg_elem *elem;
        struct rte_mbuf *txm_seg;
        rte_iova_t data_iova;
        uint64_t desc_addr = 0, next_addr;
        uint16_t desc_len = 0;
        uint8_t desc_nsge;
        uint32_t hdrlen;
        uint32_t mss = txm->tso_segsz;
        uint32_t frag_left = 0;
        uint32_t left;
        uint32_t seglen;
        uint32_t len;
        uint32_t offset = 0;
        bool start, done;
        bool encap;
        bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
        uint16_t vlan_tci = txm->vlan_tci;
        uint64_t ol_flags = txm->ol_flags;

        encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
                (ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
                ((ol_flags & PKT_TX_OUTER_IPV4) ||
                (ol_flags & PKT_TX_OUTER_IPV6));

        /* Preload inner-most TCP csum field with IP pseudo hdr
         * calculated with IP length set to zero.  HW will later
         * add in length to each TCP segment resulting from the TSO.
         */

        if (encap) {
                ionic_tx_tcp_inner_pseudo_csum(txm);
                hdrlen = txm->outer_l2_len + txm->outer_l3_len +
                        txm->l2_len + txm->l3_len + txm->l4_len;
        } else {
                ionic_tx_tcp_pseudo_csum(txm);
                hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
        }

        seglen = hdrlen + mss;
        left = txm->data_len;
        data_iova = rte_mbuf_data_iova(txm);

        desc = ionic_tx_tso_next(txq, &elem);
        start = true;

        /* Chop data up into desc segments */

        while (left > 0) {
                len = RTE_MIN(seglen, left);
                frag_left = seglen - len;
                desc_addr = rte_cpu_to_le_64(data_iova + offset);
                desc_len = len;
                desc_nsge = 0;
                left -= len;
                offset += len;
                if (txm->nb_segs > 1 && frag_left > 0)
                        continue;
                done = (txm->nb_segs == 1 && left == 0);
                ionic_tx_tso_post(q, desc, txm,
                        desc_addr, desc_nsge, desc_len,
                        hdrlen, mss,
                        encap,
                        vlan_tci, has_vlan,
                        start, done);
                desc = ionic_tx_tso_next(txq, &elem);
                start = false;
                seglen = mss;
        }

        /* Chop frags into desc segments */

        txm_seg = txm->next;
        while (txm_seg != NULL) {
                offset = 0;
                data_iova = rte_mbuf_data_iova(txm_seg);
                left = txm_seg->data_len;

                while (left > 0) {
                        next_addr = rte_cpu_to_le_64(data_iova + offset);
                        if (frag_left > 0) {
                                len = RTE_MIN(frag_left, left);
                                frag_left -= len;
                                elem->addr = next_addr;
                                elem->len = len;
                                elem++;
                                desc_nsge++;
                        } else {
                                len = RTE_MIN(mss, left);
                                frag_left = mss - len;
                                desc_addr = next_addr;
                                desc_len = len;
                                desc_nsge = 0;
                        }
                        left -= len;
                        offset += len;
                        if (txm_seg->next != NULL && frag_left > 0)
                                continue;

                        done = (txm_seg->next == NULL && left == 0);
                        ionic_tx_tso_post(q, desc, txm_seg,
                                desc_addr, desc_nsge, desc_len,
                                hdrlen, mss,
                                encap,
                                vlan_tci, has_vlan,
                                start, done);
                        desc = ionic_tx_tso_next(txq, &elem);
                        start = false;
                }

                txm_seg = txm_seg->next;
        }

        stats->tso++;

        return 0;
}

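/*
 * Post a single non-TSO packet: the first data segment goes into the
 * main descriptor and any chained mbufs become SG elements, so the
 * packet occupies exactly one descriptor slot.  Checksum offload is
 * requested only when both the mbuf flags ask for it and the queue was
 * configured with the matching offload.
 */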
static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
        struct ionic_queue *q = &txq->qcq.q;
        struct ionic_txq_desc *desc, *desc_base = q->base;
        struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
        struct ionic_txq_sg_elem *elem;
        struct ionic_tx_stats *stats = &txq->stats;
        struct rte_mbuf *txm_seg;
        void **info;
        bool encap;
        bool has_vlan;
        uint64_t ol_flags = txm->ol_flags;
        uint64_t addr;
        uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
        uint8_t flags = 0;

        desc = &desc_base[q->head_idx];
        info = IONIC_INFO_PTR(q, q->head_idx);

        if ((ol_flags & PKT_TX_IP_CKSUM) &&
            (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
                opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
                flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
        }

        if (((ol_flags & PKT_TX_TCP_CKSUM) &&
             (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
            ((ol_flags & PKT_TX_UDP_CKSUM) &&
             (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
                opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
                flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
        }

        if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
                stats->no_csum++;

        has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
        encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
                        (ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
                        ((ol_flags & PKT_TX_OUTER_IPV4) ||
                        (ol_flags & PKT_TX_OUTER_IPV6));

        flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
        flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

        addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

        desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
        desc->len = txm->data_len;
        desc->vlan_tci = txm->vlan_tci;

        info[0] = txm;

        elem = sg_desc_base[q->head_idx].elems;

        txm_seg = txm->next;
        while (txm_seg != NULL) {
                elem->len = txm_seg->data_len;
                elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
                elem++;
                txm_seg = txm_seg->next;
        }

        q->head_idx = Q_NEXT_TO_POST(q, 1);

        return 0;
}

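/*
 * Burst transmit.  Completed descriptors are reclaimed first, the burst
 * is clamped to the free descriptor space (the excess is counted in
 * stats->stop), and the doorbell is rung once at the end via
 * ionic_q_flush().  The rte_wmb() orders the descriptor writes ahead of
 * the doorbell write.
 */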
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts)
{
        struct ionic_tx_qcq *txq = tx_queue;
        struct ionic_queue *q = &txq->qcq.q;
        struct ionic_tx_stats *stats = &txq->stats;
        uint32_t next_q_head_idx;
        uint32_t bytes_tx = 0;
        uint16_t nb_avail, nb_tx = 0;
        int err;

        /* Cleaning old buffers */
        ionic_tx_flush(txq);

        nb_avail = ionic_q_space_avail(q);
        if (unlikely(nb_avail < nb_pkts)) {
                stats->stop += nb_pkts - nb_avail;
                nb_pkts = nb_avail;
        }

        while (nb_tx < nb_pkts) {
                next_q_head_idx = Q_NEXT_TO_POST(q, 1);
                if ((next_q_head_idx & 0x3) == 0) {
                        struct ionic_txq_desc *desc_base = q->base;
                        rte_prefetch0(&desc_base[next_q_head_idx]);
                        rte_prefetch0(&q->info[next_q_head_idx]);
                }

                if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
                        err = ionic_tx_tso(txq, tx_pkts[nb_tx]);
                else
                        err = ionic_tx(txq, tx_pkts[nb_tx]);
                if (err) {
                        stats->drop += nb_pkts - nb_tx;
                        break;
                }

                bytes_tx += tx_pkts[nb_tx]->pkt_len;
                nb_tx++;
        }

        if (nb_tx > 0) {
                rte_wmb();
                ionic_q_flush(q);
        }

        stats->packets += nb_tx;
        stats->bytes += bytes_tx;

        return nb_tx;
}

/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK ( \
        PKT_TX_IPV4 |           \
        PKT_TX_IPV6 |           \
        PKT_TX_VLAN |           \
        PKT_TX_IP_CKSUM |       \
        PKT_TX_TCP_SEG |        \
        PKT_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
        (PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

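/*
 * Burst preparation: validation only.  A packet that exceeds the
 * firmware segment limit or carries unsupported offload flags stops the
 * loop, and the number of packets that passed is returned.  The TSO
 * pseudo-checksum seeding is done later, in ionic_tx_tso().
 */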
uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        struct ionic_tx_qcq *txq = tx_queue;
        struct rte_mbuf *txm;
        uint64_t offloads;
        int i = 0;

        for (i = 0; i < nb_pkts; i++) {
                txm = tx_pkts[i];

                if (txm->nb_segs > txq->num_segs_fw) {
                        rte_errno = EINVAL;
                        break;
                }

                offloads = txm->ol_flags;

                if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
                        rte_errno = ENOTSUP;
                        break;
                }
        }

        return i;
}

/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
                struct rte_mbuf *mbuf);

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                struct rte_eth_rxq_info *qinfo)
{
        struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
        struct ionic_queue *q = &rxq->qcq.q;

        qinfo->mp = rxq->mb_pool;
        qinfo->scattered_rx = dev->data->scattered_rx;
        qinfo->nb_desc = q->num_descs;
        qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
        qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
        struct ionic_queue *q = &rxq->qcq.q;
        struct rte_mbuf *mbuf;
        void **info;

        while (q->tail_idx != q->head_idx) {
                info = IONIC_INFO_PTR(q, q->tail_idx);
                mbuf = info[0];
                rte_mempool_put(rxq->mb_pool, mbuf);

                q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
        }
}

void __rte_cold
ionic_dev_rx_queue_release(void *rx_queue)
{
        struct ionic_rx_qcq *rxq = rx_queue;
        struct ionic_rx_stats *stats;

        if (!rxq)
                return;

        IONIC_PRINT_CALL();

        stats = &rxq->stats;

        IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
                rxq->qcq.q.index, stats->packets, stats->mtods);

        ionic_rx_empty(rxq);

        ionic_lif_rxq_deinit(rxq);

        ionic_qcq_free(&rxq->qcq);
}

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
                uint16_t rx_queue_id,
                uint16_t nb_desc,
                uint32_t socket_id,
                const struct rte_eth_rxconf *rx_conf,
                struct rte_mempool *mp)
{
        struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
        struct ionic_rx_qcq *rxq;
        uint64_t offloads;
        int err;

        if (rx_queue_id >= lif->nrxqcqs) {
                IONIC_PRINT(ERR,
                        "Queue index %u not available (max %u queues)",
                        rx_queue_id, lif->nrxqcqs);
                return -EINVAL;
        }

        offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
        IONIC_PRINT(DEBUG,
                "Configuring skt %u RX queue %u with %u buffers, offloads %jx",
                socket_id, rx_queue_id, nb_desc, offloads);

        if (!rx_conf->rx_drop_en)
                IONIC_PRINT(WARNING, "No-drop mode is not supported");

        /* Validate number of receive descriptors */
        if (!rte_is_power_of_2(nb_desc) ||
                        nb_desc < IONIC_MIN_RING_DESC ||
                        nb_desc > IONIC_MAX_RING_DESC) {
                IONIC_PRINT(ERR,
                        "Bad descriptor count (%u) for queue %u (min: %u, max: %u)",
                        nb_desc, rx_queue_id,
                        IONIC_MIN_RING_DESC, IONIC_MAX_RING_DESC);
                return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
        }

        /* Free memory prior to re-allocation if needed... */
        if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
                void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
                ionic_dev_rx_queue_release(rx_queue);
                eth_dev->data->rx_queues[rx_queue_id] = NULL;
        }

        eth_dev->data->rx_queue_state[rx_queue_id] =
                RTE_ETH_QUEUE_STATE_STOPPED;

        err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc,
                        &rxq);
        if (err) {
                IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
                return -EINVAL;
        }

        rxq->mb_pool = mp;

        /*
         * Note: the interface does not currently support
         * DEV_RX_OFFLOAD_KEEP_CRC. If the adapter gains the ability to
         * keep the CRC, also account for RTE_ETHER_CRC_LEN and subtract
         * it from the length of all received packets:
         * if (eth_dev->data->dev_conf.rxmode.offloads &
         *     DEV_RX_OFFLOAD_KEEP_CRC)
         *   rxq->crc_len = RTE_ETHER_CRC_LEN;
         */

        /* Do not start queue with rte_eth_dev_start() */
        if (rx_conf->rx_deferred_start)
                rxq->flags |= IONIC_QCQ_F_DEFERRED;

        eth_dev->data->rx_queues[rx_queue_id] = rxq;

        return 0;
}

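/*
 * Service one Rx completion.  The mbuf chain posted at q_desc_index is
 * either delivered to the caller (trimmed to the completion length,
 * with offload flags and packet type translated from the completion
 * descriptor) or recycled in place when the completion reports an
 * error, the caller's burst array is full, or the length is invalid.
 */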
static __rte_always_inline void
ionic_rx_clean(struct ionic_rx_qcq *rxq,
                uint32_t q_desc_index, uint32_t cq_desc_index,
                void *service_cb_arg)
{
        struct ionic_queue *q = &rxq->qcq.q;
        struct ionic_cq *cq = &rxq->qcq.cq;
        struct ionic_rxq_comp *cq_desc_base = cq->base;
        struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
        struct rte_mbuf *rxm, *rxm_seg;
        uint32_t max_frame_size =
                rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
        uint64_t pkt_flags = 0;
        uint32_t pkt_type;
        struct ionic_rx_stats *stats = &rxq->stats;
        struct ionic_rx_service *recv_args = (struct ionic_rx_service *)
                service_cb_arg;
        uint32_t buf_size = (uint16_t)
                (rte_pktmbuf_data_room_size(rxq->mb_pool) -
                RTE_PKTMBUF_HEADROOM);
        uint32_t left;
        void **info;

        RTE_ASSERT(q_desc_index == cq_desc->comp_index);

        info = IONIC_INFO_PTR(q, cq_desc->comp_index);

        rxm = info[0];

        if (!recv_args) {
                stats->no_cb_arg++;
                /* Flush */
                rte_pktmbuf_free(rxm);
                /*
                 * Note: rte_mempool_put(rxq->mb_pool, rxm) would be
                 * faster here, but only when the mbuf has no chained
                 * segments
                 */
                return;
        }

        if (cq_desc->status) {
                stats->bad_cq_status++;
                ionic_rx_recycle(q, q_desc_index, rxm);
                return;
        }

        if (recv_args->nb_rx >= recv_args->nb_pkts) {
                stats->no_room++;
                ionic_rx_recycle(q, q_desc_index, rxm);
                return;
        }

        if (cq_desc->len > max_frame_size ||
                        cq_desc->len == 0) {
                stats->bad_len++;
                ionic_rx_recycle(q, q_desc_index, rxm);
                return;
        }

        rxm->data_off = RTE_PKTMBUF_HEADROOM;
        rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
        rxm->nb_segs = 1; /* recounted while walking the chain below */
        rxm->pkt_len = cq_desc->len;
        rxm->port = rxq->qcq.lif->port_id;

        left = cq_desc->len;

        rxm->data_len = RTE_MIN(buf_size, left);
        left -= rxm->data_len;

        rxm_seg = rxm->next;
        while (rxm_seg && left) {
                rxm_seg->data_len = RTE_MIN(buf_size, left);
                left -= rxm_seg->data_len;

                rxm_seg = rxm_seg->next;
                rxm->nb_segs++;
        }

        /* RSS */
        pkt_flags |= PKT_RX_RSS_HASH;
        rxm->hash.rss = cq_desc->rss_hash;

        /* Vlan Strip */
        if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
                pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
                rxm->vlan_tci = cq_desc->vlan_tci;
        }

        /* Checksum */
        if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
                if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
                        pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
                else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
                        pkt_flags |= PKT_RX_IP_CKSUM_BAD;

                if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
                        (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
                        pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
                else if ((cq_desc->csum_flags &
                                IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
                                (cq_desc->csum_flags &
                                IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
                        pkt_flags |= PKT_RX_L4_CKSUM_BAD;
        }

        rxm->ol_flags = pkt_flags;

        /* Packet Type */
        switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
        case IONIC_PKT_TYPE_IPV4:
                pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
                break;
        case IONIC_PKT_TYPE_IPV6:
                pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
                break;
        case IONIC_PKT_TYPE_IPV4_TCP:
                pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
                        RTE_PTYPE_L4_TCP;
                break;
        case IONIC_PKT_TYPE_IPV6_TCP:
                pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
                        RTE_PTYPE_L4_TCP;
                break;
        case IONIC_PKT_TYPE_IPV4_UDP:
                pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
                        RTE_PTYPE_L4_UDP;
                break;
        case IONIC_PKT_TYPE_IPV6_UDP:
                pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
                        RTE_PTYPE_L4_UDP;
                break;
        default:
                {
                        struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
                                struct rte_ether_hdr *);
                        uint16_t ether_type = eth_h->ether_type;
                        if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
                                pkt_type = RTE_PTYPE_L2_ETHER_ARP;
                        else
                                pkt_type = RTE_PTYPE_UNKNOWN;
                        stats->mtods++;
                        break;
                }
        }

        rxm->packet_type = pkt_type;

        recv_args->rx_pkts[recv_args->nb_rx] = rxm;
        recv_args->nb_rx++;

        stats->packets++;
        stats->bytes += rxm->pkt_len;
}

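/*
 * Repost an Rx buffer without a round trip through the mempool: the old
 * descriptor's address and length are copied into the current head slot
 * and the doorbell is rung, so the same mbuf is immediately offered to
 * the hardware again.
 */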
static void
ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
                 struct rte_mbuf *mbuf)
{
        struct ionic_rxq_desc *desc_base = q->base;
        struct ionic_rxq_desc *old = &desc_base[q_desc_index];
        struct ionic_rxq_desc *new = &desc_base[q->head_idx];

        new->addr = old->addr;
        new->len = old->len;

        q->info[q->head_idx] = mbuf;

        q->head_idx = Q_NEXT_TO_POST(q, 1);

        ionic_q_flush(q);
}

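/*
 * Fill all free Rx descriptor slots with fresh mbufs.  Each slot
 * receives a chain large enough to hold len bytes given the mempool
 * buffer size: the head mbuf is placed in the main descriptor and any
 * additional mbufs become SG elements, with the opcode chosen
 * accordingly.
 */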
static __rte_always_inline int
ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
{
        struct ionic_queue *q = &rxq->qcq.q;
        struct ionic_rxq_desc *desc, *desc_base = q->base;
        struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
        struct ionic_rxq_sg_elem *elem;
        void **info;
        rte_iova_t dma_addr;
        uint32_t i, j, nsegs, buf_size, size;

        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
                RTE_PKTMBUF_HEADROOM);

        /* Initialize software ring entries */
        for (i = ionic_q_space_avail(q); i; i--) {
                struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
                struct rte_mbuf *prev_rxm_seg;

                if (rxm == NULL) {
                        IONIC_PRINT(ERR, "RX mbuf alloc failed");
                        return -ENOMEM;
                }

                info = IONIC_INFO_PTR(q, q->head_idx);

                nsegs = (len + buf_size - 1) / buf_size;

                desc = &desc_base[q->head_idx];
                dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
                desc->addr = dma_addr;
                desc->len = buf_size;
                size = buf_size;
                desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
                        IONIC_RXQ_DESC_OPCODE_SIMPLE;
                rxm->next = NULL;

                prev_rxm_seg = rxm;
                sg_desc = &sg_desc_base[q->head_idx];
                elem = sg_desc->elems;
                for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
                        struct rte_mbuf *rxm_seg;
                        rte_iova_t data_iova;

                        rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
                        if (rxm_seg == NULL) {
                                IONIC_PRINT(ERR, "RX mbuf alloc failed");
                                return -ENOMEM;
                        }

                        data_iova = rte_mbuf_data_iova(rxm_seg);
                        dma_addr = rte_cpu_to_le_64(data_iova);
                        elem->addr = dma_addr;
                        elem->len = buf_size;
                        size += buf_size;
                        elem++;
                        rxm_seg->next = NULL;
                        prev_rxm_seg->next = rxm_seg;
                        prev_rxm_seg = rxm_seg;
                }

                if (size < len)
                        IONIC_PRINT(ERR, "Rx SG size is not sufficient (%u < %u)",
                                size, len);

                info[0] = rxm;

                q->head_idx = Q_NEXT_TO_POST(q, 1);
        }

        ionic_q_flush(q);

        return 0;
}

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
        uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
        struct ionic_rx_qcq *rxq;
        int err;

        if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
                IONIC_PRINT(DEBUG, "RX queue %u already started",
                        rx_queue_id);
                return 0;
        }

        rxq = eth_dev->data->rx_queues[rx_queue_id];

        IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs (size: %u)",
                rx_queue_id, rxq->qcq.q.num_descs, frame_size);

        if (!(rxq->flags & IONIC_QCQ_F_INITED)) {
                err = ionic_lif_rxq_init(rxq);
                if (err)
                        return err;
        } else {
                ionic_qcq_enable(&rxq->qcq);
        }

        /* Allocate buffers for descriptor rings */
        if (ionic_rx_fill(rxq, frame_size) != 0) {
                IONIC_PRINT(ERR, "Could not alloc mbuf for queue %u",
                        rx_queue_id);
                return -1;
        }

        rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        return 0;
}

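/*
 * Walk the Rx completion ring (using the same color scheme as the Tx
 * side) and clean posted descriptors up to and including each
 * completion's comp_index; the inner do/while handles completions that
 * cover more than one posted descriptor.  At most work_to_do
 * completions are processed per call.
 */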
static __rte_always_inline void
ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
                void *service_cb_arg)
{
        struct ionic_cq *cq = &rxq->qcq.cq;
        struct ionic_queue *q = &rxq->qcq.q;
        struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
        bool more;
        uint32_t curr_q_tail_idx, curr_cq_tail_idx;
        uint32_t work_done = 0;

        if (work_to_do == 0)
                return;

        cq_desc = &cq_desc_base[cq->tail_idx];
        while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
                curr_cq_tail_idx = cq->tail_idx;
                cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

                if (cq->tail_idx == 0)
                        cq->done_color = !cq->done_color;

                /* Prefetch the next 4 descriptors */
                if ((cq->tail_idx & 0x3) == 0)
                        rte_prefetch0(&cq_desc_base[cq->tail_idx]);

                do {
                        more = (q->tail_idx != cq_desc->comp_index);

                        curr_q_tail_idx = q->tail_idx;
                        q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

                        /* Prefetch the next 4 descriptors */
                        if ((q->tail_idx & 0x3) == 0)
                                /* q desc info */
                                rte_prefetch0(&q->info[q->tail_idx]);

                        ionic_rx_clean(rxq, curr_q_tail_idx, curr_cq_tail_idx,
                                service_cb_arg);

                } while (more);

                if (++work_done == work_to_do)
                        break;

                cq_desc = &cq_desc_base[cq->tail_idx];
        }
}

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
        struct ionic_rx_qcq *rxq;

        IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

        rxq = eth_dev->data->rx_queues[rx_queue_id];

        eth_dev->data->rx_queue_state[rx_queue_id] =
                RTE_ETH_QUEUE_STATE_STOPPED;

        ionic_qcq_disable(&rxq->qcq);

        /* Flush */
        ionic_rxq_service(rxq, -1, NULL);

        return 0;
}

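/*
 * Burst receive: service up to nb_pkts completions into the caller's
 * array via the ionic_rx_service context, then replenish the ring with
 * fresh buffers sized to the current maximum frame length.
 */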
uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
{
        struct ionic_rx_qcq *rxq = rx_queue;
        uint32_t frame_size =
                rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
        struct ionic_rx_service service_cb_arg;

        service_cb_arg.rx_pkts = rx_pkts;
        service_cb_arg.nb_pkts = nb_pkts;
        service_cb_arg.nb_rx = 0;

        ionic_rxq_service(rxq, nb_pkts, &service_cb_arg);

        ionic_rx_fill(rxq, frame_size);

        return service_cb_arg.nb_rx;
}