dpdk/drivers/net/ionic/ionic_rxtx.c
   1/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
   2 * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
   3 */
   4
   5#include <sys/queue.h>
   6#include <stdio.h>
   7#include <stdlib.h>
   8#include <string.h>
   9#include <errno.h>
  10#include <stdint.h>
  11#include <stdarg.h>
  12#include <unistd.h>
  13#include <inttypes.h>
  14
  15#include <rte_byteorder.h>
  16#include <rte_common.h>
  17#include <rte_cycles.h>
  18#include <rte_log.h>
  19#include <rte_debug.h>
  20#include <rte_interrupts.h>
  21#include <rte_pci.h>
  22#include <rte_memory.h>
  23#include <rte_memzone.h>
  24#include <rte_launch.h>
  25#include <rte_eal.h>
  26#include <rte_per_lcore.h>
  27#include <rte_lcore.h>
  28#include <rte_atomic.h>
  29#include <rte_branch_prediction.h>
  30#include <rte_mempool.h>
  31#include <rte_malloc.h>
  32#include <rte_mbuf.h>
  33#include <rte_ether.h>
  34#include <rte_ethdev_driver.h>
  35#include <rte_prefetch.h>
  36#include <rte_udp.h>
  37#include <rte_tcp.h>
  38#include <rte_sctp.h>
  39#include <rte_string_fns.h>
  40#include <rte_errno.h>
  41#include <rte_ip.h>
  42#include <rte_net.h>
  43
  44#include "ionic_logs.h"
  45#include "ionic_mac_api.h"
  46#include "ionic_ethdev.h"
  47#include "ionic_lif.h"
  48#include "ionic_rxtx.h"
  49
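/*
 * The Rx doorbell is rung once every 32 posted buffers.  The stride is kept
 * as a power-of-two mask so ionic_rx_fill() can simply test
 * ((head_idx + 1) & IONIC_RX_RING_DOORBELL_STRIDE) == 0.
 */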
  50#define IONIC_RX_RING_DOORBELL_STRIDE           (32 - 1)
  51
  52/*********************************************************************
  53 *
  54 *  TX functions
  55 *
  56 **********************************************************************/
  57
  58void
  59ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
  60                struct rte_eth_txq_info *qinfo)
  61{
  62        struct ionic_qcq *txq = dev->data->tx_queues[queue_id];
  63        struct ionic_queue *q = &txq->q;
  64
  65        qinfo->nb_desc = q->num_descs;
  66        qinfo->conf.offloads = txq->offloads;
  67        qinfo->conf.tx_deferred_start = txq->deferred_start;
  68}
  69
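/*
 * Process Tx completions: walk the completion ring while the completion
 * color matches the expected done color (which flips each time the ring
 * wraps), then advance the queue tail up to the last completed index,
 * freeing the transmitted mbuf chains.  Called from the transmit burst
 * path and from queue stop.
 */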
   70static inline void
  71ionic_tx_flush(struct ionic_cq *cq)
  72{
  73        struct ionic_queue *q = cq->bound_q;
  74        struct ionic_desc_info *q_desc_info;
  75        struct rte_mbuf *txm, *next;
  76        struct ionic_txq_comp *cq_desc_base = cq->base;
  77        struct ionic_txq_comp *cq_desc;
   78        uint32_t comp_index = (uint32_t)-1;
  79
  80        cq_desc = &cq_desc_base[cq->tail_idx];
  81        while (color_match(cq_desc->color, cq->done_color)) {
  82                cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
  83
  84                /* Prefetch the next 4 descriptors (not really useful here) */
  85                if ((cq->tail_idx & 0x3) == 0)
  86                        rte_prefetch0(&cq_desc_base[cq->tail_idx]);
  87
  88                if (cq->tail_idx == 0)
  89                        cq->done_color = !cq->done_color;
  90
  91                comp_index = cq_desc->comp_index;
  92
  93                cq_desc = &cq_desc_base[cq->tail_idx];
  94        }
  95
   96        if (comp_index != (uint32_t)-1) {
  97                while (q->tail_idx != comp_index) {
  98                        q_desc_info = &q->info[q->tail_idx];
  99
 100                        q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
 101
 102                        /* Prefetch the next 4 descriptors */
 103                        if ((q->tail_idx & 0x3) == 0)
 104                                /* q desc info */
 105                                rte_prefetch0(&q->info[q->tail_idx]);
 106
 107                        /*
 108                         * Note: you can just use rte_pktmbuf_free,
 109                         * but this loop is faster
 110                         */
 111                        txm = q_desc_info->cb_arg;
 112                        while (txm != NULL) {
 113                                next = txm->next;
 114                                rte_pktmbuf_free_seg(txm);
 115                                txm = next;
 116                        }
 117                }
 118        }
 119}
 120
 121void __rte_cold
 122ionic_dev_tx_queue_release(void *tx_queue)
 123{
 124        struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
 125
 126        IONIC_PRINT_CALL();
 127
 128        ionic_qcq_free(txq);
 129}
 130
 131int __rte_cold
 132ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 133{
 134        struct ionic_qcq *txq;
 135
 136        IONIC_PRINT_CALL();
 137
 138        txq = eth_dev->data->tx_queues[tx_queue_id];
 139
 140        /*
  141         * Note: ideally we would post a NOP Tx descriptor and wait for its
  142         * completion before disabling the Tx queue
 143         */
 144
 145        ionic_qcq_disable(txq);
 146
 147        ionic_tx_flush(&txq->cq);
 148
 149        ionic_lif_txq_deinit(txq);
 150
 151        eth_dev->data->tx_queue_state[tx_queue_id] =
 152                RTE_ETH_QUEUE_STATE_STOPPED;
 153
 154        return 0;
 155}
 156
 157int __rte_cold
 158ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
 159                uint16_t nb_desc, uint32_t socket_id __rte_unused,
 160                const struct rte_eth_txconf *tx_conf)
 161{
 162        struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
 163        struct ionic_qcq *txq;
 164        uint64_t offloads;
 165        int err;
 166
 167        IONIC_PRINT_CALL();
 168
 169        IONIC_PRINT(DEBUG, "Configuring TX queue %u with %u buffers",
 170                tx_queue_id, nb_desc);
 171
 172        if (tx_queue_id >= lif->ntxqcqs) {
  173                IONIC_PRINT(ERR, "Queue index %u not available "
 174                        "(max %u queues)",
 175                        tx_queue_id, lif->ntxqcqs);
 176                return -EINVAL;
 177        }
 178
 179        offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
 180
  181        /* Validate number of transmit descriptors */
 182        if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
 183                return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
 184
 185        /* Free memory prior to re-allocation if needed... */
 186        if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
 187                void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
 188                ionic_dev_tx_queue_release(tx_queue);
 189                eth_dev->data->tx_queues[tx_queue_id] = NULL;
 190        }
 191
 192        err = ionic_tx_qcq_alloc(lif, tx_queue_id, nb_desc, &txq);
 193        if (err) {
  194                IONIC_PRINT(ERR, "Queue allocation failure");
 195                return -EINVAL;
 196        }
 197
 198        /* Do not start queue with rte_eth_dev_start() */
 199        txq->deferred_start = tx_conf->tx_deferred_start;
 200
 201        txq->offloads = offloads;
 202
 203        eth_dev->data->tx_queues[tx_queue_id] = txq;
 204
 205        return 0;
 206}
 207
 208/*
 209 * Start Transmit Units for specified queue.
 210 */
 211int __rte_cold
 212ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 213{
 214        struct ionic_qcq *txq;
 215        int err;
 216
 217        IONIC_PRINT_CALL();
 218
 219        txq = eth_dev->data->tx_queues[tx_queue_id];
 220
 221        err = ionic_lif_txq_init(txq);
 222        if (err)
 223                return err;
 224
 225        ionic_qcq_enable(txq);
 226
 227        eth_dev->data->tx_queue_state[tx_queue_id] =
 228                RTE_ETH_QUEUE_STATE_STARTED;
 229
 230        return 0;
 231}
 232
 233static void
 234ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
 235{
  236        struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
 237        char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
 238        struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
 239                (l3_hdr + txm->l3_len);
 240
 241        if (txm->ol_flags & PKT_TX_IP_CKSUM) {
 242                struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
 243                ipv4_hdr->hdr_checksum = 0;
 244                tcp_hdr->cksum = 0;
 245                tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
 246        } else {
 247                struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
 248                tcp_hdr->cksum = 0;
 249                tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
 250        }
 251}
 252
 253static void
 254ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
 255{
  256        struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
 257        char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
 258                txm->outer_l3_len + txm->l2_len;
 259        struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
 260                (l3_hdr + txm->l3_len);
 261
 262        if (txm->ol_flags & PKT_TX_IPV4) {
 263                struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
 264                ipv4_hdr->hdr_checksum = 0;
 265                tcp_hdr->cksum = 0;
 266                tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
 267        } else {
 268                struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
 269                tcp_hdr->cksum = 0;
 270                tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
 271        }
 272}
 273
 274static void
 275ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
 276                struct rte_mbuf *txm,
 277                rte_iova_t addr, uint8_t nsge, uint16_t len,
 278                uint32_t hdrlen, uint32_t mss,
 279                bool encap,
 280                uint16_t vlan_tci, bool has_vlan,
 281                bool start, bool done)
 282{
 283        uint8_t flags = 0;
 284        flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
 285        flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
 286        flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
 287        flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;
 288
 289        desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
 290                flags, nsge, addr);
 291        desc->len = len;
 292        desc->vlan_tci = vlan_tci;
 293        desc->hdr_len = hdrlen;
 294        desc->mss = mss;
 295
 296        ionic_q_post(q, done, NULL, done ? txm : NULL);
 297}
 298
 299static struct ionic_txq_desc *
 300ionic_tx_tso_next(struct ionic_queue *q, struct ionic_txq_sg_elem **elem)
 301{
 302        struct ionic_txq_desc *desc_base = q->base;
 303        struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
 304        struct ionic_txq_desc *desc = &desc_base[q->head_idx];
 305        struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];
 306
 307        *elem = sg_desc->elems;
 308        return desc;
 309}
 310
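/*
 * Segmentation offload: the packet is described to the device as a series of
 * TSO descriptors.  The first covers the headers (hdrlen) plus up to one MSS
 * of payload; remaining data and mbuf segments are attached as SG elements
 * until each MSS-sized frame is filled.  The SOT/EOT flags mark the first and
 * last descriptor of the packet.
 */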
 311static int
 312ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
 313                uint64_t offloads __rte_unused, bool not_xmit_more)
 314{
 315        struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
 316        struct ionic_txq_desc *desc;
 317        struct ionic_txq_sg_elem *elem;
 318        struct rte_mbuf *txm_seg;
 319        uint64_t desc_addr = 0;
 320        uint16_t desc_len = 0;
 321        uint8_t desc_nsge;
 322        uint32_t hdrlen;
 323        uint32_t mss = txm->tso_segsz;
 324        uint32_t frag_left = 0;
 325        uint32_t left;
 326        uint32_t seglen;
 327        uint32_t len;
 328        uint32_t offset = 0;
 329        bool start, done;
 330        bool encap;
 331        bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
 332        uint16_t vlan_tci = txm->vlan_tci;
 333        uint64_t ol_flags = txm->ol_flags;
 334
 335        encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
 336                (ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
 337                ((ol_flags & PKT_TX_OUTER_IPV4) ||
 338                (ol_flags & PKT_TX_OUTER_IPV6));
 339
 340        /* Preload inner-most TCP csum field with IP pseudo hdr
 341         * calculated with IP length set to zero.  HW will later
 342         * add in length to each TCP segment resulting from the TSO.
 343         */
 344
 345        if (encap) {
 346                ionic_tx_tcp_inner_pseudo_csum(txm);
 347                hdrlen = txm->outer_l2_len + txm->outer_l3_len +
 348                        txm->l2_len + txm->l3_len + txm->l4_len;
 349        } else {
 350                ionic_tx_tcp_pseudo_csum(txm);
 351                hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
 352        }
 353
 354        seglen = hdrlen + mss;
 355        left = txm->data_len;
 356
 357        desc = ionic_tx_tso_next(q, &elem);
 358        start = true;
 359
 360        /* Chop data up into desc segments */
 361
 362        while (left > 0) {
 363                len = RTE_MIN(seglen, left);
 364                frag_left = seglen - len;
  365                desc_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm) + offset);
 366                desc_len = len;
 367                desc_nsge = 0;
 368                left -= len;
 369                offset += len;
 370                if (txm->nb_segs > 1 && frag_left > 0)
 371                        continue;
 372                done = (txm->nb_segs == 1 && left == 0);
 373                ionic_tx_tso_post(q, desc, txm,
 374                        desc_addr, desc_nsge, desc_len,
 375                        hdrlen, mss,
 376                        encap,
 377                        vlan_tci, has_vlan,
 378                        start, done && not_xmit_more);
 379                desc = ionic_tx_tso_next(q, &elem);
 380                start = false;
 381                seglen = mss;
 382        }
 383
 384        /* Chop frags into desc segments */
 385
 386        txm_seg = txm->next;
 387        while (txm_seg != NULL) {
 388                offset = 0;
 389                left = txm_seg->data_len;
 390                stats->frags++;
 391
 392                while (left > 0) {
 393                        rte_iova_t data_iova;
 394                        data_iova = rte_mbuf_data_iova(txm_seg);
  395                        elem->addr = rte_cpu_to_le_64(data_iova + offset);
 396                        if (frag_left > 0) {
 397                                len = RTE_MIN(frag_left, left);
 398                                frag_left -= len;
 399                                elem->len = len;
 400                                elem++;
 401                                desc_nsge++;
 402                        } else {
 403                                len = RTE_MIN(mss, left);
 404                                frag_left = mss - len;
 405                                data_iova = rte_mbuf_data_iova(txm_seg);
  406                                desc_addr = rte_cpu_to_le_64(data_iova + offset);
 407                                desc_len = len;
 408                                desc_nsge = 0;
 409                        }
 410                        left -= len;
 411                        offset += len;
 412                        if (txm_seg->next != NULL && frag_left > 0)
 413                                continue;
 414                        done = (txm_seg->next == NULL && left == 0);
 415                        ionic_tx_tso_post(q, desc, txm_seg,
 416                                desc_addr, desc_nsge, desc_len,
 417                                hdrlen, mss,
 418                                encap,
 419                                vlan_tci, has_vlan,
 420                                start, done && not_xmit_more);
 421                        desc = ionic_tx_tso_next(q, &elem);
 422                        start = false;
 423                }
 424
 425                txm_seg = txm_seg->next;
 426        }
 427
 428        stats->tso++;
 429
 430        return 0;
 431}
 432
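/*
 * Post a single non-TSO packet: one descriptor for the first mbuf segment
 * plus one SG element per additional segment.  Checksum offload flags are
 * derived from the mbuf ol_flags and the queue's configured offloads.
 */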
 433static int
 434ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm,
 435                uint64_t offloads, bool not_xmit_more)
 436{
 437        struct ionic_txq_desc *desc_base = q->base;
 438        struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
 439        struct ionic_txq_desc *desc = &desc_base[q->head_idx];
 440        struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];
 441        struct ionic_txq_sg_elem *elem = sg_desc->elems;
 442        struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
 443        struct rte_mbuf *txm_seg;
 444        bool encap;
 445        bool has_vlan;
 446        uint64_t ol_flags = txm->ol_flags;
 447        uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
 448        uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
 449        uint8_t flags = 0;
 450
 451        if ((ol_flags & PKT_TX_IP_CKSUM) &&
 452                        (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) {
 453                opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
 454                flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
 455                if (((ol_flags & PKT_TX_TCP_CKSUM) &&
 456                                (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
 457                                ((ol_flags & PKT_TX_UDP_CKSUM) &&
 458                                (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)))
 459                        flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
 460        } else {
 461                stats->no_csum++;
 462        }
 463
 464        has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
 465        encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
 466                        (ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
 467                        ((ol_flags & PKT_TX_OUTER_IPV4) ||
 468                        (ol_flags & PKT_TX_OUTER_IPV6));
 469
 470        flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
 471        flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
 472
 473        desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
 474        desc->len = txm->data_len;
 475        desc->vlan_tci = txm->vlan_tci;
 476
 477        txm_seg = txm->next;
 478        while (txm_seg != NULL) {
 479                elem->len = txm_seg->data_len;
 480                elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
 481                stats->frags++;
 482                elem++;
 483                txm_seg = txm_seg->next;
 484        }
 485
 486        ionic_q_post(q, not_xmit_more, NULL, txm);
 487
 488        return 0;
 489}
 490
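/*
 * Transmit burst: clean completed descriptors first and give up early if the
 * ring cannot hold the whole burst.  Each packet is posted with the "last"
 * flag set only for the final packet, so the doorbell write can be deferred
 * to the end of the burst; ionic_q_flush() rings it explicitly if a later
 * packet fails to post.
 */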
 491uint16_t
 492ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 493                uint16_t nb_pkts)
 494{
 495        struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
 496        struct ionic_queue *q = &txq->q;
 497        struct ionic_cq *cq = &txq->cq;
 498        struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
 499        uint32_t next_q_head_idx;
 500        uint32_t bytes_tx = 0;
 501        uint16_t nb_tx = 0;
 502        int err;
 503        bool last;
 504
 505        /* Cleaning old buffers */
 506        ionic_tx_flush(cq);
 507
 508        if (unlikely(ionic_q_space_avail(q) < nb_pkts)) {
 509                stats->stop += nb_pkts;
 510                return 0;
 511        }
 512
 513        while (nb_tx < nb_pkts) {
 514                last = (nb_tx == (nb_pkts - 1));
 515
 516                next_q_head_idx = (q->head_idx + 1) & (q->num_descs - 1);
 517                if ((next_q_head_idx & 0x3) == 0) {
 518                        struct ionic_txq_desc *desc_base = q->base;
 519                        rte_prefetch0(&desc_base[next_q_head_idx]);
 520                        rte_prefetch0(&q->info[next_q_head_idx]);
 521                }
 522
 523                if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
 524                        err = ionic_tx_tso(q, tx_pkts[nb_tx], txq->offloads,
 525                                last);
 526                else
 527                        err = ionic_tx(q, tx_pkts[nb_tx], txq->offloads, last);
 528                if (err) {
 529                        stats->drop += nb_pkts - nb_tx;
 530                        if (nb_tx > 0)
 531                                ionic_q_flush(q);
 532                        break;
 533                }
 534
 535                bytes_tx += tx_pkts[nb_tx]->pkt_len;
 536                nb_tx++;
 537        }
 538
 539        stats->packets += nb_tx;
 540        stats->bytes += bytes_tx;
 541
 542        return nb_tx;
 543}
 544
 545/*********************************************************************
 546 *
 547 *  TX prep functions
 548 *
 549 **********************************************************************/
 550
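/* Tx offload flags the driver handles; ionic_prep_pkts() stops at any packet
 * requesting an offload outside this set.
 */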
 551#define IONIC_TX_OFFLOAD_MASK ( \
 552        PKT_TX_IPV4 |           \
 553        PKT_TX_IPV6 |           \
 554        PKT_TX_VLAN |           \
 555        PKT_TX_IP_CKSUM |       \
 556        PKT_TX_TCP_SEG |        \
 557        PKT_TX_L4_MASK)
 558
 559#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
 560        (PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
 561
 562uint16_t
 563ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
 564                uint16_t nb_pkts)
 565{
 566        struct rte_mbuf *txm;
 567        uint64_t offloads;
 568        int i = 0;
 569
 570        for (i = 0; i < nb_pkts; i++) {
 571                txm = tx_pkts[i];
 572
 573                if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS) {
  574                        rte_errno = EINVAL;
 575                        break;
 576                }
 577
 578                offloads = txm->ol_flags;
 579
 580                if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
  581                        rte_errno = ENOTSUP;
 582                        break;
 583                }
 584        }
 585
 586        return i;
 587}
 588
 589/*********************************************************************
 590 *
 591 *  RX functions
 592 *
 593 **********************************************************************/
 594
 595static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
 596                struct rte_mbuf *mbuf);
 597
 598void
 599ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 600                struct rte_eth_rxq_info *qinfo)
 601{
 602        struct ionic_qcq *rxq = dev->data->rx_queues[queue_id];
 603        struct ionic_queue *q = &rxq->q;
 604
 605        qinfo->mp = rxq->mb_pool;
 606        qinfo->scattered_rx = dev->data->scattered_rx;
 607        qinfo->nb_desc = q->num_descs;
 608        qinfo->conf.rx_deferred_start = rxq->deferred_start;
 609        qinfo->conf.offloads = rxq->offloads;
 610}
 611
 612static void __rte_cold
 613ionic_rx_empty(struct ionic_queue *q)
 614{
 615        struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
 616        struct ionic_desc_info *cur;
 617        struct rte_mbuf *mbuf;
 618
 619        while (q->tail_idx != q->head_idx) {
 620                cur = &q->info[q->tail_idx];
 621                mbuf = cur->cb_arg;
 622                rte_mempool_put(rxq->mb_pool, mbuf);
 623
 624                q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
 625        }
 626}
 627
 628void __rte_cold
 629ionic_dev_rx_queue_release(void *rx_queue)
 630{
 631        struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
 632
 633        IONIC_PRINT_CALL();
 634
 635        ionic_rx_empty(&rxq->q);
 636
 637        ionic_qcq_free(rxq);
 638}
 639
 640int __rte_cold
 641ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 642                uint16_t rx_queue_id,
 643                uint16_t nb_desc,
 644                uint32_t socket_id __rte_unused,
 645                const struct rte_eth_rxconf *rx_conf,
 646                struct rte_mempool *mp)
 647{
 648        struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
 649        struct ionic_qcq *rxq;
 650        uint64_t offloads;
 651        int err;
 652
 653        IONIC_PRINT_CALL();
 654
 655        IONIC_PRINT(DEBUG, "Configuring RX queue %u with %u buffers",
 656                rx_queue_id, nb_desc);
 657
 658        if (rx_queue_id >= lif->nrxqcqs) {
 659                IONIC_PRINT(ERR,
 660                        "Queue index %u not available (max %u queues)",
 661                        rx_queue_id, lif->nrxqcqs);
 662                return -EINVAL;
 663        }
 664
 665        offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
 666
 667        /* Validate number of receive descriptors */
 668        if (!rte_is_power_of_2(nb_desc) ||
 669                        nb_desc < IONIC_MIN_RING_DESC ||
 670                        nb_desc > IONIC_MAX_RING_DESC) {
 671                IONIC_PRINT(ERR,
 672                        "Bad number of descriptors (%u) for queue %u (min: %u)",
 673                        nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
 674                return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
 675        }
 676
 677        if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
 678                eth_dev->data->scattered_rx = 1;
 679
 680        /* Free memory prior to re-allocation if needed... */
 681        if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
 682                void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
 683                ionic_dev_rx_queue_release(rx_queue);
 684                eth_dev->data->rx_queues[rx_queue_id] = NULL;
 685        }
 686
 687        err = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc, &rxq);
 688        if (err) {
 689                IONIC_PRINT(ERR, "Queue allocation failure");
 690                return -EINVAL;
 691        }
 692
 693        rxq->mb_pool = mp;
 694
 695        /*
  696         * Note: the interface does not currently support
  697         * DEV_RX_OFFLOAD_KEEP_CRC.  Once the adapter can keep the CRC,
  698         * also account for RTE_ETHER_CRC_LEN and subtract it from the
  699         * length of every received packet:
  700         * if (eth_dev->data->dev_conf.rxmode.offloads &
  701         *     DEV_RX_OFFLOAD_KEEP_CRC)
  702         *   rxq->crc_len = RTE_ETHER_CRC_LEN;
 703         */
 704
 705        /* Do not start queue with rte_eth_dev_start() */
 706        rxq->deferred_start = rx_conf->rx_deferred_start;
 707
 708        rxq->offloads = offloads;
 709
 710        eth_dev->data->rx_queues[rx_queue_id] = rxq;
 711
 712        return 0;
 713}
 714
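/*
 * Per-packet Rx completion handler: validate the completion, fill in the
 * mbuf metadata (length, segment sizes, RSS hash, VLAN tag, checksum flags
 * and packet type) and hand the mbuf to the caller through the rx_service
 * arguments.  Buffers that cannot be delivered are recycled onto the ring.
 */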
 715static void
 716ionic_rx_clean(struct ionic_queue *q,
 717                uint32_t q_desc_index, uint32_t cq_desc_index,
 718                void *cb_arg, void *service_cb_arg)
 719{
 720        struct ionic_rxq_comp *cq_desc_base = q->bound_cq->base;
 721        struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
 722        struct rte_mbuf *rxm = cb_arg;
 723        struct rte_mbuf *rxm_seg;
 724        struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
 725        uint32_t max_frame_size =
 726                rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
 727        uint64_t pkt_flags = 0;
 728        uint32_t pkt_type;
 729        struct ionic_rx_stats *stats = IONIC_Q_TO_RX_STATS(q);
 730        struct ionic_rx_service *recv_args = (struct ionic_rx_service *)
 731                service_cb_arg;
 732        uint32_t buf_size = (uint16_t)
 733                (rte_pktmbuf_data_room_size(rxq->mb_pool) -
 734                RTE_PKTMBUF_HEADROOM);
 735        uint32_t left;
 736
 737        if (!recv_args) {
 738                stats->no_cb_arg++;
 739                /* Flush */
 740                rte_pktmbuf_free(rxm);
 741                /*
 742                 * Note: rte_mempool_put is faster with no segs
 743                 * rte_mempool_put(rxq->mb_pool, rxm);
 744                 */
 745                return;
 746        }
 747
 748        if (cq_desc->status) {
 749                stats->bad_cq_status++;
 750                ionic_rx_recycle(q, q_desc_index, rxm);
 751                return;
 752        }
 753
 754        if (recv_args->nb_rx >= recv_args->nb_pkts) {
 755                stats->no_room++;
 756                ionic_rx_recycle(q, q_desc_index, rxm);
 757                return;
 758        }
 759
 760        if (cq_desc->len > max_frame_size ||
 761                        cq_desc->len == 0) {
 762                stats->bad_len++;
 763                ionic_rx_recycle(q, q_desc_index, rxm);
 764                return;
 765        }
 766
 767        rxm->data_off = RTE_PKTMBUF_HEADROOM;
 768        rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
 769        rxm->nb_segs = 1; /* cq_desc->num_sg_elems */
 770        rxm->pkt_len = cq_desc->len;
 771        rxm->port = rxq->lif->port_id;
 772
 773        left = cq_desc->len;
 774
 775        rxm->data_len = RTE_MIN(buf_size, left);
 776        left -= rxm->data_len;
 777
 778        rxm_seg = rxm->next;
 779        while (rxm_seg && left) {
 780                rxm_seg->data_len = RTE_MIN(buf_size, left);
 781                left -= rxm_seg->data_len;
 782
 783                rxm_seg = rxm_seg->next;
 784                rxm->nb_segs++;
 785        }
 786
 787        /* RSS */
 788        pkt_flags |= PKT_RX_RSS_HASH;
 789        rxm->hash.rss = cq_desc->rss_hash;
 790
 791        /* Vlan Strip */
 792        if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
 793                pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 794                rxm->vlan_tci = cq_desc->vlan_tci;
 795        }
 796
 797        /* Checksum */
 798        if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
 799                if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
 800                        pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
 801                else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
 802                        pkt_flags |= PKT_RX_IP_CKSUM_BAD;
 803
 804                if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
 805                        (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
 806                        pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
 807                else if ((cq_desc->csum_flags &
 808                                IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
 809                                (cq_desc->csum_flags &
 810                                IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
 811                        pkt_flags |= PKT_RX_L4_CKSUM_BAD;
 812        }
 813
 814        rxm->ol_flags = pkt_flags;
 815
 816        /* Packet Type */
 817        switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
 818        case IONIC_PKT_TYPE_IPV4:
 819                pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
 820                break;
 821        case IONIC_PKT_TYPE_IPV6:
 822                pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
 823                break;
 824        case IONIC_PKT_TYPE_IPV4_TCP:
 825                pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
 826                        RTE_PTYPE_L4_TCP;
 827                break;
 828        case IONIC_PKT_TYPE_IPV6_TCP:
 829                pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
 830                        RTE_PTYPE_L4_TCP;
 831                break;
 832        case IONIC_PKT_TYPE_IPV4_UDP:
 833                pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
 834                        RTE_PTYPE_L4_UDP;
 835                break;
 836        case IONIC_PKT_TYPE_IPV6_UDP:
 837                pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
 838                        RTE_PTYPE_L4_UDP;
 839                break;
 840        default:
 841                {
 842                        struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
 843                                struct rte_ether_hdr *);
 844                        uint16_t ether_type = eth_h->ether_type;
 845                        if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
 846                                pkt_type = RTE_PTYPE_L2_ETHER_ARP;
 847                        else
 848                                pkt_type = RTE_PTYPE_UNKNOWN;
 849                        break;
 850                }
 851        }
 852
 853        rxm->packet_type = pkt_type;
 854
 855        recv_args->rx_pkts[recv_args->nb_rx] = rxm;
 856        recv_args->nb_rx++;
 857
 858        stats->packets++;
 859        stats->bytes += rxm->pkt_len;
 860}
 861
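/*
 * Re-post a buffer that could not be delivered: copy its address and length
 * from the old descriptor slot to the current head so the same mbuf is
 * reused without a round trip through the mempool.
 */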
 862static void
 863ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
 864                 struct rte_mbuf *mbuf)
 865{
 866        struct ionic_rxq_desc *desc_base = q->base;
 867        struct ionic_rxq_desc *old = &desc_base[q_desc_index];
 868        struct ionic_rxq_desc *new = &desc_base[q->head_idx];
 869
 870        new->addr = old->addr;
 871        new->len = old->len;
 872
 873        ionic_q_post(q, true, ionic_rx_clean, mbuf);
 874}
 875
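/*
 * Fill every free descriptor with a freshly allocated mbuf.  When the frame
 * size exceeds the mbuf data room, additional mbufs are chained through the
 * SG descriptor.  The doorbell is rung once per doorbell stride (32 buffers)
 * rather than once per buffer.
 */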
  876static int
 877ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
 878{
 879        struct ionic_queue *q = &rxq->q;
 880        struct ionic_rxq_desc *desc_base = q->base;
 881        struct ionic_rxq_sg_desc *sg_desc_base = q->sg_base;
 882        struct ionic_rxq_desc *desc;
 883        struct ionic_rxq_sg_desc *sg_desc;
 884        struct ionic_rxq_sg_elem *elem;
 885        rte_iova_t dma_addr;
 886        uint32_t i, j, nsegs, buf_size, size;
 887        bool ring_doorbell;
 888
 889        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
 890                RTE_PKTMBUF_HEADROOM);
 891
 892        /* Initialize software ring entries */
 893        for (i = ionic_q_space_avail(q); i; i--) {
 894                struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
 895                struct rte_mbuf *prev_rxm_seg;
 896
 897                if (rxm == NULL) {
 898                        IONIC_PRINT(ERR, "RX mbuf alloc failed");
 899                        return -ENOMEM;
 900                }
 901
 902                nsegs = (len + buf_size - 1) / buf_size;
 903
 904                desc = &desc_base[q->head_idx];
 905                dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
 906                desc->addr = dma_addr;
 907                desc->len = buf_size;
 908                size = buf_size;
 909                desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
 910                        IONIC_RXQ_DESC_OPCODE_SIMPLE;
 911                rxm->next = NULL;
 912
 913                prev_rxm_seg = rxm;
 914                sg_desc = &sg_desc_base[q->head_idx];
 915                elem = sg_desc->elems;
 916                for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
 917                        struct rte_mbuf *rxm_seg;
 918                        rte_iova_t data_iova;
 919
 920                        rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
 921                        if (rxm_seg == NULL) {
 922                                IONIC_PRINT(ERR, "RX mbuf alloc failed");
 923                                return -ENOMEM;
 924                        }
 925
 926                        data_iova = rte_mbuf_data_iova(rxm_seg);
 927                        dma_addr = rte_cpu_to_le_64(data_iova);
 928                        elem->addr = dma_addr;
 929                        elem->len = buf_size;
 930                        size += buf_size;
 931                        elem++;
 932                        rxm_seg->next = NULL;
 933                        prev_rxm_seg->next = rxm_seg;
 934                        prev_rxm_seg = rxm_seg;
 935                }
 936
 937                if (size < len)
 938                        IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)",
 939                                size, len);
 940
 941                ring_doorbell = ((q->head_idx + 1) &
 942                        IONIC_RX_RING_DOORBELL_STRIDE) == 0;
 943
 944                ionic_q_post(q, ring_doorbell, ionic_rx_clean, rxm);
 945        }
 946
 947        return 0;
 948}
 949
 950/*
 951 * Start Receive Units for specified queue.
 952 */
 953int __rte_cold
 954ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 955{
 956        uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
 957        struct ionic_qcq *rxq;
 958        int err;
 959
 960        IONIC_PRINT_CALL();
 961
 962        IONIC_PRINT(DEBUG, "Allocating RX queue buffers (size: %u)",
 963                frame_size);
 964
 965        rxq = eth_dev->data->rx_queues[rx_queue_id];
 966
 967        err = ionic_lif_rxq_init(rxq);
 968        if (err)
 969                return err;
 970
 971        /* Allocate buffers for descriptor rings */
 972        if (ionic_rx_fill(rxq, frame_size) != 0) {
 973                IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
 974                        rx_queue_id);
 975                return -1;
 976        }
 977
 978        ionic_qcq_enable(rxq);
 979
 980        eth_dev->data->rx_queue_state[rx_queue_id] =
 981                RTE_ETH_QUEUE_STATE_STARTED;
 982
 983        return 0;
 984}
 985
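/*
 * Service up to work_to_do Rx completions.  A single completion may cover
 * several posted descriptors (scatter-gather), so the inner loop advances
 * the queue tail until it reaches the completion's comp_index.
 */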
  986static inline void
 987ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
 988                void *service_cb_arg)
 989{
 990        struct ionic_queue *q = cq->bound_q;
 991        struct ionic_desc_info *q_desc_info;
 992        struct ionic_rxq_comp *cq_desc_base = cq->base;
 993        struct ionic_rxq_comp *cq_desc;
 994        bool more;
 995        uint32_t curr_q_tail_idx, curr_cq_tail_idx;
 996        uint32_t work_done = 0;
 997
 998        if (work_to_do == 0)
 999                return;
1000
1001        cq_desc = &cq_desc_base[cq->tail_idx];
1002        while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
1003                curr_cq_tail_idx = cq->tail_idx;
1004                cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
1005
1006                if (cq->tail_idx == 0)
1007                        cq->done_color = !cq->done_color;
1008
1009                /* Prefetch the next 4 descriptors */
1010                if ((cq->tail_idx & 0x3) == 0)
1011                        rte_prefetch0(&cq_desc_base[cq->tail_idx]);
1012
1013                do {
1014                        more = (q->tail_idx != cq_desc->comp_index);
1015
1016                        q_desc_info = &q->info[q->tail_idx];
1017
1018                        curr_q_tail_idx = q->tail_idx;
1019                        q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
1020
1021                        /* Prefetch the next 4 descriptors */
1022                        if ((q->tail_idx & 0x3) == 0)
1023                                /* q desc info */
1024                                rte_prefetch0(&q->info[q->tail_idx]);
1025
1026                        ionic_rx_clean(q, curr_q_tail_idx, curr_cq_tail_idx,
1027                                q_desc_info->cb_arg, service_cb_arg);
1028
1029                } while (more);
1030
1031                if (++work_done == work_to_do)
1032                        break;
1033
1034                cq_desc = &cq_desc_base[cq->tail_idx];
1035        }
1036}
1037
1038/*
1039 * Stop Receive Units for specified queue.
1040 */
1041int __rte_cold
1042ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
1043{
1044        struct ionic_qcq *rxq;
1045
1046        IONIC_PRINT_CALL();
1047
1048        rxq = eth_dev->data->rx_queues[rx_queue_id];
1049
1050        ionic_qcq_disable(rxq);
1051
1052        /* Flush */
1053        ionic_rxq_service(&rxq->cq, -1, NULL);
1054
1055        ionic_lif_rxq_deinit(rxq);
1056
1057        eth_dev->data->rx_queue_state[rx_queue_id] =
1058                RTE_ETH_QUEUE_STATE_STOPPED;
1059
1060        return 0;
1061}
1062
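/*
 * Receive burst: deliver up to nb_pkts completed packets into rx_pkts, then
 * replenish the ring with buffers sized for the current max frame length.
 */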
1063uint16_t
1064ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1065                uint16_t nb_pkts)
1066{
1067        struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
1068        uint32_t frame_size =
1069                rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
1070        struct ionic_cq *cq = &rxq->cq;
1071        struct ionic_rx_service service_cb_arg;
1072
1073        service_cb_arg.rx_pkts = rx_pkts;
1074        service_cb_arg.nb_pkts = nb_pkts;
1075        service_cb_arg.nb_rx = 0;
1076
1077        ionic_rxq_service(cq, nb_pkts, &service_cb_arg);
1078
1079        ionic_rx_fill(rxq, frame_size);
1080
1081        return service_cb_arg.nb_rx;
1082}
1083