linux/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
   3
   4#include <linux/ip.h>
   5#include <linux/ipv6.h>
   6#include <linux/if_vlan.h>
   7#include <net/ip6_checksum.h>
   8
   9#include "ionic.h"
  10#include "ionic_lif.h"
  11#include "ionic_txrx.h"
  12
  13
  14static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
  15                                  ionic_desc_cb cb_func, void *cb_arg)
  16{
  17        DEBUG_STATS_TXQ_POST(q, ring_dbell);
  18
  19        ionic_q_post(q, ring_dbell, cb_func, cb_arg);
  20}
  21
  22static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
  23                                  ionic_desc_cb cb_func, void *cb_arg)
  24{
  25        ionic_q_post(q, ring_dbell, cb_func, cb_arg);
  26
  27        DEBUG_STATS_RX_BUFF_CNT(q);
  28}
  29
  30static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
  31{
  32        return netdev_get_tx_queue(q->lif->netdev, q->index);
  33}
  34
  35static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info)
  36{
  37        buf_info->page = NULL;
  38        buf_info->page_offset = 0;
  39        buf_info->dma_addr = 0;
  40}
  41
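     /* Allocate a page for an Rx buffer and DMA-map it for device writes.
      * The page may later be carved into more than one receive buffer by
      * ionic_rx_buf_recycle().
      */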
  42static int ionic_rx_page_alloc(struct ionic_queue *q,
  43                               struct ionic_buf_info *buf_info)
  44{
  45        struct net_device *netdev = q->lif->netdev;
  46        struct ionic_rx_stats *stats;
  47        struct device *dev;
  48
  49        dev = q->dev;
  50        stats = q_to_rx_stats(q);
  51
  52        if (unlikely(!buf_info)) {
  53                net_err_ratelimited("%s: %s invalid buf_info in alloc\n",
  54                                    netdev->name, q->name);
  55                return -EINVAL;
  56        }
  57
  58        buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0);
  59        if (unlikely(!buf_info->page)) {
  60                net_err_ratelimited("%s: %s page alloc failed\n",
  61                                    netdev->name, q->name);
  62                stats->alloc_err++;
  63                return -ENOMEM;
  64        }
  65        buf_info->page_offset = 0;
  66
  67        buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset,
  68                                          IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
  69        if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) {
  70                __free_pages(buf_info->page, 0);
  71                ionic_rx_buf_reset(buf_info);
  72                net_err_ratelimited("%s: %s dma map failed\n",
  73                                    netdev->name, q->name);
  74                stats->dma_map_err++;
  75                return -EIO;
  76        }
  77
  78        return 0;
  79}
  80
  81static void ionic_rx_page_free(struct ionic_queue *q,
  82                               struct ionic_buf_info *buf_info)
  83{
  84        struct net_device *netdev = q->lif->netdev;
  85        struct device *dev = q->dev;
  86
  87        if (unlikely(!buf_info)) {
  88                net_err_ratelimited("%s: %s invalid buf_info in free\n",
  89                                    netdev->name, q->name);
  90                return;
  91        }
  92
  93        if (!buf_info->page)
  94                return;
  95
  96        dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
  97        __free_pages(buf_info->page, 0);
  98        ionic_rx_buf_reset(buf_info);
  99}
 100
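     /* Try to reuse the remainder of a receive page: skip pfmemalloc and
      * remote-NUMA pages, advance page_offset past the portion just used
      * (rounded up to IONIC_PAGE_SPLIT_SZ), and take an extra page
      * reference if there is still room left in the page.
      */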
 101static bool ionic_rx_buf_recycle(struct ionic_queue *q,
 102                                 struct ionic_buf_info *buf_info, u32 used)
 103{
 104        u32 size;
 105
 106        /* don't re-use pages allocated in low-mem condition */
 107        if (page_is_pfmemalloc(buf_info->page))
 108                return false;
 109
 110        /* don't re-use buffers from non-local numa nodes */
 111        if (page_to_nid(buf_info->page) != numa_mem_id())
 112                return false;
 113
 114        size = ALIGN(used, IONIC_PAGE_SPLIT_SZ);
 115        buf_info->page_offset += size;
 116        if (buf_info->page_offset >= IONIC_PAGE_SIZE)
 117                return false;
 118
 119        get_page(buf_info->page);
 120
 121        return true;
 122}
 123
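     /* Build an skb for a completed receive by attaching each of the
      * packet's buffers as a page fragment; buffers that cannot be
      * recycled are unmapped here and replaced on the next
      * ionic_rx_fill().
      */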
 124static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 125                                      struct ionic_desc_info *desc_info,
 126                                      struct ionic_rxq_comp *comp)
 127{
 128        struct net_device *netdev = q->lif->netdev;
 129        struct ionic_buf_info *buf_info;
 130        struct ionic_rx_stats *stats;
 131        struct device *dev = q->dev;
 132        struct sk_buff *skb;
 133        unsigned int i;
 134        u16 frag_len;
 135        u16 len;
 136
 137        stats = q_to_rx_stats(q);
 138
 139        buf_info = &desc_info->bufs[0];
 140        len = le16_to_cpu(comp->len);
 141
 142        prefetch(buf_info->page);
 143
 144        skb = napi_get_frags(&q_to_qcq(q)->napi);
 145        if (unlikely(!skb)) {
 146                net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
 147                                     netdev->name, q->name);
 148                stats->alloc_err++;
 149                return NULL;
 150        }
 151
 152        i = comp->num_sg_elems + 1;
 153        do {
 154                if (unlikely(!buf_info->page)) {
 155                        dev_kfree_skb(skb);
 156                        return NULL;
 157                }
 158
 159                frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
 160                len -= frag_len;
 161
 162                dma_sync_single_for_cpu(dev,
 163                                        buf_info->dma_addr + buf_info->page_offset,
 164                                        frag_len, DMA_FROM_DEVICE);
 165
 166                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 167                                buf_info->page, buf_info->page_offset, frag_len,
 168                                IONIC_PAGE_SIZE);
 169
 170                if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) {
 171                        dma_unmap_page(dev, buf_info->dma_addr,
 172                                       IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
 173                        ionic_rx_buf_reset(buf_info);
 174                }
 175
 176                buf_info++;
 177
 178                i--;
 179        } while (i > 0);
 180
 181        return skb;
 182}
 183
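     /* For small packets, copy the data into a freshly allocated linear
      * skb so the DMA-mapped page stays posted for reuse.
      */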
 184static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
 185                                          struct ionic_desc_info *desc_info,
 186                                          struct ionic_rxq_comp *comp)
 187{
 188        struct net_device *netdev = q->lif->netdev;
 189        struct ionic_buf_info *buf_info;
 190        struct ionic_rx_stats *stats;
 191        struct device *dev = q->dev;
 192        struct sk_buff *skb;
 193        u16 len;
 194
 195        stats = q_to_rx_stats(q);
 196
 197        buf_info = &desc_info->bufs[0];
 198        len = le16_to_cpu(comp->len);
 199
 200        skb = napi_alloc_skb(&q_to_qcq(q)->napi, len);
 201        if (unlikely(!skb)) {
 202                net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
 203                                     netdev->name, q->name);
 204                stats->alloc_err++;
 205                return NULL;
 206        }
 207
 208        if (unlikely(!buf_info->page)) {
 209                dev_kfree_skb(skb);
 210                return NULL;
 211        }
 212
 213        dma_sync_single_for_cpu(dev, buf_info->dma_addr + buf_info->page_offset,
 214                                len, DMA_FROM_DEVICE);
 215        skb_copy_to_linear_data(skb, page_address(buf_info->page) + buf_info->page_offset, len);
 216        dma_sync_single_for_device(dev, buf_info->dma_addr + buf_info->page_offset,
 217                                   len, DMA_FROM_DEVICE);
 218
 219        skb_put(skb, len);
 220        skb->protocol = eth_type_trans(skb, q->lif->netdev);
 221
 222        return skb;
 223}
 224
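     /* Per-completion receive processing: build the skb, then fill in
      * RSS hash, checksum, VLAN and hardware timestamp details before
      * handing it to GRO.
      */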
 225static void ionic_rx_clean(struct ionic_queue *q,
 226                           struct ionic_desc_info *desc_info,
 227                           struct ionic_cq_info *cq_info,
 228                           void *cb_arg)
 229{
 230        struct net_device *netdev = q->lif->netdev;
 231        struct ionic_qcq *qcq = q_to_qcq(q);
 232        struct ionic_rx_stats *stats;
 233        struct ionic_rxq_comp *comp;
 234        struct sk_buff *skb;
 235
 236        comp = cq_info->cq_desc + qcq->cq.desc_size - sizeof(*comp);
 237
 238        stats = q_to_rx_stats(q);
 239
 240        if (comp->status) {
 241                stats->dropped++;
 242                return;
 243        }
 244
 245        stats->pkts++;
 246        stats->bytes += le16_to_cpu(comp->len);
 247
 248        if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
 249                skb = ionic_rx_copybreak(q, desc_info, comp);
 250        else
 251                skb = ionic_rx_frags(q, desc_info, comp);
 252
 253        if (unlikely(!skb)) {
 254                stats->dropped++;
 255                return;
 256        }
 257
 258        skb_record_rx_queue(skb, q->index);
 259
 260        if (likely(netdev->features & NETIF_F_RXHASH)) {
 261                switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
 262                case IONIC_PKT_TYPE_IPV4:
 263                case IONIC_PKT_TYPE_IPV6:
 264                        skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
 265                                     PKT_HASH_TYPE_L3);
 266                        break;
 267                case IONIC_PKT_TYPE_IPV4_TCP:
 268                case IONIC_PKT_TYPE_IPV6_TCP:
 269                case IONIC_PKT_TYPE_IPV4_UDP:
 270                case IONIC_PKT_TYPE_IPV6_UDP:
 271                        skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
 272                                     PKT_HASH_TYPE_L4);
 273                        break;
 274                }
 275        }
 276
  277        if (likely(netdev->features & NETIF_F_RXCSUM) &&
  278            (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
  279                skb->ip_summed = CHECKSUM_COMPLETE;
  280                skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
  281                stats->csum_complete++;
  282        } else {
  283                /* no checksum from the device, let the stack verify */
  284                stats->csum_none++;
  285        }
 286
 287        if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
 288                     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
 289                     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
 290                stats->csum_error++;
 291
 292        if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 293            (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
 294                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 295                                       le16_to_cpu(comp->vlan_tci));
 296                stats->vlan_stripped++;
 297        }
 298
 299        if (unlikely(q->features & IONIC_RXQ_F_HWSTAMP)) {
 300                __le64 *cq_desc_hwstamp;
 301                u64 hwstamp;
 302
 303                cq_desc_hwstamp =
 304                        cq_info->cq_desc +
 305                        qcq->cq.desc_size -
 306                        sizeof(struct ionic_rxq_comp) -
 307                        IONIC_HWSTAMP_CQ_NEGOFFSET;
 308
 309                hwstamp = le64_to_cpu(*cq_desc_hwstamp);
 310
 311                if (hwstamp != IONIC_HWSTAMP_INVALID) {
 312                        skb_hwtstamps(skb)->hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
 313                        stats->hwstamp_valid++;
 314                } else {
 315                        stats->hwstamp_invalid++;
 316                }
 317        }
 318
 319        if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
 320                napi_gro_receive(&qcq->napi, skb);
 321        else
 322                napi_gro_frags(&qcq->napi);
 323}
 324
 325bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
 326{
 327        struct ionic_queue *q = cq->bound_q;
 328        struct ionic_desc_info *desc_info;
 329        struct ionic_rxq_comp *comp;
 330
 331        comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
 332
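             /* The color bit in the completion flips each time the device
              * wraps the completion ring, so a mismatch with done_color
              * means this entry has not been written by hardware yet.
              */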
 333        if (!color_match(comp->pkt_type_color, cq->done_color))
 334                return false;
 335
 336        /* check for empty queue */
 337        if (q->tail_idx == q->head_idx)
 338                return false;
 339
 340        if (q->tail_idx != le16_to_cpu(comp->comp_index))
 341                return false;
 342
 343        desc_info = &q->info[q->tail_idx];
 344        q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
 345
 346        /* clean the related q entry, only one per qc completion */
 347        ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
 348
 349        desc_info->cb = NULL;
 350        desc_info->cb_arg = NULL;
 351
 352        return true;
 353}
 354
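     /* Post fresh buffers to the Rx ring: each descriptor gets a main
      * buffer plus as many SG elements as needed to cover an MTU-sized
      * frame, and the doorbell is rung once after the ring is filled.
      */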
 355void ionic_rx_fill(struct ionic_queue *q)
 356{
 357        struct net_device *netdev = q->lif->netdev;
 358        struct ionic_desc_info *desc_info;
 359        struct ionic_rxq_sg_desc *sg_desc;
 360        struct ionic_rxq_sg_elem *sg_elem;
 361        struct ionic_buf_info *buf_info;
 362        struct ionic_rxq_desc *desc;
 363        unsigned int remain_len;
 364        unsigned int frag_len;
 365        unsigned int nfrags;
 366        unsigned int i, j;
 367        unsigned int len;
 368
 369        len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
 370
 371        for (i = ionic_q_space_avail(q); i; i--) {
 372                nfrags = 0;
 373                remain_len = len;
 374                desc_info = &q->info[q->head_idx];
 375                desc = desc_info->desc;
 376                buf_info = &desc_info->bufs[0];
 377
 378                if (!buf_info->page) { /* alloc a new buffer? */
 379                        if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
 380                                desc->addr = 0;
 381                                desc->len = 0;
 382                                return;
 383                        }
 384                }
 385
 386                /* fill main descriptor - buf[0] */
 387                desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
 388                frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
 389                desc->len = cpu_to_le16(frag_len);
 390                remain_len -= frag_len;
 391                buf_info++;
 392                nfrags++;
 393
 394                /* fill sg descriptors - buf[1..n] */
 395                sg_desc = desc_info->sg_desc;
 396                for (j = 0; remain_len > 0 && j < q->max_sg_elems; j++) {
 397                        sg_elem = &sg_desc->elems[j];
 398                        if (!buf_info->page) { /* alloc a new sg buffer? */
 399                                if (unlikely(ionic_rx_page_alloc(q, buf_info))) {
 400                                        sg_elem->addr = 0;
 401                                        sg_elem->len = 0;
 402                                        return;
 403                                }
 404                        }
 405
 406                        sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
 407                        frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
 408                        sg_elem->len = cpu_to_le16(frag_len);
 409                        remain_len -= frag_len;
 410                        buf_info++;
 411                        nfrags++;
 412                }
 413
 414                /* clear end sg element as a sentinel */
 415                if (j < q->max_sg_elems) {
 416                        sg_elem = &sg_desc->elems[j];
 417                        memset(sg_elem, 0, sizeof(*sg_elem));
 418                }
 419
 420                desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
 421                                              IONIC_RXQ_DESC_OPCODE_SIMPLE;
 422                desc_info->nbufs = nfrags;
 423
 424                ionic_rxq_post(q, false, ionic_rx_clean, NULL);
 425        }
 426
 427        ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
 428                         q->dbval | q->head_idx);
 429}
 430
 431void ionic_rx_empty(struct ionic_queue *q)
 432{
 433        struct ionic_desc_info *desc_info;
 434        struct ionic_buf_info *buf_info;
 435        unsigned int i, j;
 436
 437        for (i = 0; i < q->num_descs; i++) {
 438                desc_info = &q->info[i];
 439                for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
 440                        buf_info = &desc_info->bufs[j];
 441                        if (buf_info->page)
 442                                ionic_rx_page_free(q, buf_info);
 443                }
 444
 445                desc_info->nbufs = 0;
 446                desc_info->cb = NULL;
 447                desc_info->cb_arg = NULL;
 448        }
 449
 450        q->head_idx = 0;
 451        q->tail_idx = 0;
 452}
 453
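     /* Apply the coalescing value chosen by dynamic interrupt moderation
      * (DIM) and feed net_dim a fresh traffic sample for its next
      * decision.
      */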
 454static void ionic_dim_update(struct ionic_qcq *qcq)
 455{
 456        struct dim_sample dim_sample;
 457        struct ionic_lif *lif;
 458        unsigned int qi;
 459
 460        if (!qcq->intr.dim_coal_hw)
 461                return;
 462
 463        lif = qcq->q.lif;
 464        qi = qcq->cq.bound_q->index;
 465
 466        ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
 467                             lif->rxqcqs[qi]->intr.index,
 468                             qcq->intr.dim_coal_hw);
 469
 470        dim_update_sample(qcq->cq.bound_intr->rearm_count,
  471                          lif->rxqstats[qi].pkts,
  472                          lif->rxqstats[qi].bytes,
 473                          &dim_sample);
 474
 475        net_dim(&qcq->dim, dim_sample);
 476}
 477
 478int ionic_tx_napi(struct napi_struct *napi, int budget)
 479{
 480        struct ionic_qcq *qcq = napi_to_qcq(napi);
 481        struct ionic_cq *cq = napi_to_cq(napi);
 482        struct ionic_dev *idev;
 483        struct ionic_lif *lif;
 484        u32 work_done = 0;
 485        u32 flags = 0;
 486
 487        lif = cq->bound_q->lif;
 488        idev = &lif->ionic->idev;
 489
 490        work_done = ionic_cq_service(cq, budget,
 491                                     ionic_tx_service, NULL, NULL);
 492
 493        if (work_done < budget && napi_complete_done(napi, work_done)) {
 494                ionic_dim_update(qcq);
 495                flags |= IONIC_INTR_CRED_UNMASK;
 496                cq->bound_intr->rearm_count++;
 497        }
 498
 499        if (work_done || flags) {
 500                flags |= IONIC_INTR_CRED_RESET_COALESCE;
 501                ionic_intr_credits(idev->intr_ctrl,
 502                                   cq->bound_intr->index,
 503                                   work_done, flags);
 504        }
 505
 506        DEBUG_STATS_NAPI_POLL(qcq, work_done);
 507
 508        return work_done;
 509}
 510
 511int ionic_rx_napi(struct napi_struct *napi, int budget)
 512{
 513        struct ionic_qcq *qcq = napi_to_qcq(napi);
 514        struct ionic_cq *cq = napi_to_cq(napi);
 515        struct ionic_dev *idev;
 516        struct ionic_lif *lif;
 517        u16 rx_fill_threshold;
 518        u32 work_done = 0;
 519        u32 flags = 0;
 520
 521        lif = cq->bound_q->lif;
 522        idev = &lif->ionic->idev;
 523
 524        work_done = ionic_cq_service(cq, budget,
 525                                     ionic_rx_service, NULL, NULL);
 526
 527        rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
 528                                  cq->num_descs / IONIC_RX_FILL_DIV);
 529        if (work_done && ionic_q_space_avail(cq->bound_q) >= rx_fill_threshold)
 530                ionic_rx_fill(cq->bound_q);
 531
 532        if (work_done < budget && napi_complete_done(napi, work_done)) {
 533                ionic_dim_update(qcq);
 534                flags |= IONIC_INTR_CRED_UNMASK;
 535                cq->bound_intr->rearm_count++;
 536        }
 537
 538        if (work_done || flags) {
 539                flags |= IONIC_INTR_CRED_RESET_COALESCE;
 540                ionic_intr_credits(idev->intr_ctrl,
 541                                   cq->bound_intr->index,
 542                                   work_done, flags);
 543        }
 544
 545        DEBUG_STATS_NAPI_POLL(qcq, work_done);
 546
 547        return work_done;
 548}
 549
 550int ionic_txrx_napi(struct napi_struct *napi, int budget)
 551{
 552        struct ionic_qcq *qcq = napi_to_qcq(napi);
 553        struct ionic_cq *rxcq = napi_to_cq(napi);
 554        unsigned int qi = rxcq->bound_q->index;
 555        struct ionic_dev *idev;
 556        struct ionic_lif *lif;
 557        struct ionic_cq *txcq;
 558        u16 rx_fill_threshold;
 559        u32 rx_work_done = 0;
 560        u32 tx_work_done = 0;
 561        u32 flags = 0;
 562
 563        lif = rxcq->bound_q->lif;
 564        idev = &lif->ionic->idev;
 565        txcq = &lif->txqcqs[qi]->cq;
 566
 567        tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
 568                                        ionic_tx_service, NULL, NULL);
 569
 570        rx_work_done = ionic_cq_service(rxcq, budget,
 571                                        ionic_rx_service, NULL, NULL);
 572
 573        rx_fill_threshold = min_t(u16, IONIC_RX_FILL_THRESHOLD,
 574                                  rxcq->num_descs / IONIC_RX_FILL_DIV);
 575        if (rx_work_done && ionic_q_space_avail(rxcq->bound_q) >= rx_fill_threshold)
 576                ionic_rx_fill(rxcq->bound_q);
 577
 578        if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
 579                ionic_dim_update(qcq);
 580                flags |= IONIC_INTR_CRED_UNMASK;
 581                rxcq->bound_intr->rearm_count++;
 582        }
 583
 584        if (rx_work_done || flags) {
 585                flags |= IONIC_INTR_CRED_RESET_COALESCE;
 586                ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
 587                                   tx_work_done + rx_work_done, flags);
 588        }
 589
 590        DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
 591        DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);
 592
 593        return rx_work_done;
 594}
 595
 596static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
 597                                      void *data, size_t len)
 598{
 599        struct ionic_tx_stats *stats = q_to_tx_stats(q);
 600        struct device *dev = q->dev;
 601        dma_addr_t dma_addr;
 602
 603        dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
 604        if (dma_mapping_error(dev, dma_addr)) {
 605                net_warn_ratelimited("%s: DMA single map failed on %s!\n",
 606                                     q->lif->netdev->name, q->name);
 607                stats->dma_map_err++;
 608                return 0;
 609        }
 610        return dma_addr;
 611}
 612
 613static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
 614                                    const skb_frag_t *frag,
 615                                    size_t offset, size_t len)
 616{
 617        struct ionic_tx_stats *stats = q_to_tx_stats(q);
 618        struct device *dev = q->dev;
 619        dma_addr_t dma_addr;
 620
 621        dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
 622        if (dma_mapping_error(dev, dma_addr)) {
 623                net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
 624                                     q->lif->netdev->name, q->name);
 625                stats->dma_map_err++;
 626        }
 627        return dma_addr;
 628}
 629
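     /* DMA-map the skb head and all of its fragments, recording each
      * mapping in the descriptor's buf_info array; on failure, unwind
      * any mappings already made.
      */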
 630static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
 631                            struct ionic_desc_info *desc_info)
 632{
 633        struct ionic_buf_info *buf_info = desc_info->bufs;
 634        struct ionic_tx_stats *stats = q_to_tx_stats(q);
 635        struct device *dev = q->dev;
 636        dma_addr_t dma_addr;
 637        unsigned int nfrags;
 638        skb_frag_t *frag;
 639        int frag_idx;
 640
 641        dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
  642        if (!dma_addr) {
  643                /* ionic_tx_map_single() returns 0 on a mapping error */
  644                return -EIO;
  645        }
 646        buf_info->dma_addr = dma_addr;
 647        buf_info->len = skb_headlen(skb);
 648        buf_info++;
 649
 650        frag = skb_shinfo(skb)->frags;
 651        nfrags = skb_shinfo(skb)->nr_frags;
 652        for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
 653                dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
 654                if (dma_mapping_error(dev, dma_addr)) {
 655                        stats->dma_map_err++;
 656                        goto dma_fail;
 657                }
 658                buf_info->dma_addr = dma_addr;
 659                buf_info->len = skb_frag_size(frag);
 660                buf_info++;
 661        }
 662
 663        desc_info->nbufs = 1 + nfrags;
 664
 665        return 0;
 666
 667dma_fail:
 668        /* unwind the frag mappings and the head mapping */
 669        while (frag_idx > 0) {
 670                frag_idx--;
 671                buf_info--;
 672                dma_unmap_page(dev, buf_info->dma_addr,
 673                               buf_info->len, DMA_TO_DEVICE);
 674        }
  675        dma_unmap_single(dev, desc_info->bufs[0].dma_addr, desc_info->bufs[0].len, DMA_TO_DEVICE);
 676        return -EIO;
 677}
 678
 679static void ionic_tx_clean(struct ionic_queue *q,
 680                           struct ionic_desc_info *desc_info,
 681                           struct ionic_cq_info *cq_info,
 682                           void *cb_arg)
 683{
 684        struct ionic_buf_info *buf_info = desc_info->bufs;
 685        struct ionic_tx_stats *stats = q_to_tx_stats(q);
 686        struct ionic_qcq *qcq = q_to_qcq(q);
 687        struct sk_buff *skb = cb_arg;
 688        struct device *dev = q->dev;
 689        unsigned int i;
 690        u16 qi;
 691
 692        if (desc_info->nbufs) {
 693                dma_unmap_single(dev, (dma_addr_t)buf_info->dma_addr,
 694                                 buf_info->len, DMA_TO_DEVICE);
 695                buf_info++;
 696                for (i = 1; i < desc_info->nbufs; i++, buf_info++)
 697                        dma_unmap_page(dev, (dma_addr_t)buf_info->dma_addr,
 698                                       buf_info->len, DMA_TO_DEVICE);
 699        }
 700
 701        if (!skb)
 702                return;
 703
 704        qi = skb_get_queue_mapping(skb);
 705
 706        if (unlikely(q->features & IONIC_TXQ_F_HWSTAMP)) {
 707                if (cq_info) {
 708                        struct skb_shared_hwtstamps hwts = {};
 709                        __le64 *cq_desc_hwstamp;
 710                        u64 hwstamp;
 711
 712                        cq_desc_hwstamp =
 713                                cq_info->cq_desc +
 714                                qcq->cq.desc_size -
 715                                sizeof(struct ionic_txq_comp) -
 716                                IONIC_HWSTAMP_CQ_NEGOFFSET;
 717
 718                        hwstamp = le64_to_cpu(*cq_desc_hwstamp);
 719
 720                        if (hwstamp != IONIC_HWSTAMP_INVALID) {
 721                                hwts.hwtstamp = ionic_lif_phc_ktime(q->lif, hwstamp);
 722
 723                                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 724                                skb_tstamp_tx(skb, &hwts);
 725
 726                                stats->hwstamp_valid++;
 727                        } else {
 728                                stats->hwstamp_invalid++;
 729                        }
 730                }
 731
 732        } else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
 733                netif_wake_subqueue(q->lif->netdev, qi);
 734                q->wake++;
 735        }
 736
 737        desc_info->bytes = skb->len;
 738        stats->clean++;
 739
 740        dev_consume_skb_any(skb);
 741}
 742
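     /* Process one Tx completion, which may retire several descriptors,
      * and report the completed work to BQL unless this is the
      * timestamping queue.
      */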
 743bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
 744{
 745        struct ionic_queue *q = cq->bound_q;
 746        struct ionic_desc_info *desc_info;
 747        struct ionic_txq_comp *comp;
 748        int bytes = 0;
 749        int pkts = 0;
 750        u16 index;
 751
 752        comp = cq_info->cq_desc + cq->desc_size - sizeof(*comp);
 753
 754        if (!color_match(comp->color, cq->done_color))
 755                return false;
 756
 757        /* clean the related q entries, there could be
 758         * several q entries completed for each cq completion
 759         */
 760        do {
 761                desc_info = &q->info[q->tail_idx];
 762                desc_info->bytes = 0;
 763                index = q->tail_idx;
 764                q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
 765                ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
 766                if (desc_info->cb_arg) {
 767                        pkts++;
 768                        bytes += desc_info->bytes;
 769                }
 770                desc_info->cb = NULL;
 771                desc_info->cb_arg = NULL;
 772        } while (index != le16_to_cpu(comp->comp_index));
 773
  774        if (pkts && bytes && likely(!(q->features & IONIC_TXQ_F_HWSTAMP)))
 775                netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
 776
 777        return true;
 778}
 779
 780void ionic_tx_flush(struct ionic_cq *cq)
 781{
 782        struct ionic_dev *idev = &cq->lif->ionic->idev;
 783        u32 work_done;
 784
 785        work_done = ionic_cq_service(cq, cq->num_descs,
 786                                     ionic_tx_service, NULL, NULL);
 787        if (work_done)
 788                ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
 789                                   work_done, IONIC_INTR_CRED_RESET_COALESCE);
 790}
 791
 792void ionic_tx_empty(struct ionic_queue *q)
 793{
 794        struct ionic_desc_info *desc_info;
 795        int bytes = 0;
 796        int pkts = 0;
 797
 798        /* walk the not completed tx entries, if any */
 799        while (q->head_idx != q->tail_idx) {
 800                desc_info = &q->info[q->tail_idx];
 801                desc_info->bytes = 0;
 802                q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
 803                ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
 804                if (desc_info->cb_arg) {
 805                        pkts++;
 806                        bytes += desc_info->bytes;
 807                }
 808                desc_info->cb = NULL;
 809                desc_info->cb_arg = NULL;
 810        }
 811
  812        if (pkts && bytes && likely(!(q->features & IONIC_TXQ_F_HWSTAMP)))
 813                netdev_tx_completed_queue(q_to_ndq(q), pkts, bytes);
 814}
 815
 816static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
 817{
 818        int err;
 819
 820        err = skb_cow_head(skb, 0);
 821        if (err)
 822                return err;
 823
 824        if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
 825                inner_ip_hdr(skb)->check = 0;
 826                inner_tcp_hdr(skb)->check =
 827                        ~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
 828                                           inner_ip_hdr(skb)->daddr,
 829                                           0, IPPROTO_TCP, 0);
 830        } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
 831                inner_tcp_hdr(skb)->check =
 832                        ~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
 833                                         &inner_ipv6_hdr(skb)->daddr,
 834                                         0, IPPROTO_TCP, 0);
 835        }
 836
 837        return 0;
 838}
 839
 840static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
 841{
 842        int err;
 843
 844        err = skb_cow_head(skb, 0);
 845        if (err)
 846                return err;
 847
 848        if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
 849                ip_hdr(skb)->check = 0;
 850                tcp_hdr(skb)->check =
 851                        ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
 852                                           ip_hdr(skb)->daddr,
 853                                           0, IPPROTO_TCP, 0);
 854        } else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
 855                tcp_v6_gso_csum_prep(skb);
 856        }
 857
 858        return 0;
 859}
 860
 861static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
 862                              struct sk_buff *skb,
 863                              dma_addr_t addr, u8 nsge, u16 len,
 864                              unsigned int hdrlen, unsigned int mss,
 865                              bool outer_csum,
 866                              u16 vlan_tci, bool has_vlan,
 867                              bool start, bool done)
 868{
 869        u8 flags = 0;
 870        u64 cmd;
 871
 872        flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
 873        flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
 874        flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
 875        flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;
 876
 877        cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
 878        desc->cmd = cpu_to_le64(cmd);
 879        desc->len = cpu_to_le16(len);
 880        desc->vlan_tci = cpu_to_le16(vlan_tci);
 881        desc->hdr_len = cpu_to_le16(hdrlen);
 882        desc->mss = cpu_to_le16(mss);
 883
 884        if (start) {
 885                skb_tx_timestamp(skb);
  886                if (likely(!(q->features & IONIC_TXQ_F_HWSTAMP)))
 887                        netdev_tx_sent_queue(q_to_ndq(q), skb->len);
 888                ionic_txq_post(q, false, ionic_tx_clean, skb);
 889        } else {
 890                ionic_txq_post(q, done, NULL, NULL);
 891        }
 892}
 893
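     /* Transmit a TSO skb: map the whole skb once, then walk the mapped
      * buffers carving out one descriptor (plus SG elements) per
      * MSS-sized segment; the first segment also covers the headers.
      */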
 894static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
 895{
 896        struct ionic_tx_stats *stats = q_to_tx_stats(q);
 897        struct ionic_desc_info *desc_info;
 898        struct ionic_buf_info *buf_info;
 899        struct ionic_txq_sg_elem *elem;
 900        struct ionic_txq_desc *desc;
 901        unsigned int chunk_len;
 902        unsigned int frag_rem;
 903        unsigned int tso_rem;
 904        unsigned int seg_rem;
 905        dma_addr_t desc_addr;
 906        dma_addr_t frag_addr;
 907        unsigned int hdrlen;
 908        unsigned int len;
 909        unsigned int mss;
 910        bool start, done;
 911        bool outer_csum;
 912        bool has_vlan;
 913        u16 desc_len;
 914        u8 desc_nsge;
 915        u16 vlan_tci;
 916        bool encap;
 917        int err;
 918
 919        desc_info = &q->info[q->head_idx];
 920        buf_info = desc_info->bufs;
 921
 922        if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
 923                return -EIO;
 924
 925        len = skb->len;
 926        mss = skb_shinfo(skb)->gso_size;
 927        outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
 928                     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
 929        has_vlan = !!skb_vlan_tag_present(skb);
 930        vlan_tci = skb_vlan_tag_get(skb);
 931        encap = skb->encapsulation;
 932
 933        /* Preload inner-most TCP csum field with IP pseudo hdr
 934         * calculated with IP length set to zero.  HW will later
 935         * add in length to each TCP segment resulting from the TSO.
 936         */
 937
 938        if (encap)
 939                err = ionic_tx_tcp_inner_pseudo_csum(skb);
 940        else
 941                err = ionic_tx_tcp_pseudo_csum(skb);
 942        if (err)
 943                return err;
 944
 945        if (encap)
 946                hdrlen = skb_inner_transport_header(skb) - skb->data +
 947                         inner_tcp_hdrlen(skb);
 948        else
 949                hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
 950
 951        tso_rem = len;
 952        seg_rem = min(tso_rem, hdrlen + mss);
 953
 954        frag_addr = 0;
 955        frag_rem = 0;
 956
 957        start = true;
 958
 959        while (tso_rem > 0) {
 960                desc = NULL;
 961                elem = NULL;
 962                desc_addr = 0;
 963                desc_len = 0;
 964                desc_nsge = 0;
 965                /* use fragments until we have enough to post a single descriptor */
 966                while (seg_rem > 0) {
 967                        /* if the fragment is exhausted then move to the next one */
 968                        if (frag_rem == 0) {
 969                                /* grab the next fragment */
 970                                frag_addr = buf_info->dma_addr;
 971                                frag_rem = buf_info->len;
 972                                buf_info++;
 973                        }
 974                        chunk_len = min(frag_rem, seg_rem);
 975                        if (!desc) {
 976                                /* fill main descriptor */
 977                                desc = desc_info->txq_desc;
 978                                elem = desc_info->txq_sg_desc->elems;
 979                                desc_addr = frag_addr;
 980                                desc_len = chunk_len;
 981                        } else {
 982                                /* fill sg descriptor */
 983                                elem->addr = cpu_to_le64(frag_addr);
 984                                elem->len = cpu_to_le16(chunk_len);
 985                                elem++;
 986                                desc_nsge++;
 987                        }
 988                        frag_addr += chunk_len;
 989                        frag_rem -= chunk_len;
 990                        tso_rem -= chunk_len;
 991                        seg_rem -= chunk_len;
 992                }
 993                seg_rem = min(tso_rem, mss);
 994                done = (tso_rem == 0);
 995                /* post descriptor */
 996                ionic_tx_tso_post(q, desc, skb,
 997                                  desc_addr, desc_nsge, desc_len,
 998                                  hdrlen, mss, outer_csum, vlan_tci, has_vlan,
 999                                  start, done);
1000                start = false;
1001                /* Buffer information is stored with the first tso descriptor */
1002                desc_info = &q->info[q->head_idx];
1003                desc_info->nbufs = 0;
1004        }
1005
1006        stats->pkts += DIV_ROUND_UP(len - hdrlen, mss);
1007        stats->bytes += len;
1008        stats->tso++;
 1009        stats->tso_bytes += len;
1010
1011        return 0;
1012}
1013
1014static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
1015                              struct ionic_desc_info *desc_info)
1016{
1017        struct ionic_txq_desc *desc = desc_info->txq_desc;
1018        struct ionic_buf_info *buf_info = desc_info->bufs;
1019        struct ionic_tx_stats *stats = q_to_tx_stats(q);
1020        bool has_vlan;
1021        u8 flags = 0;
1022        bool encap;
1023        u64 cmd;
1024
1025        has_vlan = !!skb_vlan_tag_present(skb);
1026        encap = skb->encapsulation;
1027
1028        flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
1029        flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
1030
1031        cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
1032                                  flags, skb_shinfo(skb)->nr_frags,
1033                                  buf_info->dma_addr);
1034        desc->cmd = cpu_to_le64(cmd);
1035        desc->len = cpu_to_le16(buf_info->len);
1036        if (has_vlan) {
1037                desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
1038                stats->vlan_inserted++;
1039        } else {
1040                desc->vlan_tci = 0;
1041        }
1042        desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
1043        desc->csum_offset = cpu_to_le16(skb->csum_offset);
1044
1045        if (skb_csum_is_sctp(skb))
1046                stats->crc32_csum++;
1047        else
1048                stats->csum++;
1049
1050        return 0;
1051}
1052
1053static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
1054                                 struct ionic_desc_info *desc_info)
1055{
1056        struct ionic_txq_desc *desc = desc_info->txq_desc;
1057        struct ionic_buf_info *buf_info = desc_info->bufs;
1058        struct ionic_tx_stats *stats = q_to_tx_stats(q);
1059        bool has_vlan;
1060        u8 flags = 0;
1061        bool encap;
1062        u64 cmd;
1063
1064        has_vlan = !!skb_vlan_tag_present(skb);
1065        encap = skb->encapsulation;
1066
1067        flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
1068        flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
1069
1070        cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
1071                                  flags, skb_shinfo(skb)->nr_frags,
1072                                  buf_info->dma_addr);
1073        desc->cmd = cpu_to_le64(cmd);
1074        desc->len = cpu_to_le16(buf_info->len);
1075        if (has_vlan) {
1076                desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
1077                stats->vlan_inserted++;
1078        } else {
1079                desc->vlan_tci = 0;
1080        }
1081        desc->csum_start = 0;
1082        desc->csum_offset = 0;
1083
1084        stats->csum_none++;
1085
1086        return 0;
1087}
1088
1089static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb,
1090                              struct ionic_desc_info *desc_info)
1091{
1092        struct ionic_txq_sg_desc *sg_desc = desc_info->txq_sg_desc;
1093        struct ionic_buf_info *buf_info = &desc_info->bufs[1];
1094        struct ionic_txq_sg_elem *elem = sg_desc->elems;
1095        struct ionic_tx_stats *stats = q_to_tx_stats(q);
1096        unsigned int i;
1097
1098        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, buf_info++, elem++) {
1099                elem->addr = cpu_to_le64(buf_info->dma_addr);
1100                elem->len = cpu_to_le16(buf_info->len);
1101        }
1102
1103        stats->frags += skb_shinfo(skb)->nr_frags;
1104
1105        return 0;
1106}
1107
1108static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
1109{
1110        struct ionic_desc_info *desc_info = &q->info[q->head_idx];
1111        struct ionic_tx_stats *stats = q_to_tx_stats(q);
1112        int err;
1113
1114        if (unlikely(ionic_tx_map_skb(q, skb, desc_info)))
1115                return -EIO;
1116
1117        /* set up the initial descriptor */
1118        if (skb->ip_summed == CHECKSUM_PARTIAL)
1119                err = ionic_tx_calc_csum(q, skb, desc_info);
1120        else
1121                err = ionic_tx_calc_no_csum(q, skb, desc_info);
1122        if (err)
1123                return err;
1124
1125        /* add frags */
1126        err = ionic_tx_skb_frags(q, skb, desc_info);
1127        if (err)
1128                return err;
1129
1130        skb_tx_timestamp(skb);
1131        stats->pkts++;
1132        stats->bytes += skb->len;
1133
 1134        if (likely(!(q->features & IONIC_TXQ_F_HWSTAMP)))
1135                netdev_tx_sent_queue(q_to_ndq(q), skb->len);
1136        ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
1137
1138        return 0;
1139}
1140
1141static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
1142{
1143        struct ionic_tx_stats *stats = q_to_tx_stats(q);
1144        int ndescs;
1145        int err;
1146
1147        /* Each desc is mss long max, so a descriptor for each gso_seg */
1148        if (skb_is_gso(skb))
1149                ndescs = skb_shinfo(skb)->gso_segs;
1150        else
1151                ndescs = 1;
1152
1153        /* If non-TSO, just need 1 desc and nr_frags sg elems */
1154        if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
1155                return ndescs;
1156
1157        /* Too many frags, so linearize */
1158        err = skb_linearize(skb);
1159        if (err)
1160                return err;
1161
1162        stats->linearize++;
1163
1164        return ndescs;
1165}
1166
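     /* Stop the subqueue if there is not enough descriptor space for the
      * next packet, re-checking afterwards to close the race with
      * ionic_tx_clean() freeing descriptors.
      */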
1167static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
1168{
1169        int stopped = 0;
1170
1171        if (unlikely(!ionic_q_has_space(q, ndescs))) {
1172                netif_stop_subqueue(q->lif->netdev, q->index);
1173                q->stop++;
1174                stopped = 1;
1175
1176                /* Might race with ionic_tx_clean, check again */
1177                smp_rmb();
1178                if (ionic_q_has_space(q, ndescs)) {
1179                        netif_wake_subqueue(q->lif->netdev, q->index);
1180                        stopped = 0;
1181                }
1182        }
1183
1184        return stopped;
1185}
1186
1187static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
1188                                            struct net_device *netdev)
1189{
1190        struct ionic_lif *lif = netdev_priv(netdev);
1191        struct ionic_queue *q = &lif->hwstamp_txq->q;
1192        int err, ndescs;
1193
1194        /* Does not stop/start txq, because we post to a separate tx queue
1195         * for timestamping, and if a packet can't be posted immediately to
1196         * the timestamping queue, it is dropped.
1197         */
1198
1199        ndescs = ionic_tx_descs_needed(q, skb);
1200        if (unlikely(ndescs < 0))
1201                goto err_out_drop;
1202
1203        if (unlikely(!ionic_q_has_space(q, ndescs)))
1204                goto err_out_drop;
1205
1206        skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP;
1207        if (skb_is_gso(skb))
1208                err = ionic_tx_tso(q, skb);
1209        else
1210                err = ionic_tx(q, skb);
1211
1212        if (err)
1213                goto err_out_drop;
1214
1215        return NETDEV_TX_OK;
1216
1217err_out_drop:
1218        q->drop++;
1219        dev_kfree_skb(skb);
1220        return NETDEV_TX_OK;
1221}
1222
1223netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
1224{
1225        u16 queue_index = skb_get_queue_mapping(skb);
1226        struct ionic_lif *lif = netdev_priv(netdev);
1227        struct ionic_queue *q;
1228        int ndescs;
1229        int err;
1230
1231        if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
1232                dev_kfree_skb(skb);
1233                return NETDEV_TX_OK;
1234        }
1235
1236        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
1237                if (lif->hwstamp_txq && lif->phc->ts_config_tx_mode)
1238                        return ionic_start_hwstamp_xmit(skb, netdev);
1239
1240        if (unlikely(queue_index >= lif->nxqs))
1241                queue_index = 0;
1242        q = &lif->txqcqs[queue_index]->q;
1243
1244        ndescs = ionic_tx_descs_needed(q, skb);
1245        if (ndescs < 0)
1246                goto err_out_drop;
1247
1248        if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
1249                return NETDEV_TX_BUSY;
1250
1251        if (skb_is_gso(skb))
1252                err = ionic_tx_tso(q, skb);
1253        else
1254                err = ionic_tx(q, skb);
1255
1256        if (err)
1257                goto err_out_drop;
1258
1259        /* Stop the queue if there aren't descriptors for the next packet.
1260         * Since our SG lists per descriptor take care of most of the possible
1261         * fragmentation, we don't need to have many descriptors available.
1262         */
1263        ionic_maybe_stop_tx(q, 4);
1264
1265        return NETDEV_TX_OK;
1266
1267err_out_drop:
1268        q->stop++;
1269        q->drop++;
1270        dev_kfree_skb(skb);
1271        return NETDEV_TX_OK;
1272}
1273