linux/drivers/net/ethernet/intel/ice/ice_txrx.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4/* The driver transmit and receive code */
   5
   6#include <linux/prefetch.h>
   7#include <linux/mm.h>
   8#include <linux/bpf_trace.h>
   9#include <net/xdp.h>
  10#include "ice_txrx_lib.h"
  11#include "ice_lib.h"
  12#include "ice.h"
  13#include "ice_dcb_lib.h"
  14#include "ice_xsk.h"
  15
  16#define ICE_RX_HDR_SIZE         256
  17
  18#define FDIR_DESC_RXDID 0x40
  19#define ICE_FDIR_CLEAN_DELAY 10
  20
  21/**
  22 * ice_prgm_fdir_fltr - Program a Flow Director filter
  23 * @vsi: VSI to send dummy packet
  24 * @fdir_desc: flow director descriptor
  25 * @raw_packet: allocated buffer for flow director
  26 */
  27int
  28ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
  29                   u8 *raw_packet)
  30{
  31        struct ice_tx_buf *tx_buf, *first;
  32        struct ice_fltr_desc *f_desc;
  33        struct ice_tx_desc *tx_desc;
  34        struct ice_ring *tx_ring;
  35        struct device *dev;
  36        dma_addr_t dma;
  37        u32 td_cmd;
  38        u16 i;
  39
  40        /* VSI and Tx ring */
  41        if (!vsi)
  42                return -ENOENT;
  43        tx_ring = vsi->tx_rings[0];
  44        if (!tx_ring || !tx_ring->desc)
  45                return -ENOENT;
  46        dev = tx_ring->dev;
  47
  48        /* we are using two descriptors to add/del a filter and we can wait */
  49        for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
  50                if (!i)
  51                        return -EAGAIN;
  52                msleep_interruptible(1);
  53        }
  54
  55        dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
  56                             DMA_TO_DEVICE);
  57
  58        if (dma_mapping_error(dev, dma))
  59                return -EINVAL;
  60
  61        /* grab the next descriptor */
  62        i = tx_ring->next_to_use;
  63        first = &tx_ring->tx_buf[i];
  64        f_desc = ICE_TX_FDIRDESC(tx_ring, i);
  65        memcpy(f_desc, fdir_desc, sizeof(*f_desc));
  66
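             /* the filter programming descriptor is followed by a data
              * descriptor pointing at the caller-supplied dummy packet;
              * hardware consumes the pair to program the filter
              */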
  67        i++;
  68        i = (i < tx_ring->count) ? i : 0;
  69        tx_desc = ICE_TX_DESC(tx_ring, i);
  70        tx_buf = &tx_ring->tx_buf[i];
  71
  72        i++;
  73        tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
  74
  75        memset(tx_buf, 0, sizeof(*tx_buf));
  76        dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
  77        dma_unmap_addr_set(tx_buf, dma, dma);
  78
  79        tx_desc->buf_addr = cpu_to_le64(dma);
  80        td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
  81                 ICE_TX_DESC_CMD_RE;
  82
  83        tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
  84        tx_buf->raw_buf = raw_packet;
  85
  86        tx_desc->cmd_type_offset_bsz =
  87                ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
  88
  89        /* Force memory write to complete before letting h/w know
  90         * there are new descriptors to fetch.
  91         */
  92        wmb();
  93
  94        /* mark the data descriptor to be watched */
  95        first->next_to_watch = tx_desc;
  96
  97        writel(tx_ring->next_to_use, tx_ring->tail);
  98
  99        return 0;
 100}
 101
 102/**
 103 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 104 * @ring: the ring that owns the buffer
 105 * @tx_buf: the buffer to free
 106 */
 107static void
 108ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
 109{
 110        if (tx_buf->skb) {
 111                if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
 112                        devm_kfree(ring->dev, tx_buf->raw_buf);
 113                else if (ice_ring_is_xdp(ring))
 114                        page_frag_free(tx_buf->raw_buf);
 115                else
 116                        dev_kfree_skb_any(tx_buf->skb);
 117                if (dma_unmap_len(tx_buf, len))
 118                        dma_unmap_single(ring->dev,
 119                                         dma_unmap_addr(tx_buf, dma),
 120                                         dma_unmap_len(tx_buf, len),
 121                                         DMA_TO_DEVICE);
 122        } else if (dma_unmap_len(tx_buf, len)) {
 123                dma_unmap_page(ring->dev,
 124                               dma_unmap_addr(tx_buf, dma),
 125                               dma_unmap_len(tx_buf, len),
 126                               DMA_TO_DEVICE);
 127        }
 128
 129        tx_buf->next_to_watch = NULL;
 130        tx_buf->skb = NULL;
 131        dma_unmap_len_set(tx_buf, len, 0);
 132        /* tx_buf must be completely set up in the transmit path */
 133}
 134
 135static struct netdev_queue *txring_txq(const struct ice_ring *ring)
 136{
 137        return netdev_get_tx_queue(ring->netdev, ring->q_index);
 138}
 139
 140/**
  141 * ice_clean_tx_ring - Free all Tx buffers in a ring
 142 * @tx_ring: ring to be cleaned
 143 */
 144void ice_clean_tx_ring(struct ice_ring *tx_ring)
 145{
 146        u16 i;
 147
 148        if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
 149                ice_xsk_clean_xdp_ring(tx_ring);
 150                goto tx_skip_free;
 151        }
 152
 153        /* ring already cleared, nothing to do */
 154        if (!tx_ring->tx_buf)
 155                return;
 156
 157        /* Free all the Tx ring sk_buffs */
 158        for (i = 0; i < tx_ring->count; i++)
 159                ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
 160
 161tx_skip_free:
 162        memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
 163
 164        /* Zero out the descriptor ring */
 165        memset(tx_ring->desc, 0, tx_ring->size);
 166
 167        tx_ring->next_to_use = 0;
 168        tx_ring->next_to_clean = 0;
 169
 170        if (!tx_ring->netdev)
 171                return;
 172
 173        /* cleanup Tx queue statistics */
 174        netdev_tx_reset_queue(txring_txq(tx_ring));
 175}
 176
 177/**
 178 * ice_free_tx_ring - Free Tx resources per queue
 179 * @tx_ring: Tx descriptor ring for a specific queue
 180 *
 181 * Free all transmit software resources
 182 */
 183void ice_free_tx_ring(struct ice_ring *tx_ring)
 184{
 185        ice_clean_tx_ring(tx_ring);
 186        devm_kfree(tx_ring->dev, tx_ring->tx_buf);
 187        tx_ring->tx_buf = NULL;
 188
 189        if (tx_ring->desc) {
 190                dmam_free_coherent(tx_ring->dev, tx_ring->size,
 191                                   tx_ring->desc, tx_ring->dma);
 192                tx_ring->desc = NULL;
 193        }
 194}
 195
 196/**
 197 * ice_clean_tx_irq - Reclaim resources after transmit completes
 198 * @tx_ring: Tx ring to clean
 199 * @napi_budget: Used to determine if we are in netpoll
 200 *
  201 * Returns true if there's any budget left (i.e. the clean is finished)
 202 */
 203static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
 204{
 205        unsigned int total_bytes = 0, total_pkts = 0;
 206        unsigned int budget = ICE_DFLT_IRQ_WORK;
 207        struct ice_vsi *vsi = tx_ring->vsi;
 208        s16 i = tx_ring->next_to_clean;
 209        struct ice_tx_desc *tx_desc;
 210        struct ice_tx_buf *tx_buf;
 211
 212        tx_buf = &tx_ring->tx_buf[i];
 213        tx_desc = ICE_TX_DESC(tx_ring, i);
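             /* use i as a negative offset from the end of the ring so that
              * the wrap back to the start is detected by a simple !i check
              */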
 214        i -= tx_ring->count;
 215
 216        prefetch(&vsi->state);
 217
 218        do {
 219                struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
 220
 221                /* if next_to_watch is not set then there is no work pending */
 222                if (!eop_desc)
 223                        break;
 224
 225                smp_rmb();      /* prevent any other reads prior to eop_desc */
 226
 227                /* if the descriptor isn't done, no work yet to do */
 228                if (!(eop_desc->cmd_type_offset_bsz &
 229                      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
 230                        break;
 231
 232                /* clear next_to_watch to prevent false hangs */
 233                tx_buf->next_to_watch = NULL;
 234
 235                /* update the statistics for this packet */
 236                total_bytes += tx_buf->bytecount;
 237                total_pkts += tx_buf->gso_segs;
 238
 239                if (ice_ring_is_xdp(tx_ring))
 240                        page_frag_free(tx_buf->raw_buf);
 241                else
 242                        /* free the skb */
 243                        napi_consume_skb(tx_buf->skb, napi_budget);
 244
 245                /* unmap skb header data */
 246                dma_unmap_single(tx_ring->dev,
 247                                 dma_unmap_addr(tx_buf, dma),
 248                                 dma_unmap_len(tx_buf, len),
 249                                 DMA_TO_DEVICE);
 250
 251                /* clear tx_buf data */
 252                tx_buf->skb = NULL;
 253                dma_unmap_len_set(tx_buf, len, 0);
 254
 255                /* unmap remaining buffers */
 256                while (tx_desc != eop_desc) {
 257                        tx_buf++;
 258                        tx_desc++;
 259                        i++;
 260                        if (unlikely(!i)) {
 261                                i -= tx_ring->count;
 262                                tx_buf = tx_ring->tx_buf;
 263                                tx_desc = ICE_TX_DESC(tx_ring, 0);
 264                        }
 265
 266                        /* unmap any remaining paged data */
 267                        if (dma_unmap_len(tx_buf, len)) {
 268                                dma_unmap_page(tx_ring->dev,
 269                                               dma_unmap_addr(tx_buf, dma),
 270                                               dma_unmap_len(tx_buf, len),
 271                                               DMA_TO_DEVICE);
 272                                dma_unmap_len_set(tx_buf, len, 0);
 273                        }
 274                }
 275
 276                /* move us one more past the eop_desc for start of next pkt */
 277                tx_buf++;
 278                tx_desc++;
 279                i++;
 280                if (unlikely(!i)) {
 281                        i -= tx_ring->count;
 282                        tx_buf = tx_ring->tx_buf;
 283                        tx_desc = ICE_TX_DESC(tx_ring, 0);
 284                }
 285
 286                prefetch(tx_desc);
 287
 288                /* update budget accounting */
 289                budget--;
 290        } while (likely(budget));
 291
 292        i += tx_ring->count;
 293        tx_ring->next_to_clean = i;
 294
 295        ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
 296
 297        if (ice_ring_is_xdp(tx_ring))
 298                return !!budget;
 299
 300        netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
 301                                  total_bytes);
 302
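     /* wake the queue only once there is room for at least two worst-case
      * frames (DESC_NEEDED covers a maximally fragmented frame) so we don't
      * bounce between stopping and waking the queue
      */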
 303#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
 304        if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
 305                     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
 306                /* Make sure that anybody stopping the queue after this
 307                 * sees the new next_to_clean.
 308                 */
 309                smp_mb();
 310                if (__netif_subqueue_stopped(tx_ring->netdev,
 311                                             tx_ring->q_index) &&
 312                    !test_bit(__ICE_DOWN, vsi->state)) {
 313                        netif_wake_subqueue(tx_ring->netdev,
 314                                            tx_ring->q_index);
 315                        ++tx_ring->tx_stats.restart_q;
 316                }
 317        }
 318
 319        return !!budget;
 320}
 321
 322/**
 323 * ice_setup_tx_ring - Allocate the Tx descriptors
 324 * @tx_ring: the Tx ring to set up
 325 *
 326 * Return 0 on success, negative on error
 327 */
 328int ice_setup_tx_ring(struct ice_ring *tx_ring)
 329{
 330        struct device *dev = tx_ring->dev;
 331
 332        if (!dev)
 333                return -ENOMEM;
 334
 335        /* warn if we are about to overwrite the pointer */
 336        WARN_ON(tx_ring->tx_buf);
 337        tx_ring->tx_buf =
 338                devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
 339                             GFP_KERNEL);
 340        if (!tx_ring->tx_buf)
 341                return -ENOMEM;
 342
 343        /* round up to nearest page */
 344        tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
 345                              PAGE_SIZE);
 346        tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
 347                                            GFP_KERNEL);
 348        if (!tx_ring->desc) {
 349                dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
 350                        tx_ring->size);
 351                goto err;
 352        }
 353
 354        tx_ring->next_to_use = 0;
 355        tx_ring->next_to_clean = 0;
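             /* prev_pkt feeds the Tx hang detection logic; -1 means no packet
              * count has been recorded for this ring yet
              */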
 356        tx_ring->tx_stats.prev_pkt = -1;
 357        return 0;
 358
 359err:
 360        devm_kfree(dev, tx_ring->tx_buf);
 361        tx_ring->tx_buf = NULL;
 362        return -ENOMEM;
 363}
 364
 365/**
 366 * ice_clean_rx_ring - Free Rx buffers
 367 * @rx_ring: ring to be cleaned
 368 */
 369void ice_clean_rx_ring(struct ice_ring *rx_ring)
 370{
 371        struct device *dev = rx_ring->dev;
 372        u16 i;
 373
 374        /* ring already cleared, nothing to do */
 375        if (!rx_ring->rx_buf)
 376                return;
 377
 378        if (rx_ring->xsk_umem) {
 379                ice_xsk_clean_rx_ring(rx_ring);
 380                goto rx_skip_free;
 381        }
 382
 383        /* Free all the Rx ring sk_buffs */
 384        for (i = 0; i < rx_ring->count; i++) {
 385                struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
 386
 387                if (rx_buf->skb) {
 388                        dev_kfree_skb(rx_buf->skb);
 389                        rx_buf->skb = NULL;
 390                }
 391                if (!rx_buf->page)
 392                        continue;
 393
 394                /* Invalidate cache lines that may have been written to by
  395         * the device so that we avoid corrupting memory.
 396                 */
 397                dma_sync_single_range_for_cpu(dev, rx_buf->dma,
 398                                              rx_buf->page_offset,
 399                                              rx_ring->rx_buf_len,
 400                                              DMA_FROM_DEVICE);
 401
 402                /* free resources associated with mapping */
 403                dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
 404                                     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
 405                __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
 406
 407                rx_buf->page = NULL;
 408                rx_buf->page_offset = 0;
 409        }
 410
 411rx_skip_free:
 412        memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
 413
 414        /* Zero out the descriptor ring */
 415        memset(rx_ring->desc, 0, rx_ring->size);
 416
 417        rx_ring->next_to_alloc = 0;
 418        rx_ring->next_to_clean = 0;
 419        rx_ring->next_to_use = 0;
 420}
 421
 422/**
 423 * ice_free_rx_ring - Free Rx resources
 424 * @rx_ring: ring to clean the resources from
 425 *
 426 * Free all receive software resources
 427 */
 428void ice_free_rx_ring(struct ice_ring *rx_ring)
 429{
 430        ice_clean_rx_ring(rx_ring);
 431        if (rx_ring->vsi->type == ICE_VSI_PF)
 432                if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
 433                        xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 434        rx_ring->xdp_prog = NULL;
 435        devm_kfree(rx_ring->dev, rx_ring->rx_buf);
 436        rx_ring->rx_buf = NULL;
 437
 438        if (rx_ring->desc) {
 439                dmam_free_coherent(rx_ring->dev, rx_ring->size,
 440                                   rx_ring->desc, rx_ring->dma);
 441                rx_ring->desc = NULL;
 442        }
 443}
 444
 445/**
 446 * ice_setup_rx_ring - Allocate the Rx descriptors
 447 * @rx_ring: the Rx ring to set up
 448 *
 449 * Return 0 on success, negative on error
 450 */
 451int ice_setup_rx_ring(struct ice_ring *rx_ring)
 452{
 453        struct device *dev = rx_ring->dev;
 454
 455        if (!dev)
 456                return -ENOMEM;
 457
 458        /* warn if we are about to overwrite the pointer */
 459        WARN_ON(rx_ring->rx_buf);
 460        rx_ring->rx_buf =
 461                devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
 462                             GFP_KERNEL);
 463        if (!rx_ring->rx_buf)
 464                return -ENOMEM;
 465
 466        /* round up to nearest page */
 467        rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
 468                              PAGE_SIZE);
 469        rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
 470                                            GFP_KERNEL);
 471        if (!rx_ring->desc) {
 472                dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
 473                        rx_ring->size);
 474                goto err;
 475        }
 476
 477        rx_ring->next_to_use = 0;
 478        rx_ring->next_to_clean = 0;
 479
 480        if (ice_is_xdp_ena_vsi(rx_ring->vsi))
 481                WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
 482
 483        if (rx_ring->vsi->type == ICE_VSI_PF &&
 484            !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
 485                if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
 486                                     rx_ring->q_index))
 487                        goto err;
 488        return 0;
 489
 490err:
 491        devm_kfree(dev, rx_ring->rx_buf);
 492        rx_ring->rx_buf = NULL;
 493        return -ENOMEM;
 494}
 495
 496/**
 497 * ice_rx_offset - Return expected offset into page to access data
 498 * @rx_ring: Ring we are requesting offset of
 499 *
 500 * Returns the offset value for ring into the data buffer.
 501 */
 502static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
 503{
 504        if (ice_ring_uses_build_skb(rx_ring))
 505                return ICE_SKB_PAD;
 506        else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
 507                return XDP_PACKET_HEADROOM;
 508
 509        return 0;
 510}
 511
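     /**
      * ice_rx_frame_truesize - get the truesize of a received frame
      * @rx_ring: Rx ring the frame was received on
      * @size: packet length (only used when PAGE_SIZE >= 8192)
      *
      * Used to populate xdp_buff::frame_sz. With 4K pages the buffer is always
      * half a page; with larger pages the truesize is derived from the packet
      * length plus, when headroom is in use, the headroom and skb_shared_info
      * overhead.
      */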
 512static unsigned int
 513ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
 514{
 515        unsigned int truesize;
 516
 517#if (PAGE_SIZE < 8192)
 518        truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
 519#else
 520        truesize = ice_rx_offset(rx_ring) ?
 521                SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
 522                SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
 523                SKB_DATA_ALIGN(size);
 524#endif
 525        return truesize;
 526}
 527
 528/**
 529 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 530 * @rx_ring: Rx ring
 531 * @xdp: xdp_buff used as input to the XDP program
 532 * @xdp_prog: XDP program to run
 533 *
 534 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 535 */
 536static int
 537ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
 538            struct bpf_prog *xdp_prog)
 539{
 540        int err, result = ICE_XDP_PASS;
 541        struct ice_ring *xdp_ring;
 542        u32 act;
 543
 544        act = bpf_prog_run_xdp(xdp_prog, xdp);
 545        switch (act) {
 546        case XDP_PASS:
 547                break;
 548        case XDP_TX:
 549                xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
 550                result = ice_xmit_xdp_buff(xdp, xdp_ring);
 551                break;
 552        case XDP_REDIRECT:
 553                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
 554                result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
 555                break;
 556        default:
 557                bpf_warn_invalid_xdp_action(act);
 558                fallthrough;
 559        case XDP_ABORTED:
 560                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 561                fallthrough;
 562        case XDP_DROP:
 563                result = ICE_XDP_CONSUMED;
 564                break;
 565        }
 566
 567        return result;
 568}
 569
 570/**
 571 * ice_xdp_xmit - submit packets to XDP ring for transmission
 572 * @dev: netdev
 573 * @n: number of XDP frames to be transmitted
 574 * @frames: XDP frames to be transmitted
 575 * @flags: transmit flags
 576 *
  577 * Returns the number of frames successfully sent. Frames that fail are
  578 * freed via the XDP return API.
  579 * For error cases, a negative errno code is returned and no frames
  580 * are transmitted (the caller must handle freeing the frames).
 581 */
 582int
 583ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 584             u32 flags)
 585{
 586        struct ice_netdev_priv *np = netdev_priv(dev);
 587        unsigned int queue_index = smp_processor_id();
 588        struct ice_vsi *vsi = np->vsi;
 589        struct ice_ring *xdp_ring;
 590        int drops = 0, i;
 591
 592        if (test_bit(__ICE_DOWN, vsi->state))
 593                return -ENETDOWN;
 594
 595        if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
 596                return -ENXIO;
 597
 598        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 599                return -EINVAL;
 600
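             /* each CPU has an XDP Tx ring of its own; queue_index was taken
              * from smp_processor_id() above
              */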
 601        xdp_ring = vsi->xdp_rings[queue_index];
 602        for (i = 0; i < n; i++) {
 603                struct xdp_frame *xdpf = frames[i];
 604                int err;
 605
 606                err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
 607                if (err != ICE_XDP_TX) {
 608                        xdp_return_frame_rx_napi(xdpf);
 609                        drops++;
 610                }
 611        }
 612
 613        if (unlikely(flags & XDP_XMIT_FLUSH))
 614                ice_xdp_ring_update_tail(xdp_ring);
 615
 616        return n - drops;
 617}
 618
 619/**
 620 * ice_alloc_mapped_page - recycle or make a new page
 621 * @rx_ring: ring to use
 622 * @bi: rx_buf struct to modify
 623 *
 624 * Returns true if the page was successfully allocated or
 625 * reused.
 626 */
 627static bool
 628ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
 629{
 630        struct page *page = bi->page;
 631        dma_addr_t dma;
 632
 633        /* since we are recycling buffers we should seldom need to alloc */
 634        if (likely(page))
 635                return true;
 636
 637        /* alloc new page for storage */
 638        page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
 639        if (unlikely(!page)) {
 640                rx_ring->rx_stats.alloc_page_failed++;
 641                return false;
 642        }
 643
 644        /* map page for use */
 645        dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
 646                                 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
 647
  648        /* if the mapping failed, free the memory back to the system since
  649         * there isn't much point in holding memory we can't use
 650         */
 651        if (dma_mapping_error(rx_ring->dev, dma)) {
 652                __free_pages(page, ice_rx_pg_order(rx_ring));
 653                rx_ring->rx_stats.alloc_page_failed++;
 654                return false;
 655        }
 656
 657        bi->dma = dma;
 658        bi->page = page;
 659        bi->page_offset = ice_rx_offset(rx_ring);
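             /* take a large batch of page references up front and track how
              * many the driver still owns in pagecnt_bias; recycling a buffer
              * then only costs a local bias decrement instead of an atomic
              * page refcount update per frame
              */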
 660        page_ref_add(page, USHRT_MAX - 1);
 661        bi->pagecnt_bias = USHRT_MAX;
 662
 663        return true;
 664}
 665
 666/**
 667 * ice_alloc_rx_bufs - Replace used receive buffers
 668 * @rx_ring: ring to place buffers on
 669 * @cleaned_count: number of buffers to replace
 670 *
 671 * Returns false if all allocations were successful, true if any fail. Returning
 672 * true signals to the caller that we didn't replace cleaned_count buffers and
 673 * there is more work to do.
 674 *
  675 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
  676 * buffers. Finally, bump the tail at most once. Grouping the work like this
  677 * lets us avoid multiple tail writes per call.
 678 */
 679bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
 680{
 681        union ice_32b_rx_flex_desc *rx_desc;
 682        u16 ntu = rx_ring->next_to_use;
 683        struct ice_rx_buf *bi;
 684
 685        /* do nothing if no valid netdev defined */
 686        if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
 687            !cleaned_count)
 688                return false;
 689
 690        /* get the Rx descriptor and buffer based on next_to_use */
 691        rx_desc = ICE_RX_DESC(rx_ring, ntu);
 692        bi = &rx_ring->rx_buf[ntu];
 693
 694        do {
 695                /* if we fail here, we have work remaining */
 696                if (!ice_alloc_mapped_page(rx_ring, bi))
 697                        break;
 698
 699                /* sync the buffer for use by the device */
 700                dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
 701                                                 bi->page_offset,
 702                                                 rx_ring->rx_buf_len,
 703                                                 DMA_FROM_DEVICE);
 704
 705                /* Refresh the desc even if buffer_addrs didn't change
 706                 * because each write-back erases this info.
 707                 */
 708                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
 709
 710                rx_desc++;
 711                bi++;
 712                ntu++;
 713                if (unlikely(ntu == rx_ring->count)) {
 714                        rx_desc = ICE_RX_DESC(rx_ring, 0);
 715                        bi = rx_ring->rx_buf;
 716                        ntu = 0;
 717                }
 718
 719                /* clear the status bits for the next_to_use descriptor */
 720                rx_desc->wb.status_error0 = 0;
 721
 722                cleaned_count--;
 723        } while (cleaned_count);
 724
 725        if (rx_ring->next_to_use != ntu)
 726                ice_release_rx_desc(rx_ring, ntu);
 727
 728        return !!cleaned_count;
 729}
 730
 731/**
 732 * ice_page_is_reserved - check if reuse is possible
 733 * @page: page struct to check
 734 */
 735static bool ice_page_is_reserved(struct page *page)
 736{
 737        return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 738}
 739
 740/**
 741 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 742 * @rx_buf: Rx buffer to adjust
 743 * @size: Size of adjustment
 744 *
 745 * Update the offset within page so that Rx buf will be ready to be reused.
 746 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 747 * so the second half of page assigned to Rx buffer will be used, otherwise
 748 * the offset is moved by "size" bytes
 749 */
 750static void
 751ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
 752{
 753#if (PAGE_SIZE < 8192)
 754        /* flip page offset to other buffer */
 755        rx_buf->page_offset ^= size;
 756#else
 757        /* move offset up to the next cache line */
 758        rx_buf->page_offset += size;
 759#endif
 760}
 761
 762/**
 763 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 764 * @rx_buf: buffer containing the page
 765 *
 766 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 767 * which will assign the current buffer to the buffer that next_to_alloc is
 768 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 769 * page freed
 770 */
 771static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
 772{
 773        unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
 774        struct page *page = rx_buf->page;
 775
 776        /* avoid re-using remote pages */
 777        if (unlikely(ice_page_is_reserved(page)))
 778                return false;
 779
 780#if (PAGE_SIZE < 8192)
 781        /* if we are only owner of page we can reuse it */
 782        if (unlikely((page_count(page) - pagecnt_bias) > 1))
 783                return false;
 784#else
 785#define ICE_LAST_OFFSET \
 786        (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
 787        if (rx_buf->page_offset > ICE_LAST_OFFSET)
 788                return false;
  789#endif /* PAGE_SIZE < 8192 */
 790
 791        /* If we have drained the page fragment pool we need to update
 792         * the pagecnt_bias and page count so that we fully restock the
 793         * number of references the driver holds.
 794         */
 795        if (unlikely(pagecnt_bias == 1)) {
 796                page_ref_add(page, USHRT_MAX - 1);
 797                rx_buf->pagecnt_bias = USHRT_MAX;
 798        }
 799
 800        return true;
 801}
 802
 803/**
 804 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 805 * @rx_ring: Rx descriptor ring to transact packets on
 806 * @rx_buf: buffer containing page to add
 807 * @skb: sk_buff to place the data into
 808 * @size: packet length from rx_desc
 809 *
 810 * This function will add the data contained in rx_buf->page to the skb.
 811 * It will just attach the page as a frag to the skb.
 812 * The function will then update the page offset.
 813 */
 814static void
 815ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 816                struct sk_buff *skb, unsigned int size)
 817{
 818#if (PAGE_SIZE >= 8192)
 819        unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
 820#else
 821        unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
 822#endif
 823
 824        if (!size)
 825                return;
 826        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
 827                        rx_buf->page_offset, size, truesize);
 828
 829        /* page is being used so we must update the page offset */
 830        ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 831}
 832
 833/**
 834 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 835 * @rx_ring: Rx descriptor ring to store buffers on
 836 * @old_buf: donor buffer to have page reused
 837 *
 838 * Synchronizes page for reuse by the adapter
 839 */
 840static void
 841ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
 842{
 843        u16 nta = rx_ring->next_to_alloc;
 844        struct ice_rx_buf *new_buf;
 845
 846        new_buf = &rx_ring->rx_buf[nta];
 847
 848        /* update, and store next to alloc */
 849        nta++;
 850        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 851
 852        /* Transfer page from old buffer to new buffer.
 853         * Move each member individually to avoid possible store
 854         * forwarding stalls and unnecessary copy of skb.
 855         */
 856        new_buf->dma = old_buf->dma;
 857        new_buf->page = old_buf->page;
 858        new_buf->page_offset = old_buf->page_offset;
 859        new_buf->pagecnt_bias = old_buf->pagecnt_bias;
 860}
 861
 862/**
 863 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 864 * @rx_ring: Rx descriptor ring to transact packets on
 865 * @skb: skb to be used
 866 * @size: size of buffer to add to skb
 867 *
 868 * This function will pull an Rx buffer from the ring and synchronize it
 869 * for use by the CPU.
 870 */
 871static struct ice_rx_buf *
 872ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
 873               const unsigned int size)
 874{
 875        struct ice_rx_buf *rx_buf;
 876
 877        rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
 878        prefetchw(rx_buf->page);
 879        *skb = rx_buf->skb;
 880
 881        if (!size)
 882                return rx_buf;
 883        /* we are reusing so sync this buffer for CPU use */
 884        dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
 885                                      rx_buf->page_offset, size,
 886                                      DMA_FROM_DEVICE);
 887
 888        /* We have pulled a buffer for use, so decrement pagecnt_bias */
 889        rx_buf->pagecnt_bias--;
 890
 891        return rx_buf;
 892}
 893
 894/**
 895 * ice_build_skb - Build skb around an existing buffer
 896 * @rx_ring: Rx descriptor ring to transact packets on
 897 * @rx_buf: Rx buffer to pull data from
 898 * @xdp: xdp_buff pointing to the data
 899 *
 900 * This function builds an skb around an existing Rx buffer, taking care
 901 * to set up the skb correctly and avoid any memcpy overhead.
 902 */
 903static struct sk_buff *
 904ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 905              struct xdp_buff *xdp)
 906{
 907        u8 metasize = xdp->data - xdp->data_meta;
 908#if (PAGE_SIZE < 8192)
 909        unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
 910#else
 911        unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
 912                                SKB_DATA_ALIGN(xdp->data_end -
 913                                               xdp->data_hard_start);
 914#endif
 915        struct sk_buff *skb;
 916
 917        /* Prefetch first cache line of first page. If xdp->data_meta
  918         * is unused, this points exactly to xdp->data, otherwise we
 919         * likely have a consumer accessing first few bytes of meta
 920         * data, and then actual data.
 921         */
 922        prefetch(xdp->data_meta);
 923#if L1_CACHE_BYTES < 128
 924        prefetch((void *)(xdp->data + L1_CACHE_BYTES));
 925#endif
 926        /* build an skb around the page buffer */
 927        skb = build_skb(xdp->data_hard_start, truesize);
 928        if (unlikely(!skb))
 929                return NULL;
 930
  931        /* we must record the Rx queue, otherwise OS features such as
  932         * symmetric queues won't work
 933         */
 934        skb_record_rx_queue(skb, rx_ring->q_index);
 935
 936        /* update pointers within the skb to store the data */
 937        skb_reserve(skb, xdp->data - xdp->data_hard_start);
 938        __skb_put(skb, xdp->data_end - xdp->data);
 939        if (metasize)
 940                skb_metadata_set(skb, metasize);
 941
 942        /* buffer is used by skb, update page_offset */
 943        ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 944
 945        return skb;
 946}
 947
 948/**
 949 * ice_construct_skb - Allocate skb and populate it
 950 * @rx_ring: Rx descriptor ring to transact packets on
 951 * @rx_buf: Rx buffer to pull data from
 952 * @xdp: xdp_buff pointing to the data
 953 *
 954 * This function allocates an skb. It then populates it with the page
 955 * data from the current receive descriptor, taking care to set up the
 956 * skb correctly.
 957 */
 958static struct sk_buff *
 959ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 960                  struct xdp_buff *xdp)
 961{
 962        unsigned int size = xdp->data_end - xdp->data;
 963        unsigned int headlen;
 964        struct sk_buff *skb;
 965
 966        /* prefetch first cache line of first page */
 967        prefetch(xdp->data);
 968#if L1_CACHE_BYTES < 128
 969        prefetch((void *)(xdp->data + L1_CACHE_BYTES));
 970#endif /* L1_CACHE_BYTES */
 971
 972        /* allocate a skb to store the frags */
 973        skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
 974                               GFP_ATOMIC | __GFP_NOWARN);
 975        if (unlikely(!skb))
 976                return NULL;
 977
 978        skb_record_rx_queue(skb, rx_ring->q_index);
 979        /* Determine available headroom for copy */
 980        headlen = size;
 981        if (headlen > ICE_RX_HDR_SIZE)
 982                headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
 983
 984        /* align pull length to size of long to optimize memcpy performance */
 985        memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
 986                                                         sizeof(long)));
 987
 988        /* if we exhaust the linear part then add what is left as a frag */
 989        size -= headlen;
 990        if (size) {
 991#if (PAGE_SIZE >= 8192)
 992                unsigned int truesize = SKB_DATA_ALIGN(size);
 993#else
 994                unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
 995#endif
 996                skb_add_rx_frag(skb, 0, rx_buf->page,
 997                                rx_buf->page_offset + headlen, size, truesize);
 998                /* buffer is used by skb, update page_offset */
 999                ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
1000        } else {
1001                /* buffer is unused, reset bias back to rx_buf; data was copied
1002                 * onto skb's linear part so there's no need for adjusting
1003                 * page offset and we can reuse this buffer as-is
1004                 */
1005                rx_buf->pagecnt_bias++;
1006        }
1007
1008        return skb;
1009}
1010
1011/**
1012 * ice_put_rx_buf - Clean up used buffer and either recycle or free
1013 * @rx_ring: Rx descriptor ring to transact packets on
1014 * @rx_buf: Rx buffer to pull data from
1015 *
1016 * This function will update next_to_clean and then clean up the contents
1017 * of the rx_buf. It will either recycle the buffer or unmap it and free
1018 * the associated resources.
1019 */
1020static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
1021{
1022        u16 ntc = rx_ring->next_to_clean + 1;
1023
1024        /* fetch, update, and store next to clean */
1025        ntc = (ntc < rx_ring->count) ? ntc : 0;
1026        rx_ring->next_to_clean = ntc;
1027
1028        if (!rx_buf)
1029                return;
1030
1031        if (ice_can_reuse_rx_page(rx_buf)) {
1032                /* hand second half of page back to the ring */
1033                ice_reuse_rx_page(rx_ring, rx_buf);
1034        } else {
1035                /* we are not reusing the buffer so unmap it */
1036                dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1037                                     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1038                                     ICE_RX_DMA_ATTR);
1039                __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1040        }
1041
1042        /* clear contents of buffer_info */
1043        rx_buf->page = NULL;
1044        rx_buf->skb = NULL;
1045}
1046
1047/**
1048 * ice_is_non_eop - process handling of non-EOP buffers
1049 * @rx_ring: Rx ring being processed
1050 * @rx_desc: Rx descriptor for current buffer
1051 * @skb: Current socket buffer containing buffer in progress
1052 *
1053 * If the buffer is an EOP buffer, this function exits returning false,
1054 * otherwise return true indicating that this is in fact a non-EOP buffer.
1055 */
1056static bool
1057ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
1058               struct sk_buff *skb)
1059{
1060        /* if we are the last buffer then there is nothing else to do */
1061#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
1062        if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
1063                return false;
1064
1065        /* place skb in next buffer to be received */
1066        rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
1067        rx_ring->rx_stats.non_eop_descs++;
1068
1069        return true;
1070}
1071
1072/**
1073 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1074 * @rx_ring: Rx descriptor ring to transact packets on
1075 * @budget: Total limit on number of packets to process
1076 *
1077 * This function provides a "bounce buffer" approach to Rx interrupt
1078 * processing. The advantage to this is that on systems that have
1079 * expensive overhead for IOMMU access this provides a means of avoiding
1080 * it by maintaining the mapping of the page to the system.
1081 *
1082 * Returns amount of work completed
1083 */
1084int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
1085{
1086        unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
1087        u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1088        unsigned int xdp_res, xdp_xmit = 0;
1089        struct bpf_prog *xdp_prog = NULL;
1090        struct xdp_buff xdp;
1091        bool failure;
1092
1093        xdp.rxq = &rx_ring->xdp_rxq;
 1094        /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
1095#if (PAGE_SIZE < 8192)
1096        xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
1097#endif
1098
1099        /* start the loop to process Rx packets bounded by 'budget' */
1100        while (likely(total_rx_pkts < (unsigned int)budget)) {
1101                union ice_32b_rx_flex_desc *rx_desc;
1102                struct ice_rx_buf *rx_buf;
1103                struct sk_buff *skb;
1104                unsigned int size;
1105                u16 stat_err_bits;
1106                u16 vlan_tag = 0;
1107                u8 rx_ptype;
1108
1109                /* get the Rx desc from Rx ring based on 'next_to_clean' */
1110                rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1111
1112                /* status_error_len will always be zero for unused descriptors
1113                 * because it's cleared in cleanup, and overlaps with hdr_addr
 1114                 * which is always zero because packet split isn't used. If the
 1115                 * hardware wrote DD then it will be non-zero.
1116                 */
1117                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1118                if (!ice_test_staterr(rx_desc, stat_err_bits))
1119                        break;
1120
1121                /* This memory barrier is needed to keep us from reading
1122                 * any other fields out of the rx_desc until we know the
1123                 * DD bit is set.
1124                 */
1125                dma_rmb();
1126
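                     /* Flow Director programming status descriptors, and anything
                      * received on a ring without a netdev (the control VSI), are
                      * not passed up the stack; just recycle the buffer slot
                      */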
1127                if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1128                        ice_put_rx_buf(rx_ring, NULL);
1129                        cleaned_count++;
1130                        continue;
1131                }
1132
1133                size = le16_to_cpu(rx_desc->wb.pkt_len) &
1134                        ICE_RX_FLX_DESC_PKT_LEN_M;
1135
1136                /* retrieve a buffer from the ring */
1137                rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
1138
1139                if (!size) {
1140                        xdp.data = NULL;
1141                        xdp.data_end = NULL;
1142                        xdp.data_hard_start = NULL;
1143                        xdp.data_meta = NULL;
1144                        goto construct_skb;
1145                }
1146
1147                xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
1148                xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
1149                xdp.data_meta = xdp.data;
1150                xdp.data_end = xdp.data + size;
1151#if (PAGE_SIZE > 4096)
 1152                /* At larger PAGE_SIZE, frame_sz depends on the frame length */
1153                xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1154#endif
1155
1156                rcu_read_lock();
1157                xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1158                if (!xdp_prog) {
1159                        rcu_read_unlock();
1160                        goto construct_skb;
1161                }
1162
1163                xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
1164                rcu_read_unlock();
1165                if (!xdp_res)
1166                        goto construct_skb;
1167                if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1168                        xdp_xmit |= xdp_res;
1169                        ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
1170                } else {
1171                        rx_buf->pagecnt_bias++;
1172                }
1173                total_rx_bytes += size;
1174                total_rx_pkts++;
1175
1176                cleaned_count++;
1177                ice_put_rx_buf(rx_ring, rx_buf);
1178                continue;
1179construct_skb:
1180                if (skb) {
1181                        ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1182                } else if (likely(xdp.data)) {
1183                        if (ice_ring_uses_build_skb(rx_ring))
1184                                skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1185                        else
1186                                skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1187                }
1188                /* exit if we failed to retrieve a buffer */
1189                if (!skb) {
1190                        rx_ring->rx_stats.alloc_buf_failed++;
1191                        if (rx_buf)
1192                                rx_buf->pagecnt_bias++;
1193                        break;
1194                }
1195
1196                ice_put_rx_buf(rx_ring, rx_buf);
1197                cleaned_count++;
1198
 1199                /* skip if it is a NOP desc */
1200                if (ice_is_non_eop(rx_ring, rx_desc, skb))
1201                        continue;
1202
1203                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1204                if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1205                        dev_kfree_skb_any(skb);
1206                        continue;
1207                }
1208
1209                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1210                if (ice_test_staterr(rx_desc, stat_err_bits))
1211                        vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1212
1213                /* pad the skb if needed, to make a valid ethernet frame */
1214                if (eth_skb_pad(skb)) {
1215                        skb = NULL;
1216                        continue;
1217                }
1218
1219                /* probably a little skewed due to removing CRC */
1220                total_rx_bytes += skb->len;
1221
1222                /* populate checksum, VLAN, and protocol */
1223                rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1224                        ICE_RX_FLEX_DESC_PTYPE_M;
1225
1226                ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1227
1228                /* send completed skb up the stack */
1229                ice_receive_skb(rx_ring, skb, vlan_tag);
1230
1231                /* update budget accounting */
1232                total_rx_pkts++;
1233        }
1234
1235        /* return up to cleaned_count buffers to hardware */
1236        failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1237
1238        if (xdp_prog)
1239                ice_finalize_xdp_rx(rx_ring, xdp_xmit);
1240
1241        ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
1242
1243        /* guarantee a trip back through this routine if there was a failure */
1244        return failure ? budget : (int)total_rx_pkts;
1245}
1246
1247/**
1248 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
1249 * @port_info: port_info structure containing the current link speed
1250 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
1251 * @itr: ITR value to update
1252 *
1253 * Calculate how big of an increment should be applied to the ITR value passed
1254 * in based on wmem_default, SKB overhead, ethernet overhead, and the current
1255 * link speed.
1256 *
1257 * The following is a calculation derived from:
1258 *  wmem_default / (size + overhead) = desired_pkts_per_int
1259 *  rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1260 *  (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1261 *
1262 * Assuming wmem_default is 212992 and overhead is 640 bytes per
 1263 * packet (256 skb, 64 headroom, 320 shared info), we can reduce the
1264 * formula down to:
1265 *
1266 *       wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
1267 * ITR = -------------------------------------------- * --------------
1268 *                           rate                       pkt_size + 640
1269 */
1270static unsigned int
1271ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
1272                                 unsigned int avg_pkt_size,
1273                                 unsigned int itr)
1274{
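             /* the multiplier in each case below is the precomputed
              * wmem_default * bits_per_byte * usecs_per_sec / link_rate term
              * from the formula above, e.g. 212992 * 8 * 10^6 / 100e9 ~= 17
              * for 100 Gb
              */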
1275        switch (port_info->phy.link_info.link_speed) {
1276        case ICE_AQ_LINK_SPEED_100GB:
1277                itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
1278                                    avg_pkt_size + 640);
1279                break;
1280        case ICE_AQ_LINK_SPEED_50GB:
1281                itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
1282                                    avg_pkt_size + 640);
1283                break;
1284        case ICE_AQ_LINK_SPEED_40GB:
1285                itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
1286                                    avg_pkt_size + 640);
1287                break;
1288        case ICE_AQ_LINK_SPEED_25GB:
1289                itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
1290                                    avg_pkt_size + 640);
1291                break;
1292        case ICE_AQ_LINK_SPEED_20GB:
1293                itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
1294                                    avg_pkt_size + 640);
1295                break;
1296        case ICE_AQ_LINK_SPEED_10GB:
1297        default:
1298                itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
1299                                    avg_pkt_size + 640);
1300                break;
1301        }
1302
1303        if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1304                itr &= ICE_ITR_ADAPTIVE_LATENCY;
1305                itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1306        }
1307
1308        return itr;
1309}
1310
1311/**
1312 * ice_update_itr - update the adaptive ITR value based on statistics
1313 * @q_vector: structure containing interrupt and ring information
1314 * @rc: structure containing ring performance data
1315 *
1316 * Stores a new ITR value based on packets and byte
1317 * counts during the last interrupt.  The advantage of per interrupt
1318 * computation is faster updates and more accurate ITR for the current
1319 * traffic pattern.  Constants in this function were computed
1320 * based on theoretical maximum wire speed and thresholds were set based
1321 * on testing data as well as attempting to minimize response time
1322 * while increasing bulk throughput.
1323 */
1324static void
1325ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
1326{
1327        unsigned long next_update = jiffies;
1328        unsigned int packets, bytes, itr;
1329        bool container_is_rx;
1330
1331        if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
1332                return;
1333
1334        /* If itr_countdown is set it means we programmed an ITR within
1335         * the last 4 interrupt cycles. This has a side effect of us
1336         * potentially firing an early interrupt. In order to work around
1337         * this we need to throw out any data received for a few
1338         * interrupts following the update.
1339         */
1340        if (q_vector->itr_countdown) {
1341                itr = rc->target_itr;
1342                goto clear_counts;
1343        }
1344
1345        container_is_rx = (&q_vector->rx == rc);
1346        /* For Rx we want to push the delay up and default to low latency.
1347         * for Tx we want to pull the delay down and default to high latency.
1348         */
1349        itr = container_is_rx ?
1350                ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
1351                ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
1352
1353        /* If we didn't update within up to 1 - 2 jiffies we can assume
 1354         * that either packets are coming in so slowly there hasn't been
1355         * any work, or that there is so much work that NAPI is dealing
1356         * with interrupt moderation and we don't need to do anything.
1357         */
1358        if (time_after(next_update, rc->next_update))
1359                goto clear_counts;
1360
1361        prefetch(q_vector->vsi->port_info);
1362
1363        packets = rc->total_pkts;
1364        bytes = rc->total_bytes;
1365
1366        if (container_is_rx) {
 1367                /* If this is Rx, there are 1 to 4 packets, and bytes are less
 1368                 * than 9000, assume there is insufficient data to use the bulk
 1369                 * rate limiting approach unless Tx is already in bulk rate
 1370                 * limiting. We are likely latency driven.
1371                 */
1372                if (packets && packets < 4 && bytes < 9000 &&
1373                    (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
1374                        itr = ICE_ITR_ADAPTIVE_LATENCY;
1375                        goto adjust_by_size_and_speed;
1376                }
1377        } else if (packets < 4) {
1378                /* If we have Tx and Rx ITR maxed and Tx ITR is running in
1379                 * bulk mode and we are receiving 4 or fewer packets just
1380                 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1381                 * that the Rx can relax.
1382                 */
1383                if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
1384                    (q_vector->rx.target_itr & ICE_ITR_MASK) ==
1385                    ICE_ITR_ADAPTIVE_MAX_USECS)
1386                        goto clear_counts;
1387        } else if (packets > 32) {
1388                /* If we have processed over 32 packets in a single interrupt
1389                 * for Tx assume we need to switch over to "bulk" mode.
1390                 */
1391                rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
1392        }
1393
1394        /* We have no packets to actually measure against. This means
1395         * either one of the other queues on this vector is active or
1396         * we are a Tx queue doing TSO with too high of an interrupt rate.
1397         *
1398         * Between 4 and 56 we can assume that our current interrupt delay
1399         * is only slightly too low. As such we should increase it by a small
1400         * fixed amount.
1401         */
1402        if (packets < 56) {
1403                itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
1404                if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1405                        itr &= ICE_ITR_ADAPTIVE_LATENCY;
1406                        itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1407                }
1408                goto clear_counts;
1409        }
1410
1411        if (packets <= 256) {
1412                itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1413                itr &= ICE_ITR_MASK;
1414
1415                /* Between 56 and 112 is our "goldilocks" zone where we are
1416                 * working out "just right". Just report that our current
1417                 * ITR is good for us.
1418                 */
1419                if (packets <= 112)
1420                        goto clear_counts;
1421
1422                /* If packet count is 128 or greater we are likely looking
1423                 * at a slight overrun of the delay we want. Try halving
1424                 * our delay to see if that will cut the number of packets
1425                 * in half per interrupt.
1426                 */
1427                itr >>= 1;
1428                itr &= ICE_ITR_MASK;
1429                if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
1430                        itr = ICE_ITR_ADAPTIVE_MIN_USECS;
1431
1432                goto clear_counts;
1433        }
1434
1435        /* The paths below assume we are dealing with a bulk ITR since
1436         * number of packets is greater than 256. We are just going to have
1437         * to compute a value and try to bring the count under control,
1438         * though for smaller packet sizes there isn't much we can do as
1439         * NAPI polling will likely be kicking in sooner rather than later.
1440         */
1441        itr = ICE_ITR_ADAPTIVE_BULK;
1442
1443adjust_by_size_and_speed:
1444
1445        /* based on checks above packets cannot be 0 so division is safe */
1446        itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
1447                                               bytes / packets, itr);
1448
1449clear_counts:
1450        /* write back value */
1451        rc->target_itr = itr;
1452
1453        /* next update should occur within next jiffy */
1454        rc->next_update = next_update + 1;
1455
1456        rc->total_bytes = 0;
1457        rc->total_pkts = 0;
1458}
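
/* Illustrative sketch (not part of the driver): the adaptive logic above
 * buckets the per-interrupt packet count and nudges the ITR accordingly.
 * The standalone helper below mirrors those buckets with assumed values for
 * the ICE_ITR_ADAPTIVE_* constants; all names and numbers here are for
 * illustration only, and the latency/bulk flag handling is omitted.
 */
#if 0
#include <stdio.h>

#define EX_ITR_MIN_USECS        2U      /* assumed ICE_ITR_ADAPTIVE_MIN_USECS */
#define EX_ITR_MAX_USECS        250U    /* assumed ICE_ITR_ADAPTIVE_MAX_USECS */
#define EX_ITR_MIN_INC          2U      /* assumed ICE_ITR_ADAPTIVE_MIN_INC   */

/* Return the next ITR (usecs) for a packet count, using the same buckets as
 * ice_update_itr(): <56 bump slightly, <=112 keep, <=256 halve, >256 fall
 * back to bulk and let the size/speed adjustment pick the value.
 */
static unsigned int ex_next_itr(unsigned int packets, unsigned int cur_itr)
{
        if (packets < 56) {
                unsigned int itr = cur_itr + EX_ITR_MIN_INC;

                return itr > EX_ITR_MAX_USECS ? EX_ITR_MAX_USECS : itr;
        }
        if (packets <= 112)
                return cur_itr;                 /* "goldilocks" zone */
        if (packets <= 256) {
                unsigned int itr = cur_itr >> 1;        /* halve the delay */

                return itr < EX_ITR_MIN_USECS ? EX_ITR_MIN_USECS : itr;
        }
        return EX_ITR_MIN_USECS;        /* bulk: size/speed adjustment decides */
}

int main(void)
{
        printf("%u\n", ex_next_itr(40, 50));    /* 52: delay slightly too low */
        printf("%u\n", ex_next_itr(100, 50));   /* 50: keep as-is */
        printf("%u\n", ex_next_itr(200, 50));   /* 25: halve the delay */
        return 0;
}
#endif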
1459
1460/**
1461 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1462 * @itr_idx: interrupt throttling index
1463 * @itr: interrupt throttling value in usecs
1464 */
1465static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1466{
1467        /* The ITR value is reported in microseconds, and the register value is
1468         * recorded in 2 microsecond units. For this reason we only need to
1469         * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1470         * granularity as a shift instead of division. The mask makes sure the
1471         * ITR value is never odd so we don't accidentally write into the field
1472         * prior to the ITR field.
1473         */
1474        itr &= ICE_ITR_MASK;
1475
1476        return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1477                (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1478                (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1479}
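
/* Worked example (illustrative only): assuming INTENA in bit 0, CLEARPBA in
 * bit 1, the ITR index field starting at bit 3 and the 2-usec interval field
 * starting at bit 5, a 50 usec ITR on index 0 packs to 0x323.  The field
 * positions and mask below restate that assumption; they are not taken from
 * this file.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_INTENA_M     (1u << 0)       /* assumed GLINT_DYN_CTL_INTENA_M   */
#define EX_CLEARPBA_M   (1u << 1)       /* assumed GLINT_DYN_CTL_CLEARPBA_M */
#define EX_ITR_INDX_S   3               /* assumed GLINT_DYN_CTL_ITR_INDX_S */
#define EX_INTERVAL_S   5               /* assumed GLINT_DYN_CTL_INTERVAL_S */
#define EX_ITR_GRAN_S   1               /* assumed ICE_ITR_GRAN_S (2 usecs) */
#define EX_ITR_MASK     0x1FFEu         /* assumed ICE_ITR_MASK (even usecs) */

static uint32_t ex_buildreg_itr(uint16_t itr_idx, uint16_t itr)
{
        itr &= EX_ITR_MASK;     /* keep the value even, as in the driver */

        return EX_INTENA_M | EX_CLEARPBA_M |
               ((uint32_t)itr_idx << EX_ITR_INDX_S) |
               ((uint32_t)itr << (EX_INTERVAL_S - EX_ITR_GRAN_S));
}

int main(void)
{
        /* 0x1 | 0x2 | (0 << 3) | (50 << 4) = 0x323 */
        printf("0x%x\n", ex_buildreg_itr(0, 50));
        return 0;
}
#endif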
1480
1481/* The act of updating the ITR will cause it to immediately trigger. In order
1482 * to prevent this from throwing off adaptive update statistics we defer the
1483 * update so that it can only happen so often. So after either Tx or Rx are
1484 * updated we make the adaptive scheme wait until either the ITR completely
1485 * expires via the next_update expiration or we have been through at least
1486 * 3 interrupts.
1487 */
1488#define ITR_COUNTDOWN_START 3
1489
1490/**
1491 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
1492 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
1493 */
1494static void ice_update_ena_itr(struct ice_q_vector *q_vector)
1495{
1496        struct ice_ring_container *tx = &q_vector->tx;
1497        struct ice_ring_container *rx = &q_vector->rx;
1498        struct ice_vsi *vsi = q_vector->vsi;
1499        u32 itr_val;
1500
1501        /* when exiting WB_ON_ITR let's set a low ITR value and trigger
1502         * interrupts to expire right away in case we have more work ready to go
1503         * already
1504         */
1505        if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
1506                itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
1507                wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1508                /* set target back to last user set value */
1509                rx->target_itr = rx->itr_setting;
1510                /* set current to what we just wrote and dynamic if needed */
1511                rx->current_itr = ICE_WB_ON_ITR_USECS |
1512                        (rx->itr_setting & ICE_ITR_DYNAMIC);
1513                /* allow normal interrupt flow to start */
1514                q_vector->itr_countdown = 0;
1515                return;
1516        }
1517
1518        /* This will do nothing if dynamic updates are not enabled */
1519        ice_update_itr(q_vector, tx);
1520        ice_update_itr(q_vector, rx);
1521
1522        /* This block of logic allows us to get away with only updating
1523         * one ITR value with each interrupt. The idea is to perform a
1524         * pseudo-lazy update with the following criteria.
1525         *
1526         * 1. Rx is given higher priority than Tx if both are in same state
1527         * 2. If we must reduce an ITR, that is given highest priority.
1528         * 3. We then give priority to increasing ITR based on amount.
1529         */
1530        if (rx->target_itr < rx->current_itr) {
1531                /* Rx ITR needs to be reduced, this is highest priority */
1532                itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1533                rx->current_itr = rx->target_itr;
1534                q_vector->itr_countdown = ITR_COUNTDOWN_START;
1535        } else if ((tx->target_itr < tx->current_itr) ||
1536                   ((rx->target_itr - rx->current_itr) <
1537                    (tx->target_itr - tx->current_itr))) {
1538                /* Tx ITR needs to be reduced, this is second priority
1539                 * Tx ITR needs to be increased more than Rx, fourth priority
1540                 */
1541                itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
1542                tx->current_itr = tx->target_itr;
1543                q_vector->itr_countdown = ITR_COUNTDOWN_START;
1544        } else if (rx->current_itr != rx->target_itr) {
1545                /* Rx ITR needs to be increased, third priority */
1546                itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1547                rx->current_itr = rx->target_itr;
1548                q_vector->itr_countdown = ITR_COUNTDOWN_START;
1549        } else {
1550                /* Still have to re-enable the interrupts */
1551                itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1552                if (q_vector->itr_countdown)
1553                        q_vector->itr_countdown--;
1554        }
1555
1556        if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
1557                wr32(&q_vector->vsi->back->hw,
1558                     GLINT_DYN_CTL(q_vector->reg_idx),
1559                     itr_val);
1560}
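
/* Illustrative sketch (not part of the driver): the pseudo-lazy rules above
 * can be read as a small decision function.  Given current and target ITR
 * values for Tx and Rx, it reports which of the two would be (re)programmed
 * on this interrupt; names below are hypothetical.
 */
#if 0
#include <stdio.h>

enum ex_pick { EX_PICK_NONE, EX_PICK_RX, EX_PICK_TX };

static enum ex_pick ex_pick_itr(unsigned int tx_cur, unsigned int tx_tgt,
                                unsigned int rx_cur, unsigned int rx_tgt)
{
        if (rx_tgt < rx_cur)
                return EX_PICK_RX;      /* reducing Rx: highest priority */
        if (tx_tgt < tx_cur || (rx_tgt - rx_cur) < (tx_tgt - tx_cur))
                return EX_PICK_TX;      /* reducing Tx, or Tx must grow more */
        if (rx_cur != rx_tgt)
                return EX_PICK_RX;      /* increasing Rx */
        return EX_PICK_NONE;            /* nothing changes, just re-enable */
}

int main(void)
{
        printf("%d\n", ex_pick_itr(100, 100, 100, 50));  /* 1: reduce Rx     */
        printf("%d\n", ex_pick_itr(100, 150, 100, 120)); /* 2: Tx grows more */
        printf("%d\n", ex_pick_itr(100, 100, 100, 120)); /* 1: increase Rx   */
        return 0;
}
#endif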
1561
1562/**
1563 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1564 * @q_vector: q_vector to set WB_ON_ITR on
1565 *
1566 * We need to tell hardware to write-back completed descriptors even when
1567 * interrupts are disabled. Descriptors will be written back on cache line
1568 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1569 * descriptors may not be written back if they don't fill a cache line until the
1570 * next interrupt.
1571 *
1572 * This sets the write-back frequency to 2 microseconds as that is the minimum
1573 * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
1574 * make sure hardware knows we aren't meddling with the INTENA_M bit.
1575 */
1576static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1577{
1578        struct ice_vsi *vsi = q_vector->vsi;
1579
1580        /* already in WB_ON_ITR mode no need to change it */
1581        if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
1582                return;
1583
1584        if (q_vector->num_ring_rx)
1585                wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1586                     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1587                                                 ICE_RX_ITR));
1588
1589        if (q_vector->num_ring_tx)
1590                wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1591                     ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1592                                                 ICE_TX_ITR));
1593
1594        q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
1595}
1596
1597/**
1598 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1599 * @napi: napi struct with our devices info in it
1600 * @budget: amount of work driver is allowed to do this pass, in packets
1601 *
1602 * This function will clean all queues associated with a q_vector.
1603 *
1604 * Returns the amount of work done
1605 */
1606int ice_napi_poll(struct napi_struct *napi, int budget)
1607{
1608        struct ice_q_vector *q_vector =
1609                                container_of(napi, struct ice_q_vector, napi);
1610        bool clean_complete = true;
1611        struct ice_ring *ring;
1612        int budget_per_ring;
1613        int work_done = 0;
1614
1615        /* Since the actual Tx work is minimal, we can give the Tx a larger
1616         * budget and be more aggressive about cleaning up the Tx descriptors.
1617         */
1618        ice_for_each_ring(ring, q_vector->tx) {
1619                bool wd = ring->xsk_umem ?
1620                          ice_clean_tx_irq_zc(ring, budget) :
1621                          ice_clean_tx_irq(ring, budget);
1622
1623                if (!wd)
1624                        clean_complete = false;
1625        }
1626
1627        /* Handle case where we are called by netpoll with a budget of 0 */
1628        if (unlikely(budget <= 0))
1629                return budget;
1630
1631        /* normally we have 1 Rx ring per q_vector */
1632        if (unlikely(q_vector->num_ring_rx > 1))
1633                /* We attempt to distribute budget to each Rx queue fairly, but
1634                 * don't allow the budget to go below 1 because that would exit
1635                 * polling early.
1636                 */
1637                budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1638        else
1639                /* Max of 1 Rx ring in this q_vector so give it the budget */
1640                budget_per_ring = budget;
1641
1642        ice_for_each_ring(ring, q_vector->rx) {
1643                int cleaned;
1644
1645                /* A dedicated path for zero-copy allows making a single
1646                 * comparison in the irq context instead of many inside the
1647                 * ice_clean_rx_irq function and makes the codebase cleaner.
1648                 */
1649                cleaned = ring->xsk_umem ?
1650                          ice_clean_rx_irq_zc(ring, budget_per_ring) :
1651                          ice_clean_rx_irq(ring, budget_per_ring);
1652                work_done += cleaned;
1653                /* if we clean as many as budgeted, we must not be done */
1654                if (cleaned >= budget_per_ring)
1655                        clean_complete = false;
1656        }
1657
1658        /* If work not completed, return budget and polling will return */
1659        if (!clean_complete)
1660                return budget;
1661
1662        /* Exit the polling mode, but don't re-enable interrupts if stack might
1663         * poll us due to busy-polling
1664         */
1665        if (likely(napi_complete_done(napi, work_done)))
1666                ice_update_ena_itr(q_vector);
1667        else
1668                ice_set_wb_on_itr(q_vector);
1669
1670        return min_t(int, work_done, budget - 1);
1671}
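
/* Illustrative sketch (not part of the driver): the Rx budget split above is
 * plain integer division floored at one, so a vector with several Rx rings
 * never hands a ring a zero budget and exits polling early.  Standalone
 * example with hypothetical names.
 */
#if 0
#include <stdio.h>

static int ex_budget_per_ring(int budget, int num_ring_rx)
{
        if (num_ring_rx > 1) {
                int per_ring = budget / num_ring_rx;

                return per_ring > 1 ? per_ring : 1;
        }
        return budget;          /* single ring gets the whole budget */
}

int main(void)
{
        printf("%d\n", ex_budget_per_ring(64, 1));      /* 64 */
        printf("%d\n", ex_budget_per_ring(64, 3));      /* 21 */
        printf("%d\n", ex_budget_per_ring(2, 4));       /* 1, never 0 */
        return 0;
}
#endif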
1672
1673/**
1674 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1675 * @tx_ring: the ring to be checked
1676 * @size: the number of descriptors we want to ensure are available
1677 *
1678 * Returns -EBUSY if a stop is needed, else 0
1679 */
1680static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1681{
1682        netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1683        /* Memory barrier before checking head and tail */
1684        smp_mb();
1685
1686        /* Check again in case another CPU has just made room available. */
1687        if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1688                return -EBUSY;
1689
1690        /* A reprieve! - use start_subqueue because it doesn't call schedule */
1691        netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1692        ++tx_ring->tx_stats.restart_q;
1693        return 0;
1694}
1695
1696/**
1697 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1698 * @tx_ring: the ring to be checked
1699 * @size:    the number of descriptors we want to ensure are available
1700 *
1701 * Returns 0 if stop is not needed
1702 */
1703static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1704{
1705        if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1706                return 0;
1707
1708        return __ice_maybe_stop_tx(tx_ring, size);
1709}
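
/* Illustrative sketch (not part of the driver): the two-level stop above is
 * the usual "stop, barrier, re-check, maybe restart" pattern that closes the
 * race against the cleanup path freeing descriptors concurrently.  The toy
 * ring below replays the index arithmetic and the re-check; the names and
 * the main() scenario are made up, and the real smp_mb() pairs with a
 * barrier in the cleanup/completion path.
 */
#if 0
#include <stdio.h>
#include <stdbool.h>

struct ex_ring {
        unsigned int count;             /* total descriptors    */
        unsigned int next_to_use;       /* producer index       */
        unsigned int next_to_clean;     /* consumer index       */
        bool stopped;                   /* software queue state */
};

/* same shape as ICE_DESC_UNUSED(): free slots, keeping one descriptor gap */
static unsigned int ex_unused(const struct ex_ring *r)
{
        return (r->next_to_clean > r->next_to_use ? 0 : r->count) +
               r->next_to_clean - r->next_to_use - 1;
}

static int ex_maybe_stop(struct ex_ring *r, unsigned int needed)
{
        if (ex_unused(r) >= needed)
                return 0;               /* fast path: plenty of room */

        r->stopped = true;              /* netif_stop_subqueue() in the driver */
        /* smp_mb() in the driver: order the stop vs. the re-check below */
        if (ex_unused(r) < needed)
                return -1;              /* still full; cleanup restarts us */

        r->stopped = false;             /* reprieve: room appeared meanwhile */
        return 0;
}

int main(void)
{
        struct ex_ring r = { .count = 8, .next_to_use = 6, .next_to_clean = 0 };

        printf("%d\n", ex_maybe_stop(&r, 4));   /* -1: only 1 slot free, stop */
        r.next_to_clean = 5;                    /* cleanup freed 5 slots...   */
        r.stopped = false;                      /* ...and restarted the queue */
        printf("%d\n", ex_maybe_stop(&r, 4));   /* 0: 6 slots free, fast path */
        return 0;
}
#endif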
1710
1711/**
1712 * ice_tx_map - Build the Tx descriptor
1713 * @tx_ring: ring to send buffer on
1714 * @first: first Tx buffer info to use for this packet
1715 * @off: pointer to struct that holds offload parameters
1716 *
1717 * This function loops over the skb data pointed to by *first
1718 * and gets a physical address for each memory location and programs
1719 * it and the length into the transmit descriptor.
1720 */
1721static void
1722ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1723           struct ice_tx_offload_params *off)
1724{
1725        u64 td_offset, td_tag, td_cmd;
1726        u16 i = tx_ring->next_to_use;
1727        unsigned int data_len, size;
1728        struct ice_tx_desc *tx_desc;
1729        struct ice_tx_buf *tx_buf;
1730        struct sk_buff *skb;
1731        skb_frag_t *frag;
1732        dma_addr_t dma;
1733
1734        td_tag = off->td_l2tag1;
1735        td_cmd = off->td_cmd;
1736        td_offset = off->td_offset;
1737        skb = first->skb;
1738
1739        data_len = skb->data_len;
1740        size = skb_headlen(skb);
1741
1742        tx_desc = ICE_TX_DESC(tx_ring, i);
1743
1744        if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1745                td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1746                td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1747                          ICE_TX_FLAGS_VLAN_S;
1748        }
1749
1750        dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1751
1752        tx_buf = first;
1753
1754        for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1755                unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1756
1757                if (dma_mapping_error(tx_ring->dev, dma))
1758                        goto dma_error;
1759
1760                /* record length, and DMA address */
1761                dma_unmap_len_set(tx_buf, len, size);
1762                dma_unmap_addr_set(tx_buf, dma, dma);
1763
1764                /* align size to end of page */
1765                max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1766                tx_desc->buf_addr = cpu_to_le64(dma);
1767
1768                /* account for data chunks larger than the hardware
1769                 * can handle
1770                 */
1771                while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1772                        tx_desc->cmd_type_offset_bsz =
1773                                ice_build_ctob(td_cmd, td_offset, max_data,
1774                                               td_tag);
1775
1776                        tx_desc++;
1777                        i++;
1778
1779                        if (i == tx_ring->count) {
1780                                tx_desc = ICE_TX_DESC(tx_ring, 0);
1781                                i = 0;
1782                        }
1783
1784                        dma += max_data;
1785                        size -= max_data;
1786
1787                        max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1788                        tx_desc->buf_addr = cpu_to_le64(dma);
1789                }
1790
1791                if (likely(!data_len))
1792                        break;
1793
1794                tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1795                                                              size, td_tag);
1796
1797                tx_desc++;
1798                i++;
1799
1800                if (i == tx_ring->count) {
1801                        tx_desc = ICE_TX_DESC(tx_ring, 0);
1802                        i = 0;
1803                }
1804
1805                size = skb_frag_size(frag);
1806                data_len -= size;
1807
1808                dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1809                                       DMA_TO_DEVICE);
1810
1811                tx_buf = &tx_ring->tx_buf[i];
1812        }
1813
1814        /* record bytecount for BQL */
1815        netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1816
1817        /* record SW timestamp if HW timestamp is not available */
1818        skb_tx_timestamp(first->skb);
1819
1820        i++;
1821        if (i == tx_ring->count)
1822                i = 0;
1823
1824        /* write last descriptor with RS and EOP bits */
1825        td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1826        tx_desc->cmd_type_offset_bsz =
1827                        ice_build_ctob(td_cmd, td_offset, size, td_tag);
1828
1829        /* Force memory writes to complete before letting h/w know there
1830         * are new descriptors to fetch.
1831         *
1832         * We also use this memory barrier to make certain all of the
1833         * status bits have been updated before next_to_watch is written.
1834         */
1835        wmb();
1836
1837        /* set next_to_watch value indicating a packet is present */
1838        first->next_to_watch = tx_desc;
1839
1840        tx_ring->next_to_use = i;
1841
1842        ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1843
1844        /* notify HW of packet */
1845        if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
1846                writel(i, tx_ring->tail);
1847
1848        return;
1849
1850dma_error:
1851        /* clear DMA mappings for failed tx_buf map */
1852        for (;;) {
1853                tx_buf = &tx_ring->tx_buf[i];
1854                ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1855                if (tx_buf == first)
1856                        break;
1857                if (i == 0)
1858                        i = tx_ring->count;
1859                i--;
1860        }
1861
1862        tx_ring->next_to_use = i;
1863}
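
/* Worked example (illustrative only): the splitting loop above caps each data
 * descriptor at 12K (assumed ICE_MAX_DATA_PER_TXD_ALIGNED) and lets the first
 * chunk run up to the next 4K boundary (assumed ICE_MAX_READ_REQ_SIZE) so
 * every later chunk starts 4K-aligned.  The standalone helper replays that
 * arithmetic for one mapped buffer; the constants are assumptions restated
 * from their usual definitions, not taken from this file.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_MAX_PER_TXD          (16 * 1024 - 1) /* assumed ICE_MAX_DATA_PER_TXD  */
#define EX_READ_REQ_SIZE        4096u           /* assumed ICE_MAX_READ_REQ_SIZE */
#define EX_MAX_PER_TXD_ALIGNED  (EX_MAX_PER_TXD & ~(EX_READ_REQ_SIZE - 1))

static void ex_split(uint64_t dma, unsigned int size)
{
        unsigned int max_data = EX_MAX_PER_TXD_ALIGNED;

        /* first chunk may run past 12K, up to the next 4K boundary */
        max_data += (unsigned int)(-dma & (EX_READ_REQ_SIZE - 1));

        while (size > EX_MAX_PER_TXD) {
                printf("desc: %u bytes at 0x%llx\n", max_data,
                       (unsigned long long)dma);
                dma += max_data;
                size -= max_data;
                max_data = EX_MAX_PER_TXD_ALIGNED;
        }
        printf("desc: %u bytes at 0x%llx (last)\n", size,
               (unsigned long long)dma);
}

int main(void)
{
        /* 30000 bytes starting 256 bytes short of a 4K boundary:
         * 12544 (= 12288 + 256), then 12288, then 5168 bytes.
         */
        ex_split(0x10000f00, 30000);
        return 0;
}
#endif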
1864
1865/**
1866 * ice_tx_csum - Enable Tx checksum offloads
1867 * @first: pointer to the first Tx buffer of the packet
1868 * @off: pointer to struct that holds offload parameters
1869 *
1870 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1871 */
1872static
1873int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1874{
1875        u32 l4_len = 0, l3_len = 0, l2_len = 0;
1876        struct sk_buff *skb = first->skb;
1877        union {
1878                struct iphdr *v4;
1879                struct ipv6hdr *v6;
1880                unsigned char *hdr;
1881        } ip;
1882        union {
1883                struct tcphdr *tcp;
1884                unsigned char *hdr;
1885        } l4;
1886        __be16 frag_off, protocol;
1887        unsigned char *exthdr;
1888        u32 offset, cmd = 0;
1889        u8 l4_proto = 0;
1890
1891        if (skb->ip_summed != CHECKSUM_PARTIAL)
1892                return 0;
1893
1894        ip.hdr = skb_network_header(skb);
1895        l4.hdr = skb_transport_header(skb);
1896
1897        /* compute outer L2 header size */
1898        l2_len = ip.hdr - skb->data;
1899        offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1900
1901        protocol = vlan_get_protocol(skb);
1902
1903        if (protocol == htons(ETH_P_IP))
1904                first->tx_flags |= ICE_TX_FLAGS_IPV4;
1905        else if (protocol == htons(ETH_P_IPV6))
1906                first->tx_flags |= ICE_TX_FLAGS_IPV6;
1907
1908        if (skb->encapsulation) {
1909                bool gso_ena = false;
1910                u32 tunnel = 0;
1911
1912                /* define outer network header type */
1913                if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1914                        tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1915                                  ICE_TX_CTX_EIPT_IPV4 :
1916                                  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1917                        l4_proto = ip.v4->protocol;
1918                } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1919                        tunnel |= ICE_TX_CTX_EIPT_IPV6;
1920                        exthdr = ip.hdr + sizeof(*ip.v6);
1921                        l4_proto = ip.v6->nexthdr;
1922                        if (l4.hdr != exthdr)
1923                                ipv6_skip_exthdr(skb, exthdr - skb->data,
1924                                                 &l4_proto, &frag_off);
1925                }
1926
1927                /* define outer transport */
1928                switch (l4_proto) {
1929                case IPPROTO_UDP:
1930                        tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1931                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1932                        break;
1933                case IPPROTO_GRE:
1934                        tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1935                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1936                        break;
1937                case IPPROTO_IPIP:
1938                case IPPROTO_IPV6:
1939                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1940                        l4.hdr = skb_inner_network_header(skb);
1941                        break;
1942                default:
1943                        if (first->tx_flags & ICE_TX_FLAGS_TSO)
1944                                return -1;
1945
1946                        skb_checksum_help(skb);
1947                        return 0;
1948                }
1949
1950                /* compute outer L3 header size */
1951                tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1952                          ICE_TXD_CTX_QW0_EIPLEN_S;
1953
1954                /* switch IP header pointer from outer to inner header */
1955                ip.hdr = skb_inner_network_header(skb);
1956
1957                /* compute tunnel header size */
1958                tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1959                           ICE_TXD_CTX_QW0_NATLEN_S;
1960
1961                gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1962                /* indicate if we need to offload outer UDP header */
1963                if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1964                    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1965                        tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1966
1967                /* record tunnel offload values */
1968                off->cd_tunnel_params |= tunnel;
1969
1970                /* set DTYP=1 to indicate that it's a Tx context descriptor
1971                 * in IPsec tunnel mode with Tx offloads in Quad word 1
1972                 */
1973                off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1974
1975                /* switch L4 header pointer from outer to inner */
1976                l4.hdr = skb_inner_transport_header(skb);
1977                l4_proto = 0;
1978
1979                /* reset type as we transition from outer to inner headers */
1980                first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1981                if (ip.v4->version == 4)
1982                        first->tx_flags |= ICE_TX_FLAGS_IPV4;
1983                if (ip.v6->version == 6)
1984                        first->tx_flags |= ICE_TX_FLAGS_IPV6;
1985        }
1986
1987        /* Enable IP checksum offloads */
1988        if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1989                l4_proto = ip.v4->protocol;
1990                /* the stack computes the IP header already, the only time we
1991                 * need the hardware to recompute it is in the case of TSO.
1992                 */
1993                if (first->tx_flags & ICE_TX_FLAGS_TSO)
1994                        cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1995                else
1996                        cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1997
1998        } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1999                cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
2000                exthdr = ip.hdr + sizeof(*ip.v6);
2001                l4_proto = ip.v6->nexthdr;
2002                if (l4.hdr != exthdr)
2003                        ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
2004                                         &frag_off);
2005        } else {
2006                return -1;
2007        }
2008
2009        /* compute inner L3 header size */
2010        l3_len = l4.hdr - ip.hdr;
2011        offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
2012
2013        /* Enable L4 checksum offloads */
2014        switch (l4_proto) {
2015        case IPPROTO_TCP:
2016                /* enable checksum offloads */
2017                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2018                l4_len = l4.tcp->doff;
2019                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2020                break;
2021        case IPPROTO_UDP:
2022                /* enable UDP checksum offload */
2023                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2024                l4_len = (sizeof(struct udphdr) >> 2);
2025                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2026                break;
2027        case IPPROTO_SCTP:
2028                /* enable SCTP checksum offload */
2029                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2030                l4_len = sizeof(struct sctphdr) >> 2;
2031                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2032                break;
2033
2034        default:
2035                if (first->tx_flags & ICE_TX_FLAGS_TSO)
2036                        return -1;
2037                skb_checksum_help(skb);
2038                return 0;
2039        }
2040
2041        off->td_cmd |= cmd;
2042        off->td_offset |= offset;
2043        return 1;
2044}
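
/* Worked example (illustrative only): the "offset" word built above carries
 * the header lengths in hardware units -- MACLEN in 2-byte words, IPLEN and
 * L4LEN in 4-byte words.  For plain Ethernet + IPv4 + TCP with no options
 * that is 14/2 = 7, 20/4 = 5 and doff = 5.  The field positions below
 * (bits 0, 7 and 14) are assumptions restated for the example, not taken
 * from this file.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_LEN_MACLEN_S 0       /* assumed ICE_TX_DESC_LEN_MACLEN_S */
#define EX_LEN_IPLEN_S  7       /* assumed ICE_TX_DESC_LEN_IPLEN_S  */
#define EX_LEN_L4_LEN_S 14      /* assumed ICE_TX_DESC_LEN_L4_LEN_S */

int main(void)
{
        uint32_t l2_len = 14, l3_len = 20, tcp_doff = 5;
        uint32_t offset = 0;

        offset |= (l2_len / 2) << EX_LEN_MACLEN_S;      /* MACLEN = 7 */
        offset |= (l3_len / 4) << EX_LEN_IPLEN_S;       /* IPLEN  = 5 */
        offset |= tcp_doff << EX_LEN_L4_LEN_S;          /* L4LEN  = 5 */

        printf("offset = 0x%x\n", offset);              /* 0x14287 */
        return 0;
}
#endif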
2045
2046/**
2047 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
2048 * @tx_ring: ring to send buffer on
2049 * @first: pointer to struct ice_tx_buf
2050 *
2051 * Checks the skb and sets up the corresponding generic transmit flags
2052 * related to VLAN tagging for the HW (VLAN, DCB, etc.).
2053 */
2054static void
2055ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
2056{
2057        struct sk_buff *skb = first->skb;
2058
2059        /* nothing left to do, software offloaded VLAN */
2060        if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
2061                return;
2062
2063        /* currently, we always assume 802.1Q for VLAN insertion as VLAN
2064         * insertion for 802.1AD is not supported
2065         */
2066        if (skb_vlan_tag_present(skb)) {
2067                first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
2068                first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
2069        }
2070
2071        ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
2072}
2073
2074/**
2075 * ice_tso - computes mss and TSO length to prepare for TSO
2076 * @first: pointer to struct ice_tx_buf
2077 * @off: pointer to struct that holds offload parameters
2078 *
2079 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
2080 */
2081static
2082int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2083{
2084        struct sk_buff *skb = first->skb;
2085        union {
2086                struct iphdr *v4;
2087                struct ipv6hdr *v6;
2088                unsigned char *hdr;
2089        } ip;
2090        union {
2091                struct tcphdr *tcp;
2092                struct udphdr *udp;
2093                unsigned char *hdr;
2094        } l4;
2095        u64 cd_mss, cd_tso_len;
2096        u32 paylen;
2097        u8 l4_start;
2098        int err;
2099
2100        if (skb->ip_summed != CHECKSUM_PARTIAL)
2101                return 0;
2102
2103        if (!skb_is_gso(skb))
2104                return 0;
2105
2106        err = skb_cow_head(skb, 0);
2107        if (err < 0)
2108                return err;
2109
2110        /* cppcheck-suppress unreadVariable */
2111        ip.hdr = skb_network_header(skb);
2112        l4.hdr = skb_transport_header(skb);
2113
2114        /* initialize outer IP header fields */
2115        if (ip.v4->version == 4) {
2116                ip.v4->tot_len = 0;
2117                ip.v4->check = 0;
2118        } else {
2119                ip.v6->payload_len = 0;
2120        }
2121
2122        if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2123                                         SKB_GSO_GRE_CSUM |
2124                                         SKB_GSO_IPXIP4 |
2125                                         SKB_GSO_IPXIP6 |
2126                                         SKB_GSO_UDP_TUNNEL |
2127                                         SKB_GSO_UDP_TUNNEL_CSUM)) {
2128                if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2129                    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2130                        l4.udp->len = 0;
2131
2132                        /* determine offset of outer transport header */
2133                        l4_start = (u8)(l4.hdr - skb->data);
2134
2135                        /* remove payload length from outer checksum */
2136                        paylen = skb->len - l4_start;
2137                        csum_replace_by_diff(&l4.udp->check,
2138                                             (__force __wsum)htonl(paylen));
2139                }
2140
2141                /* reset pointers to inner headers */
2142
2143                /* cppcheck-suppress unreadVariable */
2144                ip.hdr = skb_inner_network_header(skb);
2145                l4.hdr = skb_inner_transport_header(skb);
2146
2147                /* initialize inner IP header fields */
2148                if (ip.v4->version == 4) {
2149                        ip.v4->tot_len = 0;
2150                        ip.v4->check = 0;
2151                } else {
2152                        ip.v6->payload_len = 0;
2153                }
2154        }
2155
2156        /* determine offset of transport header */
2157        l4_start = (u8)(l4.hdr - skb->data);
2158
2159        /* remove payload length from checksum */
2160        paylen = skb->len - l4_start;
2161
2162        if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2163                csum_replace_by_diff(&l4.udp->check,
2164                                     (__force __wsum)htonl(paylen));
2165                /* compute length of UDP segmentation header */
2166                off->header_len = (u8)sizeof(struct udphdr) + l4_start;
2167        } else {
2168                csum_replace_by_diff(&l4.tcp->check,
2169                                     (__force __wsum)htonl(paylen));
2170                /* compute length of TCP segmentation header */
2171                off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2172        }
2173
2174        /* update gso_segs and bytecount */
2175        first->gso_segs = skb_shinfo(skb)->gso_segs;
2176        first->bytecount += (first->gso_segs - 1) * off->header_len;
2177
2178        cd_tso_len = skb->len - off->header_len;
2179        cd_mss = skb_shinfo(skb)->gso_size;
2180
2181        /* record cdesc_qw1 with TSO parameters */
2182        off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2183                             (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2184                             (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2185                             (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2186        first->tx_flags |= ICE_TX_FLAGS_TSO;
2187        return 1;
2188}
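
/* Worked example (illustrative only): for TSO the byte accounting above adds
 * one extra copy of the headers per additional segment, and the context
 * descriptor carries the payload length (skb->len minus the headers) and the
 * MSS.  Quick arithmetic check with made-up frame sizes:
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned int header_len = 54;           /* 14 ETH + 20 IPv4 + 20 TCP */
        unsigned int gso_size = 1448;           /* MSS */
        unsigned int gso_segs = 2;
        unsigned int skb_len = header_len + gso_segs * gso_size;       /* 2950 */
        unsigned int bytecount = skb_len + (gso_segs - 1) * header_len;
        unsigned int cd_tso_len = skb_len - header_len;

        /* 3004 wire bytes (two 1502-byte frames) and 2896 bytes of payload */
        printf("bytecount=%u cd_tso_len=%u\n", bytecount, cd_tso_len);
        return 0;
}
#endif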
2189
2190/**
2191 * ice_txd_use_count  - estimate the number of descriptors needed for Tx
2192 * @size: transmit request size in bytes
2193 *
2194 * Due to hardware alignment restrictions (4K alignment), we need to
2195 * assume that we can have no more than 12K of data per descriptor, even
2196 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2197 * Thus, we need to divide by 12K. But division is slow! Instead,
2198 * we decompose the operation into shifts and one relatively cheap
2199 * multiply operation.
2200 *
2201 * To divide by 12K, we first divide by 4K, then divide by 3:
2202 *     To divide by 4K, shift right by 12 bits
2203 *     To divide by 3, multiply by 85, then divide by 256
2204 *     (Divide by 256 is done by shifting right by 8 bits)
2205 * Finally, we add one to round up. Because 256 isn't an exact multiple of
2206 * 3, we'll underestimate near each multiple of 12K. This is actually more
2207 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
2208 * segment. For our purposes this is accurate out to 1M which is orders of
2209 * magnitude greater than our largest possible GSO size.
2210 *
2211 * This would then be implemented as:
2212 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
2213 *
2214 * Since multiplication and division are commutative, we can reorder
2215 * operations into:
2216 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2217 */
2218static unsigned int ice_txd_use_count(unsigned int size)
2219{
2220        return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2221}
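
/* Illustrative check (not part of the driver): the multiply-and-shift above
 * is a cheap stand-in for dividing by 12K.  The standalone loop below
 * compares it with the exact division for every size up to 1M, the range the
 * comment says the approximation is good for; it only ever falls one short,
 * which the 4K - 1 of per-descriptor slack absorbs.
 */
#if 0
#include <stdio.h>

static unsigned int ex_txd_use_count(unsigned int size)
{
        return ((size * 85) >> 20) + 1; /* + 1 stands in for ICE_DESCS_FOR_SKB_DATA_PTR */
}

int main(void)
{
        unsigned int size;

        for (size = 1; size <= (1u << 20); size++) {
                unsigned int exact = size / (12 * 1024) + 1;
                unsigned int approx = ex_txd_use_count(size);

                if (approx != exact && approx != exact - 1) {
                        printf("off by more than one at %u\n", size);
                        return 1;
                }
        }
        printf("multiply-shift tracks /12K within one descriptor up to 1M\n");
        return 0;
}
#endif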
2222
2223/**
2224 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2225 * @skb: send buffer
2226 *
2227 * Returns number of data descriptors needed for this skb.
2228 */
2229static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2230{
2231        const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2232        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2233        unsigned int count = 0, size = skb_headlen(skb);
2234
2235        for (;;) {
2236                count += ice_txd_use_count(size);
2237
2238                if (!nr_frags--)
2239                        break;
2240
2241                size = skb_frag_size(frag++);
2242        }
2243
2244        return count;
2245}
2246
2247/**
2248 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2249 * @skb: send buffer
2250 *
2251 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2252 * and so we need to figure out the cases where we need to linearize the skb.
2253 *
2254 * For TSO we need to count the TSO header and segment payload separately.
2255 * As such we need to check cases where we have 7 fragments or more as we
2256 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2257 * the segment payload in the first descriptor, and another 7 for the
2258 * fragments.
2259 */
2260static bool __ice_chk_linearize(struct sk_buff *skb)
2261{
2262        const skb_frag_t *frag, *stale;
2263        int nr_frags, sum;
2264
2265        /* no need to check if number of frags is less than 7 */
2266        nr_frags = skb_shinfo(skb)->nr_frags;
2267        if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2268                return false;
2269
2270        /* We need to walk through the list and validate that each group
2271         * of 6 fragments totals at least gso_size.
2272         */
2273        nr_frags -= ICE_MAX_BUF_TXD - 2;
2274        frag = &skb_shinfo(skb)->frags[0];
2275
2276        /* Initialize size to the negative value of gso_size minus 1. We
2277         * use this as the worst case scenario in which the frag ahead
2278         * of us only provides one byte which is why we are limited to 6
2279         * descriptors for a single transmit as the header and previous
2280         * fragment are already consuming 2 descriptors.
2281         */
2282        sum = 1 - skb_shinfo(skb)->gso_size;
2283
2284        /* Add size of frags 0 through 4 to create our initial sum */
2285        sum += skb_frag_size(frag++);
2286        sum += skb_frag_size(frag++);
2287        sum += skb_frag_size(frag++);
2288        sum += skb_frag_size(frag++);
2289        sum += skb_frag_size(frag++);
2290
2291        /* Walk through fragments adding latest fragment, testing it, and
2292         * then removing stale fragments from the sum.
2293         */
2294        for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2295                int stale_size = skb_frag_size(stale);
2296
2297                sum += skb_frag_size(frag++);
2298
2299                /* The stale fragment may present us with a smaller
2300                 * descriptor than the actual fragment size. To account
2301                 * for that we need to remove all the data on the front and
2302                 * figure out what the remainder would be in the last
2303                 * descriptor associated with the fragment.
2304                 */
2305                if (stale_size > ICE_MAX_DATA_PER_TXD) {
2306                        int align_pad = -(skb_frag_off(stale)) &
2307                                        (ICE_MAX_READ_REQ_SIZE - 1);
2308
2309                        sum -= align_pad;
2310                        stale_size -= align_pad;
2311
2312                        do {
2313                                sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2314                                stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2315                        } while (stale_size > ICE_MAX_DATA_PER_TXD);
2316                }
2317
2318                /* if sum is negative we failed to make sufficient progress */
2319                if (sum < 0)
2320                        return true;
2321
2322                if (!nr_frags--)
2323                        break;
2324
2325                sum -= stale_size;
2326        }
2327
2328        return false;
2329}
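
/* Illustrative sketch (not part of the driver): ignoring the oversized-
 * fragment adjustment, the walk above checks that every window of six
 * consecutive fragments carries roughly a full gso_size worth of bytes,
 * because the header and the previous fragment already use two of the eight
 * DMA buffers the hardware allows per packet.  Simplified standalone version
 * with hypothetical names:
 */
#if 0
#include <stdio.h>
#include <stdbool.h>

static bool ex_needs_linearize(const int *frag, int nr_frags, int gso_size)
{
        int sum, i;

        if (nr_frags < 7)
                return false;

        /* seed with the first five fragments, minus the required payload */
        sum = 1 - gso_size;
        for (i = 0; i < 5; i++)
                sum += frag[i];

        /* slide a six-fragment window across the remaining fragments */
        for (i = 5; i < nr_frags; i++) {
                sum += frag[i];
                if (sum < 0)
                        return true;    /* window too small, must linearize */
                sum -= frag[i - 5];
        }
        return false;
}

int main(void)
{
        int big[]   = { 4096, 4096, 4096, 4096, 4096, 4096, 4096, 4096, 4096 };
        int small[] = { 256, 256, 256, 256, 256, 256, 256, 256, 256 };

        printf("%d\n", ex_needs_linearize(big, 9, 1448));       /* 0 */
        printf("%d\n", ex_needs_linearize(small, 9, 9000));     /* 1 */
        return 0;
}
#endif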
2330
2331/**
2332 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2333 * @skb:      send buffer
2334 * @count:    number of buffers used
2335 *
2336 * Note: Our HW can't scatter-gather more than 8 fragments to build
2337 * a packet on the wire and so we need to figure out the cases where we
2338 * need to linearize the skb.
2339 */
2340static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2341{
2342        /* Both TSO and single send will work if count is less than 8 */
2343        if (likely(count < ICE_MAX_BUF_TXD))
2344                return false;
2345
2346        if (skb_is_gso(skb))
2347                return __ice_chk_linearize(skb);
2348
2349        /* we can support up to 8 data buffers for a single send */
2350        return count != ICE_MAX_BUF_TXD;
2351}
2352
2353/**
2354 * ice_xmit_frame_ring - Sends buffer on Tx ring
2355 * @skb: send buffer
2356 * @tx_ring: ring to send buffer on
2357 *
2358 * Returns NETDEV_TX_OK if sent, else an error code
2359 */
2360static netdev_tx_t
2361ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2362{
2363        struct ice_tx_offload_params offload = { 0 };
2364        struct ice_vsi *vsi = tx_ring->vsi;
2365        struct ice_tx_buf *first;
2366        unsigned int count;
2367        int tso, csum;
2368
2369        count = ice_xmit_desc_count(skb);
2370        if (ice_chk_linearize(skb, count)) {
2371                if (__skb_linearize(skb))
2372                        goto out_drop;
2373                count = ice_txd_use_count(skb->len);
2374                tx_ring->tx_stats.tx_linearize++;
2375        }
2376
2377        /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2378         *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2379         *       + 4 desc gap to avoid the cache line where head is,
2380         *       + 1 desc for context descriptor,
2381         * otherwise try next time
2382         */
2383        if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2384                              ICE_DESCS_FOR_CTX_DESC)) {
2385                tx_ring->tx_stats.tx_busy++;
2386                return NETDEV_TX_BUSY;
2387        }
2388
2389        offload.tx_ring = tx_ring;
2390
2391        /* record the location of the first descriptor for this packet */
2392        first = &tx_ring->tx_buf[tx_ring->next_to_use];
2393        first->skb = skb;
2394        first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2395        first->gso_segs = 1;
2396        first->tx_flags = 0;
2397
2398        /* prepare the VLAN tagging flags for Tx */
2399        ice_tx_prepare_vlan_flags(tx_ring, first);
2400
2401        /* set up TSO offload */
2402        tso = ice_tso(first, &offload);
2403        if (tso < 0)
2404                goto out_drop;
2405
2406        /* always set up Tx checksum offload */
2407        csum = ice_tx_csum(first, &offload);
2408        if (csum < 0)
2409                goto out_drop;
2410
2411        /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2412        if (unlikely(skb->priority == TC_PRIO_CONTROL &&
2413                     vsi->type == ICE_VSI_PF &&
2414                     vsi->port_info->is_sw_lldp))
2415                offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2416                                        ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2417                                        ICE_TXD_CTX_QW1_CMD_S);
2418
2419        if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2420                struct ice_tx_ctx_desc *cdesc;
2421                u16 i = tx_ring->next_to_use;
2422
2423                /* grab the next descriptor */
2424                cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2425                i++;
2426                tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2427
2428                /* setup context descriptor */
2429                cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2430                cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2431                cdesc->rsvd = cpu_to_le16(0);
2432                cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2433        }
2434
2435        ice_tx_map(tx_ring, first, &offload);
2436        return NETDEV_TX_OK;
2437
2438out_drop:
2439        dev_kfree_skb_any(skb);
2440        return NETDEV_TX_OK;
2441}
2442
2443/**
2444 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2445 * @skb: send buffer
2446 * @netdev: network interface device structure
2447 *
2448 * Returns NETDEV_TX_OK if sent, else an error code
2449 */
2450netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2451{
2452        struct ice_netdev_priv *np = netdev_priv(netdev);
2453        struct ice_vsi *vsi = np->vsi;
2454        struct ice_ring *tx_ring;
2455
2456        tx_ring = vsi->tx_rings[skb->queue_mapping];
2457
2458        /* hardware can't handle really short frames, hardware padding works
2459         * beyond this point
2460         */
2461        if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2462                return NETDEV_TX_OK;
2463
2464        return ice_xmit_frame_ring(skb, tx_ring);
2465}
2466
2467/**
2468 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2469 * @tx_ring: tx_ring to clean
2470 */
2471void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
2472{
2473        struct ice_vsi *vsi = tx_ring->vsi;
2474        s16 i = tx_ring->next_to_clean;
2475        int budget = ICE_DFLT_IRQ_WORK;
2476        struct ice_tx_desc *tx_desc;
2477        struct ice_tx_buf *tx_buf;
2478
2479        tx_buf = &tx_ring->tx_buf[i];
2480        tx_desc = ICE_TX_DESC(tx_ring, i);
2481        i -= tx_ring->count;
2482
2483        do {
2484                struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2485
2486                /* if next_to_watch is not set then there is no pending work */
2487                if (!eop_desc)
2488                        break;
2489
2490                /* prevent any other reads prior to eop_desc */
2491                smp_rmb();
2492
2493                /* if the descriptor isn't done, no work to do */
2494                if (!(eop_desc->cmd_type_offset_bsz &
2495                      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2496                        break;
2497
2498                /* clear next_to_watch to prevent false hangs */
2499                tx_buf->next_to_watch = NULL;
2500                tx_desc->buf_addr = 0;
2501                tx_desc->cmd_type_offset_bsz = 0;
2502
2503                /* move past filter desc */
2504                tx_buf++;
2505                tx_desc++;
2506                i++;
2507                if (unlikely(!i)) {
2508                        i -= tx_ring->count;
2509                        tx_buf = tx_ring->tx_buf;
2510                        tx_desc = ICE_TX_DESC(tx_ring, 0);
2511                }
2512
2513                /* unmap the data header */
2514                if (dma_unmap_len(tx_buf, len))
2515                        dma_unmap_single(tx_ring->dev,
2516                                         dma_unmap_addr(tx_buf, dma),
2517                                         dma_unmap_len(tx_buf, len),
2518                                         DMA_TO_DEVICE);
2519                if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
2520                        devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2521
2522                /* reset the buffer and clear next_to_watch to prevent false hangs */
2523                tx_buf->raw_buf = NULL;
2524                tx_buf->tx_flags = 0;
2525                tx_buf->next_to_watch = NULL;
2526                dma_unmap_len_set(tx_buf, len, 0);
2527                tx_desc->buf_addr = 0;
2528                tx_desc->cmd_type_offset_bsz = 0;
2529
2530                /* move past eop_desc for start of next FD desc */
2531                tx_buf++;
2532                tx_desc++;
2533                i++;
2534                if (unlikely(!i)) {
2535                        i -= tx_ring->count;
2536                        tx_buf = tx_ring->tx_buf;
2537                        tx_desc = ICE_TX_DESC(tx_ring, 0);
2538                }
2539
2540                budget--;
2541        } while (likely(budget));
2542
2543        i += tx_ring->count;
2544        tx_ring->next_to_clean = i;
2545
2546        /* re-enable interrupt if needed */
2547        ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
2548}
2549