linux/drivers/net/ethernet/intel/ice/ice_txrx.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4/* The driver transmit and receive code */
   5
   6#include <linux/mm.h>
   7#include <linux/netdevice.h>
   8#include <linux/prefetch.h>
   9#include <linux/bpf_trace.h>
  10#include <net/dsfield.h>
  11#include <net/mpls.h>
  12#include <net/xdp.h>
  13#include "ice_txrx_lib.h"
  14#include "ice_lib.h"
  15#include "ice.h"
  16#include "ice_trace.h"
  17#include "ice_dcb_lib.h"
  18#include "ice_xsk.h"
  19#include "ice_eswitch.h"
  20
  21#define ICE_RX_HDR_SIZE         256
  22
  23#define FDIR_DESC_RXDID 0x40
  24#define ICE_FDIR_CLEAN_DELAY 10
  25
  26/**
  27 * ice_prgm_fdir_fltr - Program a Flow Director filter
  28 * @vsi: VSI to send dummy packet
  29 * @fdir_desc: flow director descriptor
  30 * @raw_packet: allocated buffer for flow director
  31 */
  32int
  33ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
  34                   u8 *raw_packet)
  35{
  36        struct ice_tx_buf *tx_buf, *first;
  37        struct ice_fltr_desc *f_desc;
  38        struct ice_tx_desc *tx_desc;
  39        struct ice_tx_ring *tx_ring;
  40        struct device *dev;
  41        dma_addr_t dma;
  42        u32 td_cmd;
  43        u16 i;
  44
  45        /* VSI and Tx ring */
  46        if (!vsi)
  47                return -ENOENT;
  48        tx_ring = vsi->tx_rings[0];
  49        if (!tx_ring || !tx_ring->desc)
  50                return -ENOENT;
  51        dev = tx_ring->dev;
  52
  53        /* we are using two descriptors to add/del a filter and we can wait */
  54        for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
  55                if (!i)
  56                        return -EAGAIN;
  57                msleep_interruptible(1);
  58        }
  59
  60        dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
  61                             DMA_TO_DEVICE);
  62
  63        if (dma_mapping_error(dev, dma))
  64                return -EINVAL;
  65
  66        /* grab the next descriptor */
  67        i = tx_ring->next_to_use;
  68        first = &tx_ring->tx_buf[i];
  69        f_desc = ICE_TX_FDIRDESC(tx_ring, i);
  70        memcpy(f_desc, fdir_desc, sizeof(*f_desc));
  71
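             /* The slot after the programming descriptor holds a data
              * descriptor pointing at the dummy raw packet, so each filter
              * add/delete consumes two ring entries (hence the wait for two
              * free descriptors above).
              */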
  72        i++;
  73        i = (i < tx_ring->count) ? i : 0;
  74        tx_desc = ICE_TX_DESC(tx_ring, i);
  75        tx_buf = &tx_ring->tx_buf[i];
  76
  77        i++;
  78        tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
  79
  80        memset(tx_buf, 0, sizeof(*tx_buf));
  81        dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
  82        dma_unmap_addr_set(tx_buf, dma, dma);
  83
  84        tx_desc->buf_addr = cpu_to_le64(dma);
  85        td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
  86                 ICE_TX_DESC_CMD_RE;
  87
  88        tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
  89        tx_buf->raw_buf = raw_packet;
  90
  91        tx_desc->cmd_type_offset_bsz =
  92                ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
  93
  94        /* Force memory write to complete before letting h/w know
  95         * there are new descriptors to fetch.
  96         */
  97        wmb();
  98
  99        /* mark the data descriptor to be watched */
 100        first->next_to_watch = tx_desc;
 101
 102        writel(tx_ring->next_to_use, tx_ring->tail);
 103
 104        return 0;
 105}
 106
 107/**
 108 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 109 * @ring: the ring that owns the buffer
 110 * @tx_buf: the buffer to free
 111 */
 112static void
 113ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
 114{
 115        if (tx_buf->skb) {
 116                if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
 117                        devm_kfree(ring->dev, tx_buf->raw_buf);
 118                else if (ice_ring_is_xdp(ring))
 119                        page_frag_free(tx_buf->raw_buf);
 120                else
 121                        dev_kfree_skb_any(tx_buf->skb);
 122                if (dma_unmap_len(tx_buf, len))
 123                        dma_unmap_single(ring->dev,
 124                                         dma_unmap_addr(tx_buf, dma),
 125                                         dma_unmap_len(tx_buf, len),
 126                                         DMA_TO_DEVICE);
 127        } else if (dma_unmap_len(tx_buf, len)) {
 128                dma_unmap_page(ring->dev,
 129                               dma_unmap_addr(tx_buf, dma),
 130                               dma_unmap_len(tx_buf, len),
 131                               DMA_TO_DEVICE);
 132        }
 133
 134        tx_buf->next_to_watch = NULL;
 135        tx_buf->skb = NULL;
 136        dma_unmap_len_set(tx_buf, len, 0);
 137        /* tx_buf must be completely set up in the transmit path */
 138}
 139
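     /**
      * txring_txq - Return the netdev Tx queue backing a given Tx ring
      * @ring: Tx ring to look up the netdev queue for
      */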
 140static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
 141{
 142        return netdev_get_tx_queue(ring->netdev, ring->q_index);
 143}
 144
 145/**
 146 * ice_clean_tx_ring - Free any empty Tx buffers
 147 * @tx_ring: ring to be cleaned
 148 */
 149void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
 150{
 151        u32 size;
 152        u16 i;
 153
 154        if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
 155                ice_xsk_clean_xdp_ring(tx_ring);
 156                goto tx_skip_free;
 157        }
 158
 159        /* ring already cleared, nothing to do */
 160        if (!tx_ring->tx_buf)
 161                return;
 162
 163        /* Free all the Tx ring sk_buffs */
 164        for (i = 0; i < tx_ring->count; i++)
 165                ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
 166
 167tx_skip_free:
 168        memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
 169
 170        size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
 171                     PAGE_SIZE);
 172        /* Zero out the descriptor ring */
 173        memset(tx_ring->desc, 0, size);
 174
 175        tx_ring->next_to_use = 0;
 176        tx_ring->next_to_clean = 0;
 177        tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
 178        tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;
 179
 180        if (!tx_ring->netdev)
 181                return;
 182
 183        /* cleanup Tx queue statistics */
 184        netdev_tx_reset_queue(txring_txq(tx_ring));
 185}
 186
 187/**
 188 * ice_free_tx_ring - Free Tx resources per queue
 189 * @tx_ring: Tx descriptor ring for a specific queue
 190 *
 191 * Free all transmit software resources
 192 */
 193void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
 194{
 195        u32 size;
 196
 197        ice_clean_tx_ring(tx_ring);
 198        devm_kfree(tx_ring->dev, tx_ring->tx_buf);
 199        tx_ring->tx_buf = NULL;
 200
 201        if (tx_ring->desc) {
 202                size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
 203                             PAGE_SIZE);
 204                dmam_free_coherent(tx_ring->dev, size,
 205                                   tx_ring->desc, tx_ring->dma);
 206                tx_ring->desc = NULL;
 207        }
 208}
 209
 210/**
 211 * ice_clean_tx_irq - Reclaim resources after transmit completes
 212 * @tx_ring: Tx ring to clean
 213 * @napi_budget: Used to determine if we are in netpoll
 214 *
  215 * Returns true if there's any budget left (i.e. the clean is finished)
 216 */
 217static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
 218{
 219        unsigned int total_bytes = 0, total_pkts = 0;
 220        unsigned int budget = ICE_DFLT_IRQ_WORK;
 221        struct ice_vsi *vsi = tx_ring->vsi;
 222        s16 i = tx_ring->next_to_clean;
 223        struct ice_tx_desc *tx_desc;
 224        struct ice_tx_buf *tx_buf;
 225
 226        /* get the bql data ready */
 227        netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));
 228
 229        tx_buf = &tx_ring->tx_buf[i];
 230        tx_desc = ICE_TX_DESC(tx_ring, i);
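             /* Bias the index negative so the hot loop below can detect a ring
              * wrap with a simple "if (!i)" test instead of a modulo; the real
              * next_to_clean is recovered by adding the ring count back at the
              * end.
              */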
 231        i -= tx_ring->count;
 232
 233        prefetch(&vsi->state);
 234
 235        do {
 236                struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
 237
 238                /* if next_to_watch is not set then there is no work pending */
 239                if (!eop_desc)
 240                        break;
 241
 242                /* follow the guidelines of other drivers */
 243                prefetchw(&tx_buf->skb->users);
 244
 245                smp_rmb();      /* prevent any other reads prior to eop_desc */
 246
 247                ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
 248                /* if the descriptor isn't done, no work yet to do */
 249                if (!(eop_desc->cmd_type_offset_bsz &
 250                      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
 251                        break;
 252
 253                /* clear next_to_watch to prevent false hangs */
 254                tx_buf->next_to_watch = NULL;
 255
 256                /* update the statistics for this packet */
 257                total_bytes += tx_buf->bytecount;
 258                total_pkts += tx_buf->gso_segs;
 259
 260                /* free the skb */
 261                napi_consume_skb(tx_buf->skb, napi_budget);
 262
 263                /* unmap skb header data */
 264                dma_unmap_single(tx_ring->dev,
 265                                 dma_unmap_addr(tx_buf, dma),
 266                                 dma_unmap_len(tx_buf, len),
 267                                 DMA_TO_DEVICE);
 268
 269                /* clear tx_buf data */
 270                tx_buf->skb = NULL;
 271                dma_unmap_len_set(tx_buf, len, 0);
 272
 273                /* unmap remaining buffers */
 274                while (tx_desc != eop_desc) {
 275                        ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
 276                        tx_buf++;
 277                        tx_desc++;
 278                        i++;
 279                        if (unlikely(!i)) {
 280                                i -= tx_ring->count;
 281                                tx_buf = tx_ring->tx_buf;
 282                                tx_desc = ICE_TX_DESC(tx_ring, 0);
 283                        }
 284
 285                        /* unmap any remaining paged data */
 286                        if (dma_unmap_len(tx_buf, len)) {
 287                                dma_unmap_page(tx_ring->dev,
 288                                               dma_unmap_addr(tx_buf, dma),
 289                                               dma_unmap_len(tx_buf, len),
 290                                               DMA_TO_DEVICE);
 291                                dma_unmap_len_set(tx_buf, len, 0);
 292                        }
 293                }
 294                ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
 295
 296                /* move us one more past the eop_desc for start of next pkt */
 297                tx_buf++;
 298                tx_desc++;
 299                i++;
 300                if (unlikely(!i)) {
 301                        i -= tx_ring->count;
 302                        tx_buf = tx_ring->tx_buf;
 303                        tx_desc = ICE_TX_DESC(tx_ring, 0);
 304                }
 305
 306                prefetch(tx_desc);
 307
 308                /* update budget accounting */
 309                budget--;
 310        } while (likely(budget));
 311
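             /* undo the negative index bias applied before the loop */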
 312        i += tx_ring->count;
 313        tx_ring->next_to_clean = i;
 314
 315        ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
 316        netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);
 317
 318#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
 319        if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
 320                     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
 321                /* Make sure that anybody stopping the queue after this
 322                 * sees the new next_to_clean.
 323                 */
 324                smp_mb();
 325                if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&
 326                    !test_bit(ICE_VSI_DOWN, vsi->state)) {
 327                        netif_tx_wake_queue(txring_txq(tx_ring));
 328                        ++tx_ring->tx_stats.restart_q;
 329                }
 330        }
 331
 332        return !!budget;
 333}
 334
 335/**
 336 * ice_setup_tx_ring - Allocate the Tx descriptors
 337 * @tx_ring: the Tx ring to set up
 338 *
 339 * Return 0 on success, negative on error
 340 */
 341int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
 342{
 343        struct device *dev = tx_ring->dev;
 344        u32 size;
 345
 346        if (!dev)
 347                return -ENOMEM;
 348
 349        /* warn if we are about to overwrite the pointer */
 350        WARN_ON(tx_ring->tx_buf);
  351        tx_ring->tx_buf =
  352                devm_kcalloc(dev, tx_ring->count, sizeof(*tx_ring->tx_buf),
  353                             GFP_KERNEL);
 354        if (!tx_ring->tx_buf)
 355                return -ENOMEM;
 356
 357        /* round up to nearest page */
 358        size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
 359                     PAGE_SIZE);
 360        tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
 361                                            GFP_KERNEL);
 362        if (!tx_ring->desc) {
 363                dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
 364                        size);
 365                goto err;
 366        }
 367
 368        tx_ring->next_to_use = 0;
 369        tx_ring->next_to_clean = 0;
 370        tx_ring->tx_stats.prev_pkt = -1;
 371        return 0;
 372
 373err:
 374        devm_kfree(dev, tx_ring->tx_buf);
 375        tx_ring->tx_buf = NULL;
 376        return -ENOMEM;
 377}
 378
 379/**
 380 * ice_clean_rx_ring - Free Rx buffers
 381 * @rx_ring: ring to be cleaned
 382 */
 383void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
 384{
 385        struct device *dev = rx_ring->dev;
 386        u32 size;
 387        u16 i;
 388
 389        /* ring already cleared, nothing to do */
 390        if (!rx_ring->rx_buf)
 391                return;
 392
 393        if (rx_ring->skb) {
 394                dev_kfree_skb(rx_ring->skb);
 395                rx_ring->skb = NULL;
 396        }
 397
 398        if (rx_ring->xsk_pool) {
 399                ice_xsk_clean_rx_ring(rx_ring);
 400                goto rx_skip_free;
 401        }
 402
 403        /* Free all the Rx ring sk_buffs */
 404        for (i = 0; i < rx_ring->count; i++) {
 405                struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
 406
 407                if (!rx_buf->page)
 408                        continue;
 409
 410                /* Invalidate cache lines that may have been written to by
 411                 * device so that we avoid corrupting memory.
 412                 */
 413                dma_sync_single_range_for_cpu(dev, rx_buf->dma,
 414                                              rx_buf->page_offset,
 415                                              rx_ring->rx_buf_len,
 416                                              DMA_FROM_DEVICE);
 417
 418                /* free resources associated with mapping */
 419                dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
 420                                     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
 421                __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
 422
 423                rx_buf->page = NULL;
 424                rx_buf->page_offset = 0;
 425        }
 426
 427rx_skip_free:
 428        if (rx_ring->xsk_pool)
 429                memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
 430        else
 431                memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
 432
 433        /* Zero out the descriptor ring */
 434        size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
 435                     PAGE_SIZE);
 436        memset(rx_ring->desc, 0, size);
 437
 438        rx_ring->next_to_alloc = 0;
 439        rx_ring->next_to_clean = 0;
 440        rx_ring->next_to_use = 0;
 441}
 442
 443/**
 444 * ice_free_rx_ring - Free Rx resources
 445 * @rx_ring: ring to clean the resources from
 446 *
 447 * Free all receive software resources
 448 */
 449void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
 450{
 451        u32 size;
 452
 453        ice_clean_rx_ring(rx_ring);
 454        if (rx_ring->vsi->type == ICE_VSI_PF)
 455                if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
 456                        xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 457        rx_ring->xdp_prog = NULL;
 458        if (rx_ring->xsk_pool) {
 459                kfree(rx_ring->xdp_buf);
 460                rx_ring->xdp_buf = NULL;
 461        } else {
 462                kfree(rx_ring->rx_buf);
 463                rx_ring->rx_buf = NULL;
 464        }
 465
 466        if (rx_ring->desc) {
 467                size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
 468                             PAGE_SIZE);
 469                dmam_free_coherent(rx_ring->dev, size,
 470                                   rx_ring->desc, rx_ring->dma);
 471                rx_ring->desc = NULL;
 472        }
 473}
 474
 475/**
 476 * ice_setup_rx_ring - Allocate the Rx descriptors
 477 * @rx_ring: the Rx ring to set up
 478 *
 479 * Return 0 on success, negative on error
 480 */
 481int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
 482{
 483        struct device *dev = rx_ring->dev;
 484        u32 size;
 485
 486        if (!dev)
 487                return -ENOMEM;
 488
 489        /* warn if we are about to overwrite the pointer */
 490        WARN_ON(rx_ring->rx_buf);
 491        rx_ring->rx_buf =
 492                kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
 493        if (!rx_ring->rx_buf)
 494                return -ENOMEM;
 495
 496        /* round up to nearest page */
 497        size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
 498                     PAGE_SIZE);
 499        rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
 500                                            GFP_KERNEL);
 501        if (!rx_ring->desc) {
 502                dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
 503                        size);
 504                goto err;
 505        }
 506
 507        rx_ring->next_to_use = 0;
 508        rx_ring->next_to_clean = 0;
 509
 510        if (ice_is_xdp_ena_vsi(rx_ring->vsi))
 511                WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
 512
 513        if (rx_ring->vsi->type == ICE_VSI_PF &&
 514            !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
 515                if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
 516                                     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
 517                        goto err;
 518        return 0;
 519
 520err:
 521        kfree(rx_ring->rx_buf);
 522        rx_ring->rx_buf = NULL;
 523        return -ENOMEM;
 524}
 525
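     /**
      * ice_rx_frame_truesize - Report the truesize of an Rx frame buffer
      * @rx_ring: Rx ring the buffer belongs to
      * @size: packet length from the Rx descriptor (used only when PAGE_SIZE >= 8192)
      *
      * With PAGE_SIZE < 8192 every buffer occupies half a page; otherwise the
      * truesize is derived from the packet length, the configured headroom and
      * the skb_shared_info overhead.
      */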
 526static unsigned int
 527ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
 528{
 529        unsigned int truesize;
 530
 531#if (PAGE_SIZE < 8192)
 532        truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
 533#else
 534        truesize = rx_ring->rx_offset ?
 535                SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
 536                SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
 537                SKB_DATA_ALIGN(size);
 538#endif
 539        return truesize;
 540}
 541
 542/**
 543 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 544 * @rx_ring: Rx ring
 545 * @xdp: xdp_buff used as input to the XDP program
 546 * @xdp_prog: XDP program to run
 547 * @xdp_ring: ring to be used for XDP_TX action
 548 *
 549 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 550 */
 551static int
 552ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
 553            struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
 554{
 555        int err;
 556        u32 act;
 557
 558        act = bpf_prog_run_xdp(xdp_prog, xdp);
 559        switch (act) {
 560        case XDP_PASS:
 561                return ICE_XDP_PASS;
 562        case XDP_TX:
 563                if (static_branch_unlikely(&ice_xdp_locking_key))
 564                        spin_lock(&xdp_ring->tx_lock);
 565                err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
 566                if (static_branch_unlikely(&ice_xdp_locking_key))
 567                        spin_unlock(&xdp_ring->tx_lock);
 568                if (err == ICE_XDP_CONSUMED)
 569                        goto out_failure;
 570                return err;
 571        case XDP_REDIRECT:
 572                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
 573                if (err)
 574                        goto out_failure;
 575                return ICE_XDP_REDIR;
 576        default:
 577                bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
 578                fallthrough;
 579        case XDP_ABORTED:
 580out_failure:
 581                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 582                fallthrough;
 583        case XDP_DROP:
 584                return ICE_XDP_CONSUMED;
 585        }
 586}
 587
 588/**
 589 * ice_xdp_xmit - submit packets to XDP ring for transmission
 590 * @dev: netdev
 591 * @n: number of XDP frames to be transmitted
 592 * @frames: XDP frames to be transmitted
 593 * @flags: transmit flags
 594 *
  595 * Returns the number of frames successfully sent. Failed frames
  596 * will be freed by the XDP core.
  597 * For error cases, a negative errno code is returned and no frames
  598 * are transmitted (the caller must handle freeing the frames).
 599 */
 600int
 601ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 602             u32 flags)
 603{
 604        struct ice_netdev_priv *np = netdev_priv(dev);
 605        unsigned int queue_index = smp_processor_id();
 606        struct ice_vsi *vsi = np->vsi;
 607        struct ice_tx_ring *xdp_ring;
 608        int nxmit = 0, i;
 609
 610        if (test_bit(ICE_VSI_DOWN, vsi->state))
 611                return -ENETDOWN;
 612
 613        if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
 614                return -ENXIO;
 615
 616        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 617                return -EINVAL;
 618
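             /* When XDP Tx rings may be shared between CPUs, the locking static
              * key is set: fold the CPU id onto an available ring and serialize
              * access with that ring's lock.
              */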
 619        if (static_branch_unlikely(&ice_xdp_locking_key)) {
 620                queue_index %= vsi->num_xdp_txq;
 621                xdp_ring = vsi->xdp_rings[queue_index];
 622                spin_lock(&xdp_ring->tx_lock);
 623        } else {
 624                xdp_ring = vsi->xdp_rings[queue_index];
 625        }
 626
 627        for (i = 0; i < n; i++) {
 628                struct xdp_frame *xdpf = frames[i];
 629                int err;
 630
 631                err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
 632                if (err != ICE_XDP_TX)
 633                        break;
 634                nxmit++;
 635        }
 636
 637        if (unlikely(flags & XDP_XMIT_FLUSH))
 638                ice_xdp_ring_update_tail(xdp_ring);
 639
 640        if (static_branch_unlikely(&ice_xdp_locking_key))
 641                spin_unlock(&xdp_ring->tx_lock);
 642
 643        return nxmit;
 644}
 645
 646/**
 647 * ice_alloc_mapped_page - recycle or make a new page
 648 * @rx_ring: ring to use
 649 * @bi: rx_buf struct to modify
 650 *
 651 * Returns true if the page was successfully allocated or
 652 * reused.
 653 */
 654static bool
 655ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
 656{
 657        struct page *page = bi->page;
 658        dma_addr_t dma;
 659
 660        /* since we are recycling buffers we should seldom need to alloc */
 661        if (likely(page))
 662                return true;
 663
 664        /* alloc new page for storage */
 665        page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
 666        if (unlikely(!page)) {
 667                rx_ring->rx_stats.alloc_page_failed++;
 668                return false;
 669        }
 670
 671        /* map page for use */
 672        dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
 673                                 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
 674
 675        /* if mapping failed free memory back to system since
 676         * there isn't much point in holding memory we can't use
 677         */
 678        if (dma_mapping_error(rx_ring->dev, dma)) {
 679                __free_pages(page, ice_rx_pg_order(rx_ring));
 680                rx_ring->rx_stats.alloc_page_failed++;
 681                return false;
 682        }
 683
 684        bi->dma = dma;
 685        bi->page = page;
 686        bi->page_offset = rx_ring->rx_offset;
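             /* Take a large page refcount up front and track usage through
              * pagecnt_bias so buffer reuse does not require an atomic refcount
              * update on every frame.
              */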
 687        page_ref_add(page, USHRT_MAX - 1);
 688        bi->pagecnt_bias = USHRT_MAX;
 689
 690        return true;
 691}
 692
 693/**
 694 * ice_alloc_rx_bufs - Replace used receive buffers
 695 * @rx_ring: ring to place buffers on
 696 * @cleaned_count: number of buffers to replace
 697 *
 698 * Returns false if all allocations were successful, true if any fail. Returning
 699 * true signals to the caller that we didn't replace cleaned_count buffers and
 700 * there is more work to do.
 701 *
 702 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 703 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 704 * multiple tail writes per call.
 705 */
 706bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
 707{
 708        union ice_32b_rx_flex_desc *rx_desc;
 709        u16 ntu = rx_ring->next_to_use;
 710        struct ice_rx_buf *bi;
 711
 712        /* do nothing if no valid netdev defined */
 713        if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
 714            !cleaned_count)
 715                return false;
 716
 717        /* get the Rx descriptor and buffer based on next_to_use */
 718        rx_desc = ICE_RX_DESC(rx_ring, ntu);
 719        bi = &rx_ring->rx_buf[ntu];
 720
 721        do {
 722                /* if we fail here, we have work remaining */
 723                if (!ice_alloc_mapped_page(rx_ring, bi))
 724                        break;
 725
 726                /* sync the buffer for use by the device */
 727                dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
 728                                                 bi->page_offset,
 729                                                 rx_ring->rx_buf_len,
 730                                                 DMA_FROM_DEVICE);
 731
 732                /* Refresh the desc even if buffer_addrs didn't change
 733                 * because each write-back erases this info.
 734                 */
 735                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
 736
 737                rx_desc++;
 738                bi++;
 739                ntu++;
 740                if (unlikely(ntu == rx_ring->count)) {
 741                        rx_desc = ICE_RX_DESC(rx_ring, 0);
 742                        bi = rx_ring->rx_buf;
 743                        ntu = 0;
 744                }
 745
 746                /* clear the status bits for the next_to_use descriptor */
 747                rx_desc->wb.status_error0 = 0;
 748
 749                cleaned_count--;
 750        } while (cleaned_count);
 751
 752        if (rx_ring->next_to_use != ntu)
 753                ice_release_rx_desc(rx_ring, ntu);
 754
 755        return !!cleaned_count;
 756}
 757
 758/**
 759 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 760 * @rx_buf: Rx buffer to adjust
 761 * @size: Size of adjustment
 762 *
 763 * Update the offset within page so that Rx buf will be ready to be reused.
 764 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 765 * so the second half of page assigned to Rx buffer will be used, otherwise
 766 * the offset is moved by "size" bytes
 767 */
 768static void
 769ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
 770{
 771#if (PAGE_SIZE < 8192)
 772        /* flip page offset to other buffer */
 773        rx_buf->page_offset ^= size;
 774#else
 775        /* move offset up to the next cache line */
 776        rx_buf->page_offset += size;
 777#endif
 778}
 779
 780/**
 781 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 782 * @rx_buf: buffer containing the page
 783 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
 784 *
 785 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 786 * which will assign the current buffer to the buffer that next_to_alloc is
 787 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 788 * page freed
 789 */
 790static bool
 791ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
 792{
 793        unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
 794        struct page *page = rx_buf->page;
 795
 796        /* avoid re-using remote and pfmemalloc pages */
 797        if (!dev_page_is_reusable(page))
 798                return false;
 799
 800#if (PAGE_SIZE < 8192)
 801        /* if we are only owner of page we can reuse it */
 802        if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
 803                return false;
 804#else
 805#define ICE_LAST_OFFSET \
 806        (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
 807        if (rx_buf->page_offset > ICE_LAST_OFFSET)
 808                return false;
  809#endif /* PAGE_SIZE < 8192 */
 810
 811        /* If we have drained the page fragment pool we need to update
 812         * the pagecnt_bias and page count so that we fully restock the
 813         * number of references the driver holds.
 814         */
 815        if (unlikely(pagecnt_bias == 1)) {
 816                page_ref_add(page, USHRT_MAX - 1);
 817                rx_buf->pagecnt_bias = USHRT_MAX;
 818        }
 819
 820        return true;
 821}
 822
 823/**
 824 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 825 * @rx_ring: Rx descriptor ring to transact packets on
 826 * @rx_buf: buffer containing page to add
 827 * @skb: sk_buff to place the data into
 828 * @size: packet length from rx_desc
 829 *
 830 * This function will add the data contained in rx_buf->page to the skb.
 831 * It will just attach the page as a frag to the skb.
 832 * The function will then update the page offset.
 833 */
 834static void
 835ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 836                struct sk_buff *skb, unsigned int size)
 837{
 838#if (PAGE_SIZE >= 8192)
 839        unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
 840#else
 841        unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
 842#endif
 843
 844        if (!size)
 845                return;
 846        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
 847                        rx_buf->page_offset, size, truesize);
 848
 849        /* page is being used so we must update the page offset */
 850        ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 851}
 852
 853/**
 854 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 855 * @rx_ring: Rx descriptor ring to store buffers on
 856 * @old_buf: donor buffer to have page reused
 857 *
 858 * Synchronizes page for reuse by the adapter
 859 */
 860static void
 861ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
 862{
 863        u16 nta = rx_ring->next_to_alloc;
 864        struct ice_rx_buf *new_buf;
 865
 866        new_buf = &rx_ring->rx_buf[nta];
 867
 868        /* update, and store next to alloc */
 869        nta++;
 870        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 871
 872        /* Transfer page from old buffer to new buffer.
 873         * Move each member individually to avoid possible store
 874         * forwarding stalls and unnecessary copy of skb.
 875         */
 876        new_buf->dma = old_buf->dma;
 877        new_buf->page = old_buf->page;
 878        new_buf->page_offset = old_buf->page_offset;
 879        new_buf->pagecnt_bias = old_buf->pagecnt_bias;
 880}
 881
 882/**
 883 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 884 * @rx_ring: Rx descriptor ring to transact packets on
 885 * @size: size of buffer to add to skb
 886 * @rx_buf_pgcnt: rx_buf page refcount
 887 *
 888 * This function will pull an Rx buffer from the ring and synchronize it
 889 * for use by the CPU.
 890 */
 891static struct ice_rx_buf *
 892ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
 893               int *rx_buf_pgcnt)
 894{
 895        struct ice_rx_buf *rx_buf;
 896
 897        rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
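             /* The page refcount only matters for the reuse decision on systems
              * with PAGE_SIZE < 8192; larger pages are bounded by page_offset
              * instead (see ice_can_reuse_rx_page()).
              */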
 898        *rx_buf_pgcnt =
 899#if (PAGE_SIZE < 8192)
 900                page_count(rx_buf->page);
 901#else
 902                0;
 903#endif
 904        prefetchw(rx_buf->page);
 905
 906        if (!size)
 907                return rx_buf;
 908        /* we are reusing so sync this buffer for CPU use */
 909        dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
 910                                      rx_buf->page_offset, size,
 911                                      DMA_FROM_DEVICE);
 912
 913        /* We have pulled a buffer for use, so decrement pagecnt_bias */
 914        rx_buf->pagecnt_bias--;
 915
 916        return rx_buf;
 917}
 918
 919/**
 920 * ice_build_skb - Build skb around an existing buffer
 921 * @rx_ring: Rx descriptor ring to transact packets on
 922 * @rx_buf: Rx buffer to pull data from
 923 * @xdp: xdp_buff pointing to the data
 924 *
 925 * This function builds an skb around an existing Rx buffer, taking care
 926 * to set up the skb correctly and avoid any memcpy overhead.
 927 */
 928static struct sk_buff *
 929ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 930              struct xdp_buff *xdp)
 931{
 932        u8 metasize = xdp->data - xdp->data_meta;
 933#if (PAGE_SIZE < 8192)
 934        unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
 935#else
 936        unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
 937                                SKB_DATA_ALIGN(xdp->data_end -
 938                                               xdp->data_hard_start);
 939#endif
 940        struct sk_buff *skb;
 941
  942        /* Prefetch first cache line of first page. If xdp->data_meta
  943         * is unused, this points exactly to xdp->data, otherwise we
  944         * likely have a consumer accessing the first few bytes of
  945         * metadata, and then the actual data.
  946         */
 947        net_prefetch(xdp->data_meta);
 948        /* build an skb around the page buffer */
 949        skb = napi_build_skb(xdp->data_hard_start, truesize);
 950        if (unlikely(!skb))
 951                return NULL;
 952
  953        /* we must record the Rx queue, otherwise OS features such as
  954         * symmetric queues won't work
  955         */
 956        skb_record_rx_queue(skb, rx_ring->q_index);
 957
 958        /* update pointers within the skb to store the data */
 959        skb_reserve(skb, xdp->data - xdp->data_hard_start);
 960        __skb_put(skb, xdp->data_end - xdp->data);
 961        if (metasize)
 962                skb_metadata_set(skb, metasize);
 963
 964        /* buffer is used by skb, update page_offset */
 965        ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 966
 967        return skb;
 968}
 969
 970/**
 971 * ice_construct_skb - Allocate skb and populate it
 972 * @rx_ring: Rx descriptor ring to transact packets on
 973 * @rx_buf: Rx buffer to pull data from
 974 * @xdp: xdp_buff pointing to the data
 975 *
 976 * This function allocates an skb. It then populates it with the page
 977 * data from the current receive descriptor, taking care to set up the
 978 * skb correctly.
 979 */
 980static struct sk_buff *
 981ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
 982                  struct xdp_buff *xdp)
 983{
 984        unsigned int metasize = xdp->data - xdp->data_meta;
 985        unsigned int size = xdp->data_end - xdp->data;
 986        unsigned int headlen;
 987        struct sk_buff *skb;
 988
 989        /* prefetch first cache line of first page */
 990        net_prefetch(xdp->data_meta);
 991
 992        /* allocate a skb to store the frags */
 993        skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
 994                               ICE_RX_HDR_SIZE + metasize,
 995                               GFP_ATOMIC | __GFP_NOWARN);
 996        if (unlikely(!skb))
 997                return NULL;
 998
 999        skb_record_rx_queue(skb, rx_ring->q_index);
1000        /* Determine available headroom for copy */
1001        headlen = size;
1002        if (headlen > ICE_RX_HDR_SIZE)
1003                headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
1004
1005        /* align pull length to size of long to optimize memcpy performance */
1006        memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
1007               ALIGN(headlen + metasize, sizeof(long)));
1008
1009        if (metasize) {
1010                skb_metadata_set(skb, metasize);
1011                __skb_pull(skb, metasize);
1012        }
1013
1014        /* if we exhaust the linear part then add what is left as a frag */
1015        size -= headlen;
1016        if (size) {
1017#if (PAGE_SIZE >= 8192)
1018                unsigned int truesize = SKB_DATA_ALIGN(size);
1019#else
1020                unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
1021#endif
1022                skb_add_rx_frag(skb, 0, rx_buf->page,
1023                                rx_buf->page_offset + headlen, size, truesize);
1024                /* buffer is used by skb, update page_offset */
1025                ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
1026        } else {
1027                /* buffer is unused, reset bias back to rx_buf; data was copied
1028                 * onto skb's linear part so there's no need for adjusting
1029                 * page offset and we can reuse this buffer as-is
1030                 */
1031                rx_buf->pagecnt_bias++;
1032        }
1033
1034        return skb;
1035}
1036
1037/**
1038 * ice_put_rx_buf - Clean up used buffer and either recycle or free
1039 * @rx_ring: Rx descriptor ring to transact packets on
1040 * @rx_buf: Rx buffer to pull data from
1041 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
1042 *
1043 * This function will update next_to_clean and then clean up the contents
1044 * of the rx_buf. It will either recycle the buffer or unmap it and free
1045 * the associated resources.
1046 */
1047static void
1048ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
1049               int rx_buf_pgcnt)
1050{
1051        u16 ntc = rx_ring->next_to_clean + 1;
1052
1053        /* fetch, update, and store next to clean */
1054        ntc = (ntc < rx_ring->count) ? ntc : 0;
1055        rx_ring->next_to_clean = ntc;
1056
1057        if (!rx_buf)
1058                return;
1059
1060        if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
1061                /* hand second half of page back to the ring */
1062                ice_reuse_rx_page(rx_ring, rx_buf);
1063        } else {
1064                /* we are not reusing the buffer so unmap it */
1065                dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1066                                     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1067                                     ICE_RX_DMA_ATTR);
1068                __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1069        }
1070
1071        /* clear contents of buffer_info */
1072        rx_buf->page = NULL;
1073}
1074
1075/**
1076 * ice_is_non_eop - process handling of non-EOP buffers
1077 * @rx_ring: Rx ring being processed
1078 * @rx_desc: Rx descriptor for current buffer
1079 *
1080 * If the buffer is an EOP buffer, this function exits returning false,
1081 * otherwise return true indicating that this is in fact a non-EOP buffer.
1082 */
1083static bool
1084ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
1085{
1086        /* if we are the last buffer then there is nothing else to do */
1087#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
1088        if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
1089                return false;
1090
1091        rx_ring->rx_stats.non_eop_descs++;
1092
1093        return true;
1094}
1095
1096/**
1097 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1098 * @rx_ring: Rx descriptor ring to transact packets on
1099 * @budget: Total limit on number of packets to process
1100 *
1101 * This function provides a "bounce buffer" approach to Rx interrupt
1102 * processing. The advantage to this is that on systems that have
1103 * expensive overhead for IOMMU access this provides a means of avoiding
1104 * it by maintaining the mapping of the page to the system.
1105 *
1106 * Returns amount of work completed
1107 */
1108int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
1109{
1110        unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
1111        u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1112        unsigned int offset = rx_ring->rx_offset;
1113        struct ice_tx_ring *xdp_ring = NULL;
1114        unsigned int xdp_res, xdp_xmit = 0;
1115        struct sk_buff *skb = rx_ring->skb;
1116        struct bpf_prog *xdp_prog = NULL;
1117        struct xdp_buff xdp;
1118        bool failure;
1119
 1120        /* Frame size depends on the rx_ring setup when PAGE_SIZE = 4K */
1121#if (PAGE_SIZE < 8192)
1122        frame_sz = ice_rx_frame_truesize(rx_ring, 0);
1123#endif
1124        xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
1125
1126        xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1127        if (xdp_prog)
1128                xdp_ring = rx_ring->xdp_ring;
1129
1130        /* start the loop to process Rx packets bounded by 'budget' */
1131        while (likely(total_rx_pkts < (unsigned int)budget)) {
1132                union ice_32b_rx_flex_desc *rx_desc;
1133                struct ice_rx_buf *rx_buf;
1134                unsigned char *hard_start;
1135                unsigned int size;
1136                u16 stat_err_bits;
1137                int rx_buf_pgcnt;
1138                u16 vlan_tag = 0;
1139                u16 rx_ptype;
1140
1141                /* get the Rx desc from Rx ring based on 'next_to_clean' */
1142                rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1143
 1144                /* status_error_len will always be zero for unused descriptors
 1145                 * because it's cleared in cleanup and overlaps with hdr_addr,
 1146                 * which is always zero because packet split isn't used. If the
 1147                 * hardware wrote DD then it will be non-zero.
 1148                 */
1149                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1150                if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
1151                        break;
1152
1153                /* This memory barrier is needed to keep us from reading
1154                 * any other fields out of the rx_desc until we know the
1155                 * DD bit is set.
1156                 */
1157                dma_rmb();
1158
1159                ice_trace(clean_rx_irq, rx_ring, rx_desc);
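                     /* Flow Director programming status descriptors (and anything
                      * received on a ring without a netdev, i.e. the control VSI)
                      * carry no packet data: report VF filter results and recycle
                      * the descriptor without building an skb.
                      */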
1160                if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1161                        struct ice_vsi *ctrl_vsi = rx_ring->vsi;
1162
1163                        if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
1164                            ctrl_vsi->vf)
1165                                ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
1166                        ice_put_rx_buf(rx_ring, NULL, 0);
1167                        cleaned_count++;
1168                        continue;
1169                }
1170
1171                size = le16_to_cpu(rx_desc->wb.pkt_len) &
1172                        ICE_RX_FLX_DESC_PKT_LEN_M;
1173
1174                /* retrieve a buffer from the ring */
1175                rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
1176
1177                if (!size) {
1178                        xdp.data = NULL;
1179                        xdp.data_end = NULL;
1180                        xdp.data_hard_start = NULL;
1181                        xdp.data_meta = NULL;
1182                        goto construct_skb;
1183                }
1184
1185                hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1186                             offset;
1187                xdp_prepare_buff(&xdp, hard_start, offset, size, true);
1188#if (PAGE_SIZE > 4096)
 1189                /* At larger PAGE_SIZE, frame_sz depends on the frame length */
1190                xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1191#endif
1192
1193                if (!xdp_prog)
1194                        goto construct_skb;
1195
1196                xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
1197                if (!xdp_res)
1198                        goto construct_skb;
1199                if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1200                        xdp_xmit |= xdp_res;
1201                        ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
1202                } else {
1203                        rx_buf->pagecnt_bias++;
1204                }
1205                total_rx_bytes += size;
1206                total_rx_pkts++;
1207
1208                cleaned_count++;
1209                ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1210                continue;
1211construct_skb:
1212                if (skb) {
1213                        ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1214                } else if (likely(xdp.data)) {
1215                        if (ice_ring_uses_build_skb(rx_ring))
1216                                skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1217                        else
1218                                skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1219                }
1220                /* exit if we failed to retrieve a buffer */
1221                if (!skb) {
1222                        rx_ring->rx_stats.alloc_buf_failed++;
1223                        if (rx_buf)
1224                                rx_buf->pagecnt_bias++;
1225                        break;
1226                }
1227
1228                ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1229                cleaned_count++;
1230
 1231                /* skip if it is a non-EOP descriptor */
1232                if (ice_is_non_eop(rx_ring, rx_desc))
1233                        continue;
1234
1235                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1236                if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
1237                                              stat_err_bits))) {
1238                        dev_kfree_skb_any(skb);
1239                        continue;
1240                }
1241
1242                vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
1243
1244                /* pad the skb if needed, to make a valid ethernet frame */
1245                if (eth_skb_pad(skb)) {
1246                        skb = NULL;
1247                        continue;
1248                }
1249
1250                /* probably a little skewed due to removing CRC */
1251                total_rx_bytes += skb->len;
1252
1253                /* populate checksum, VLAN, and protocol */
1254                rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1255                        ICE_RX_FLEX_DESC_PTYPE_M;
1256
1257                ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1258
1259                ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
1260                /* send completed skb up the stack */
1261                ice_receive_skb(rx_ring, skb, vlan_tag);
1262                skb = NULL;
1263
1264                /* update budget accounting */
1265                total_rx_pkts++;
1266        }
1267
1268        /* return up to cleaned_count buffers to hardware */
1269        failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1270
1271        if (xdp_prog)
1272                ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
1273        rx_ring->skb = skb;
1274
1275        ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
1276
1277        /* guarantee a trip back through this routine if there was a failure */
1278        return failure ? budget : (int)total_rx_pkts;
1279}
1280
1281static void __ice_update_sample(struct ice_q_vector *q_vector,
1282                                struct ice_ring_container *rc,
1283                                struct dim_sample *sample,
1284                                bool is_tx)
1285{
1286        u64 packets = 0, bytes = 0;
1287
1288        if (is_tx) {
1289                struct ice_tx_ring *tx_ring;
1290
1291                ice_for_each_tx_ring(tx_ring, *rc) {
1292                        packets += tx_ring->stats.pkts;
1293                        bytes += tx_ring->stats.bytes;
1294                }
1295        } else {
1296                struct ice_rx_ring *rx_ring;
1297
1298                ice_for_each_rx_ring(rx_ring, *rc) {
1299                        packets += rx_ring->stats.pkts;
1300                        bytes += rx_ring->stats.bytes;
1301                }
1302        }
1303
1304        dim_update_sample(q_vector->total_events, packets, bytes, sample);
1305        sample->comp_ctr = 0;
1306
1307        /* if dim settings get stale, like when not updated for 1
1308         * second or longer, force it to start again. This addresses the
1309         * frequent case of an idle queue being switched to by the
1310         * scheduler. The 1,000 here means 1,000 milliseconds.
1311         */
1312        if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
1313                rc->dim.state = DIM_START_MEASURE;
1314}
1315
1316/**
1317 * ice_net_dim - Update net DIM algorithm
1318 * @q_vector: the vector associated with the interrupt
1319 *
1320 * Create a DIM sample and notify net_dim() so that it can possibly decide
1321 * a new ITR value based on incoming packets, bytes, and interrupts.
1322 *
1323 * This function is a no-op if the ring is not configured to dynamic ITR.
1324 */
1325static void ice_net_dim(struct ice_q_vector *q_vector)
1326{
1327        struct ice_ring_container *tx = &q_vector->tx;
1328        struct ice_ring_container *rx = &q_vector->rx;
1329
1330        if (ITR_IS_DYNAMIC(tx)) {
1331                struct dim_sample dim_sample;
1332
1333                __ice_update_sample(q_vector, tx, &dim_sample, true);
1334                net_dim(&tx->dim, dim_sample);
1335        }
1336
1337        if (ITR_IS_DYNAMIC(rx)) {
1338                struct dim_sample dim_sample;
1339
1340                __ice_update_sample(q_vector, rx, &dim_sample, false);
1341                net_dim(&rx->dim, dim_sample);
1342        }
1343}
1344
1345/**
1346 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1347 * @itr_idx: interrupt throttling index
1348 * @itr: interrupt throttling value in usecs
1349 */
1350static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1351{
1352        /* The ITR value is reported in microseconds, and the register value is
1353         * recorded in 2 microsecond units. For this reason we only need to
1354         * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1355         * granularity as a shift instead of division. The mask makes sure the
1356         * ITR value is never odd so we don't accidentally write into the field
1357         * prior to the ITR field.
1358         */
1359        itr &= ICE_ITR_MASK;
1360
1361        return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1362                (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1363                (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1364}
1365
1366/**
1367 * ice_enable_interrupt - re-enable MSI-X interrupt
1368 * @q_vector: the vector associated with the interrupt to enable
1369 *
1370 * If the VSI is down, the interrupt will not be re-enabled. Also,
1371 * when enabling the interrupt always reset the wb_on_itr to false
1372 * and trigger a software interrupt to clean out internal state.
1373 */
1374static void ice_enable_interrupt(struct ice_q_vector *q_vector)
1375{
1376        struct ice_vsi *vsi = q_vector->vsi;
1377        bool wb_en = q_vector->wb_on_itr;
1378        u32 itr_val;
1379
1380        if (test_bit(ICE_DOWN, vsi->state))
1381                return;
1382
1383        /* trigger an ITR delayed software interrupt when exiting busy poll, to
1384         * make sure to catch any pending cleanups that might have been missed
1385         * due to interrupt state transition. If busy poll or poll isn't
1386         * enabled, then don't update ITR, and just enable the interrupt.
1387         */
1388        if (!wb_en) {
1389                itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1390        } else {
1391                q_vector->wb_on_itr = false;
1392
1393                /* do two things here with a single write. Set up the third ITR
1394                 * index to be used for software interrupt moderation, and then
1395                 * trigger a software interrupt with a rate limit of 20K on
1396                 * software interrupts, this will help avoid high interrupt
1397                 * loads due to frequently polling and exiting polling.
1398                 */
1399                itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
1400                itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
1401                           ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
1402                           GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
1403        }
1404        wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1405}
1406
1407/**
1408 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1409 * @q_vector: q_vector to set WB_ON_ITR on
1410 *
1411 * We need to tell hardware to write-back completed descriptors even when
1412 * interrupts are disabled. Descriptors will be written back on cache line
1413 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1414 * descriptors may not be written back if they don't fill a cache line until
1415 * the next interrupt.
1416 *
1417 * This sets the write-back frequency to whatever was set previously for the
1418 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
1419 * aren't meddling with the INTENA_M bit.
1420 */
1421static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1422{
1423        struct ice_vsi *vsi = q_vector->vsi;
1424
 1425        /* already in wb_on_itr mode, no need to change it */
1426        if (q_vector->wb_on_itr)
1427                return;
1428
1429        /* use previously set ITR values for all of the ITR indices by
1430         * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
1431         * be static in non-adaptive mode (user configured)
1432         */
1433        wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1434             ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
1435              GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
1436             GLINT_DYN_CTL_WB_ON_ITR_M);
1437
1438        q_vector->wb_on_itr = true;
1439}
1440
1441/**
1442 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1443 * @napi: napi struct with our devices info in it
1444 * @budget: amount of work driver is allowed to do this pass, in packets
1445 *
1446 * This function will clean all queues associated with a q_vector.
1447 *
1448 * Returns the amount of work done
1449 */
1450int ice_napi_poll(struct napi_struct *napi, int budget)
1451{
1452        struct ice_q_vector *q_vector =
1453                                container_of(napi, struct ice_q_vector, napi);
1454        struct ice_tx_ring *tx_ring;
1455        struct ice_rx_ring *rx_ring;
1456        bool clean_complete = true;
1457        int budget_per_ring;
1458        int work_done = 0;
1459
1460        /* Since the actual Tx work is minimal, we can give the Tx a larger
1461         * budget and be more aggressive about cleaning up the Tx descriptors.
1462         */
1463        ice_for_each_tx_ring(tx_ring, q_vector->tx) {
1464                bool wd;
1465
1466                if (tx_ring->xsk_pool)
1467                        wd = ice_xmit_zc(tx_ring, ICE_DESC_UNUSED(tx_ring), budget);
1468                else if (ice_ring_is_xdp(tx_ring))
1469                        wd = true;
1470                else
1471                        wd = ice_clean_tx_irq(tx_ring, budget);
1472
1473                if (!wd)
1474                        clean_complete = false;
1475        }
1476
1477        /* Handle case where we are called by netpoll with a budget of 0 */
1478        if (unlikely(budget <= 0))
1479                return budget;
1480
1481        /* normally we have 1 Rx ring per q_vector */
1482        if (unlikely(q_vector->num_ring_rx > 1))
1483                /* We attempt to distribute budget to each Rx queue fairly, but
1484                 * don't allow the budget to go below 1 because that would exit
1485                 * polling early.
1486                 */
1487                budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1488        else
1489                /* Max of 1 Rx ring in this q_vector so give it the budget */
1490                budget_per_ring = budget;
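            /* Rough worked example: a typical NAPI budget of 64 split across
             * 4 Rx rings gives 16 per ring; the max_t() clamp above only
             * matters in the unlikely case of more rings than budget, where
             * each ring still gets a budget of 1 so polling is not cut short.
             */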
1491
1492        ice_for_each_rx_ring(rx_ring, q_vector->rx) {
1493                int cleaned;
1494
1495                /* A dedicated path for zero-copy lets us make a single
1496                 * comparison in the irq context instead of many inside the
1497                 * ice_clean_rx_irq function, and keeps the codebase cleaner.
1498                 */
1499                cleaned = rx_ring->xsk_pool ?
1500                          ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
1501                          ice_clean_rx_irq(rx_ring, budget_per_ring);
1502                work_done += cleaned;
1503                /* if we clean as many as budgeted, we must not be done */
1504                if (cleaned >= budget_per_ring)
1505                        clean_complete = false;
1506        }
1507
1508        /* If work not completed, return budget and polling will return */
1509        if (!clean_complete) {
1510                /* Set write-back on ITR so descriptors in partially filled
1511                 * cache lines are still written back even while we keep polling.
1512                 */
1513                ice_set_wb_on_itr(q_vector);
1514                return budget;
1515        }
1516
1517        /* Exit the polling mode, but don't re-enable interrupts if stack might
1518         * poll us due to busy-polling
1519         */
1520        if (napi_complete_done(napi, work_done)) {
1521                ice_net_dim(q_vector);
1522                ice_enable_interrupt(q_vector);
1523        } else {
1524                ice_set_wb_on_itr(q_vector);
1525        }
1526
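            /* napi_complete_done() has already been invoked above, so report
             * strictly less than the full budget back to the NAPI core to
             * mark this poll cycle as finished.
             */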
1527        return min_t(int, work_done, budget - 1);
1528}
1529
1530/**
1531 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1532 * @tx_ring: the ring to be checked
1533 * @size: the number of free descriptors we want to ensure is available
1534 *
1535 * Returns -EBUSY if a stop is needed, else 0
1536 */
1537static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1538{
1539        netif_tx_stop_queue(txring_txq(tx_ring));
1540        /* Memory barrier before checking head and tail */
1541        smp_mb();
1542
1543        /* Check again in case another CPU has just made room available. */
1544        if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1545                return -EBUSY;
1546
1547        /* A reprieve! - use start_queue because it doesn't call schedule */
1548        netif_tx_start_queue(txring_txq(tx_ring));
1549        ++tx_ring->tx_stats.restart_q;
1550        return 0;
1551}
1552
1553/**
1554 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1555 * @tx_ring: the ring to be checked
1556 * @size:    the number of free descriptors we want to ensure is available
1557 *
1558 * Returns 0 if stop is not needed
1559 */
1560static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
1561{
1562        if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1563                return 0;
1564
1565        return __ice_maybe_stop_tx(tx_ring, size);
1566}
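
    /* The two-level check above follows the usual netdev flow-control
     * pattern: the slow path stops the queue first and only re-reads the
     * ring after smp_mb(), so either this CPU re-observes freed descriptors
     * and restarts the queue itself, or the completion path
     * (ice_clean_tx_irq()) sees the stopped queue and wakes it; a wake-up
     * cannot be lost either way.
     */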
1567
1568/**
1569 * ice_tx_map - Build the Tx descriptor
1570 * @tx_ring: ring to send buffer on
1571 * @first: Tx buffer info for the first descriptor of the packet to map
1572 * @off: pointer to struct that holds offload parameters
1573 *
1574 * This function loops over the skb data pointed to by *first,
1575 * maps each memory location to a DMA address, and programs that
1576 * address and the length into a transmit descriptor.
1577 */
1578static void
1579ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
1580           struct ice_tx_offload_params *off)
1581{
1582        u64 td_offset, td_tag, td_cmd;
1583        u16 i = tx_ring->next_to_use;
1584        unsigned int data_len, size;
1585        struct ice_tx_desc *tx_desc;
1586        struct ice_tx_buf *tx_buf;
1587        struct sk_buff *skb;
1588        skb_frag_t *frag;
1589        dma_addr_t dma;
1590        bool kick;
1591
1592        td_tag = off->td_l2tag1;
1593        td_cmd = off->td_cmd;
1594        td_offset = off->td_offset;
1595        skb = first->skb;
1596
1597        data_len = skb->data_len;
1598        size = skb_headlen(skb);
1599
1600        tx_desc = ICE_TX_DESC(tx_ring, i);
1601
1602        if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1603                td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1604                td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1605                          ICE_TX_FLAGS_VLAN_S;
1606        }
1607
1608        dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1609
1610        tx_buf = first;
1611
1612        for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1613                unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1614
1615                if (dma_mapping_error(tx_ring->dev, dma))
1616                        goto dma_error;
1617
1618                /* record length, and DMA address */
1619                dma_unmap_len_set(tx_buf, len, size);
1620                dma_unmap_addr_set(tx_buf, dma, dma);
1621
1622                /* align size to end of page */
1623                max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1624                tx_desc->buf_addr = cpu_to_le64(dma);
1625
1626                /* account for data chunks larger than the hardware
1627                 * can handle
1628                 */
1629                while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1630                        tx_desc->cmd_type_offset_bsz =
1631                                ice_build_ctob(td_cmd, td_offset, max_data,
1632                                               td_tag);
1633
1634                        tx_desc++;
1635                        i++;
1636
1637                        if (i == tx_ring->count) {
1638                                tx_desc = ICE_TX_DESC(tx_ring, 0);
1639                                i = 0;
1640                        }
1641
1642                        dma += max_data;
1643                        size -= max_data;
1644
1645                        max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1646                        tx_desc->buf_addr = cpu_to_le64(dma);
1647                }
1648
1649                if (likely(!data_len))
1650                        break;
1651
1652                tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1653                                                              size, td_tag);
1654
1655                tx_desc++;
1656                i++;
1657
1658                if (i == tx_ring->count) {
1659                        tx_desc = ICE_TX_DESC(tx_ring, 0);
1660                        i = 0;
1661                }
1662
1663                size = skb_frag_size(frag);
1664                data_len -= size;
1665
1666                dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1667                                       DMA_TO_DEVICE);
1668
1669                tx_buf = &tx_ring->tx_buf[i];
1670        }
1671
1672        /* record SW timestamp if HW timestamp is not available */
1673        skb_tx_timestamp(first->skb);
1674
1675        i++;
1676        if (i == tx_ring->count)
1677                i = 0;
1678
1679        /* write last descriptor with RS and EOP bits */
1680        td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1681        tx_desc->cmd_type_offset_bsz =
1682                        ice_build_ctob(td_cmd, td_offset, size, td_tag);
1683
1684        /* Force memory writes to complete before letting h/w know there
1685         * are new descriptors to fetch.
1686         *
1687         * We also use this memory barrier to make certain all of the
1688         * status bits have been updated before next_to_watch is written.
1689         */
1690        wmb();
1691
1692        /* set next_to_watch value indicating a packet is present */
1693        first->next_to_watch = tx_desc;
1694
1695        tx_ring->next_to_use = i;
1696
1697        ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1698
1699        /* update BQL accounting and see whether HW needs to be notified now */
1700        kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
1701                                      netdev_xmit_more());
1702        if (kick)
1703                /* notify HW of packet */
1704                writel(i, tx_ring->tail);
1705
1706        return;
1707
1708dma_error:
1709        /* clear DMA mappings for failed tx_buf map */
1710        for (;;) {
1711                tx_buf = &tx_ring->tx_buf[i];
1712                ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1713                if (tx_buf == first)
1714                        break;
1715                if (i == 0)
1716                        i = tx_ring->count;
1717                i--;
1718        }
1719
1720        tx_ring->next_to_use = i;
1721}
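
    /* Rough worked example for the mapping loop above, assuming the head of
     * the skb happens to map to a 4K-aligned DMA address: a 30000-byte linear
     * area is larger than the 16K - 1 bytes a single descriptor can carry, so
     * the inner while loop peels off two ICE_MAX_DATA_PER_TXD_ALIGNED (12K)
     * chunks and the remaining 5424 bytes go out in the final descriptor,
     * which also carries the RS/EOP bits: three descriptors in total.
     */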
1722
1723/**
1724 * ice_tx_csum - Enable Tx checksum offloads
1725 * @first: pointer to the first descriptor
1726 * @off: pointer to struct that holds offload parameters
1727 *
1728 * Returns 1 if offload was set up, 0 if no offload is needed, or negative if it can't happen.
1729 */
1730static
1731int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1732{
1733        u32 l4_len = 0, l3_len = 0, l2_len = 0;
1734        struct sk_buff *skb = first->skb;
1735        union {
1736                struct iphdr *v4;
1737                struct ipv6hdr *v6;
1738                unsigned char *hdr;
1739        } ip;
1740        union {
1741                struct tcphdr *tcp;
1742                unsigned char *hdr;
1743        } l4;
1744        __be16 frag_off, protocol;
1745        unsigned char *exthdr;
1746        u32 offset, cmd = 0;
1747        u8 l4_proto = 0;
1748
1749        if (skb->ip_summed != CHECKSUM_PARTIAL)
1750                return 0;
1751
1752        protocol = vlan_get_protocol(skb);
1753
1754        if (eth_p_mpls(protocol)) {
1755                ip.hdr = skb_inner_network_header(skb);
1756                l4.hdr = skb_checksum_start(skb);
1757        } else {
1758                ip.hdr = skb_network_header(skb);
1759                l4.hdr = skb_transport_header(skb);
1760        }
1761
1762        /* compute outer L2 header size */
1763        l2_len = ip.hdr - skb->data;
1764        offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1765
1766        /* set the tx_flags to indicate the IP protocol type. This is
1767         * required so that the checksum offsets computed below are accurate.
1768         */
1769        if (ip.v4->version == 4)
1770                first->tx_flags |= ICE_TX_FLAGS_IPV4;
1771        else if (ip.v6->version == 6)
1772                first->tx_flags |= ICE_TX_FLAGS_IPV6;
1773
1774        if (skb->encapsulation) {
1775                bool gso_ena = false;
1776                u32 tunnel = 0;
1777
1778                /* define outer network header type */
1779                if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1780                        tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1781                                  ICE_TX_CTX_EIPT_IPV4 :
1782                                  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1783                        l4_proto = ip.v4->protocol;
1784                } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1785                        int ret;
1786
1787                        tunnel |= ICE_TX_CTX_EIPT_IPV6;
1788                        exthdr = ip.hdr + sizeof(*ip.v6);
1789                        l4_proto = ip.v6->nexthdr;
1790                        ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
1791                                               &l4_proto, &frag_off);
1792                        if (ret < 0)
1793                                return -1;
1794                }
1795
1796                /* define outer transport */
1797                switch (l4_proto) {
1798                case IPPROTO_UDP:
1799                        tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1800                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1801                        break;
1802                case IPPROTO_GRE:
1803                        tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1804                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1805                        break;
1806                case IPPROTO_IPIP:
1807                case IPPROTO_IPV6:
1808                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1809                        l4.hdr = skb_inner_network_header(skb);
1810                        break;
1811                default:
1812                        if (first->tx_flags & ICE_TX_FLAGS_TSO)
1813                                return -1;
1814
1815                        skb_checksum_help(skb);
1816                        return 0;
1817                }
1818
1819                /* compute outer L3 header size */
1820                tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1821                          ICE_TXD_CTX_QW0_EIPLEN_S;
1822
1823                /* switch IP header pointer from outer to inner header */
1824                ip.hdr = skb_inner_network_header(skb);
1825
1826                /* compute tunnel header size */
1827                tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1828                           ICE_TXD_CTX_QW0_NATLEN_S;
1829
1830                gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1831                /* indicate if we need to offload outer UDP header */
1832                if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1833                    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1834                        tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1835
1836                /* record tunnel offload values */
1837                off->cd_tunnel_params |= tunnel;
1838
1839                /* set DTYP=1 to indicate that it's a Tx context descriptor
1840                 * in IPsec tunnel mode with Tx offloads in Quad word 1
1841                 */
1842                off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1843
1844                /* switch L4 header pointer from outer to inner */
1845                l4.hdr = skb_inner_transport_header(skb);
1846                l4_proto = 0;
1847
1848                /* reset type as we transition from outer to inner headers */
1849                first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1850                if (ip.v4->version == 4)
1851                        first->tx_flags |= ICE_TX_FLAGS_IPV4;
1852                if (ip.v6->version == 6)
1853                        first->tx_flags |= ICE_TX_FLAGS_IPV6;
1854        }
1855
1856        /* Enable IP checksum offloads */
1857        if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1858                l4_proto = ip.v4->protocol;
1859                /* the stack computes the IP header checksum already; the only
1860                 * time we need the hardware to recompute it is in the case of TSO.
1861                 */
1862                if (first->tx_flags & ICE_TX_FLAGS_TSO)
1863                        cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1864                else
1865                        cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1866
1867        } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1868                cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1869                exthdr = ip.hdr + sizeof(*ip.v6);
1870                l4_proto = ip.v6->nexthdr;
1871                if (l4.hdr != exthdr)
1872                        ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1873                                         &frag_off);
1874        } else {
1875                return -1;
1876        }
1877
1878        /* compute inner L3 header size */
1879        l3_len = l4.hdr - ip.hdr;
1880        offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1881
1882        /* Enable L4 checksum offloads */
1883        switch (l4_proto) {
1884        case IPPROTO_TCP:
1885                /* enable checksum offloads */
1886                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1887                l4_len = l4.tcp->doff;
1888                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1889                break;
1890        case IPPROTO_UDP:
1891                /* enable UDP checksum offload */
1892                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1893                l4_len = (sizeof(struct udphdr) >> 2);
1894                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1895                break;
1896        case IPPROTO_SCTP:
1897                /* enable SCTP checksum offload */
1898                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1899                l4_len = sizeof(struct sctphdr) >> 2;
1900                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1901                break;
1902
1903        default:
1904                if (first->tx_flags & ICE_TX_FLAGS_TSO)
1905                        return -1;
1906                skb_checksum_help(skb);
1907                return 0;
1908        }
1909
1910        off->td_cmd |= cmd;
1911        off->td_offset |= offset;
1912        return 1;
1913}
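
    /* Rough worked example for the non-tunneled path above: an IPv4/TCP frame
     * with a 14-byte Ethernet header, no IP options and a 20-byte TCP header
     * yields MACLEN = 14 / 2 = 7, IPLEN = 20 / 4 = 5 and L4LEN = doff = 5 in
     * td_offset, with ICE_TX_DESC_CMD_IIPT_IPV4 (or the _CSUM variant when
     * doing TSO) and ICE_TX_DESC_CMD_L4T_EOFT_TCP set in td_cmd.
     */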
1914
1915/**
1916 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1917 * @tx_ring: ring to send buffer on
1918 * @first: pointer to struct ice_tx_buf
1919 *
1920 * Checks the skb and set up correspondingly several generic transmit flags
1921 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1922 */
1923static void
1924ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
1925{
1926        struct sk_buff *skb = first->skb;
1927
1928        /* nothing left to do, software offloaded VLAN */
1929        if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
1930                return;
1931
1932        /* the VLAN ethertype/tpid is determined by the VSI configuration and
1933         * netdev feature flags; the driver only allows either 802.1Q or 802.1ad
1934         * VLAN offload at a time, so we only care about the VLAN ID here
1935         */
1936        if (skb_vlan_tag_present(skb)) {
1937                first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1938                if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
1939                        first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
1940                else
1941                        first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1942        }
1943
1944        ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1945}
1946
1947/**
1948 * ice_tso - computes mss and TSO length to prepare for TSO
1949 * @first: pointer to struct ice_tx_buf
1950 * @off: pointer to struct that holds offload parameters
1951 *
1952 * Returns 1 if TSO was set up, 0 if it is not needed, or negative on error.
1953 */
1954static
1955int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1956{
1957        struct sk_buff *skb = first->skb;
1958        union {
1959                struct iphdr *v4;
1960                struct ipv6hdr *v6;
1961                unsigned char *hdr;
1962        } ip;
1963        union {
1964                struct tcphdr *tcp;
1965                struct udphdr *udp;
1966                unsigned char *hdr;
1967        } l4;
1968        u64 cd_mss, cd_tso_len;
1969        __be16 protocol;
1970        u32 paylen;
1971        u8 l4_start;
1972        int err;
1973
1974        if (skb->ip_summed != CHECKSUM_PARTIAL)
1975                return 0;
1976
1977        if (!skb_is_gso(skb))
1978                return 0;
1979
1980        err = skb_cow_head(skb, 0);
1981        if (err < 0)
1982                return err;
1983
1984        /* cppcheck-suppress unreadVariable */
1985        protocol = vlan_get_protocol(skb);
1986
1987        if (eth_p_mpls(protocol))
1988                ip.hdr = skb_inner_network_header(skb);
1989        else
1990                ip.hdr = skb_network_header(skb);
1991        l4.hdr = skb_checksum_start(skb);
1992
1993        /* initialize outer IP header fields */
1994        if (ip.v4->version == 4) {
1995                ip.v4->tot_len = 0;
1996                ip.v4->check = 0;
1997        } else {
1998                ip.v6->payload_len = 0;
1999        }
2000
2001        if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2002                                         SKB_GSO_GRE_CSUM |
2003                                         SKB_GSO_IPXIP4 |
2004                                         SKB_GSO_IPXIP6 |
2005                                         SKB_GSO_UDP_TUNNEL |
2006                                         SKB_GSO_UDP_TUNNEL_CSUM)) {
2007                if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2008                    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2009                        l4.udp->len = 0;
2010
2011                        /* determine offset of outer transport header */
2012                        l4_start = (u8)(l4.hdr - skb->data);
2013
2014                        /* remove payload length from outer checksum */
2015                        paylen = skb->len - l4_start;
2016                        csum_replace_by_diff(&l4.udp->check,
2017                                             (__force __wsum)htonl(paylen));
2018                }
2019
2020                /* reset pointers to inner headers */
2021
2022                /* cppcheck-suppress unreadVariable */
2023                ip.hdr = skb_inner_network_header(skb);
2024                l4.hdr = skb_inner_transport_header(skb);
2025
2026                /* initialize inner IP header fields */
2027                if (ip.v4->version == 4) {
2028                        ip.v4->tot_len = 0;
2029                        ip.v4->check = 0;
2030                } else {
2031                        ip.v6->payload_len = 0;
2032                }
2033        }
2034
2035        /* determine offset of transport header */
2036        l4_start = (u8)(l4.hdr - skb->data);
2037
2038        /* remove payload length from checksum */
2039        paylen = skb->len - l4_start;
2040
2041        if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2042                csum_replace_by_diff(&l4.udp->check,
2043                                     (__force __wsum)htonl(paylen));
2044                /* compute length of UDP segmentation header */
2045                off->header_len = (u8)sizeof(*l4.udp) + l4_start;
2046        } else {
2047                csum_replace_by_diff(&l4.tcp->check,
2048                                     (__force __wsum)htonl(paylen));
2049                /* compute length of TCP segmentation header */
2050                off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2051        }
2052
2053        /* update gso_segs and bytecount */
2054        first->gso_segs = skb_shinfo(skb)->gso_segs;
2055        first->bytecount += (first->gso_segs - 1) * off->header_len;
2056
2057        cd_tso_len = skb->len - off->header_len;
2058        cd_mss = skb_shinfo(skb)->gso_size;
2059
2060        /* record cdesc_qw1 with TSO parameters */
2061        off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2062                             (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2063                             (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2064                             (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2065        first->tx_flags |= ICE_TX_FLAGS_TSO;
2066        return 1;
2067}
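
    /* Rough worked example for the TSO setup above: an IPv4/TCP skb with
     * 14 + 20 + 20 = 54 bytes of headers, 4000 bytes of payload and
     * gso_size = 1000 gives header_len = 54, gso_segs = 4, cd_tso_len = 4000
     * and cd_mss = 1000; bytecount grows by 3 * 54 = 162 so it matches the
     * 4 * (1000 + 54) bytes that will actually be transmitted.
     */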
2068
2069/**
2070 * ice_txd_use_count  - estimate the number of descriptors needed for Tx
2071 * @size: transmit request size in bytes
2072 *
2073 * Due to hardware alignment restrictions (4K alignment), we need to
2074 * assume that we can have no more than 12K of data per descriptor, even
2075 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2076 * Thus, we need to divide by 12K. But division is slow! Instead,
2077 * we decompose the operation into shifts and one relatively cheap
2078 * multiply operation.
2079 *
2080 * To divide by 12K, we first divide by 4K, then divide by 3:
2081 *     To divide by 4K, shift right by 12 bits
2082 *     To divide by 3, multiply by 85, then divide by 256
2083 *     (Divide by 256 is done by shifting right by 8 bits)
2084 * Finally, we add one to round up. Because 256 isn't an exact multiple of
2085 * 3, we'll underestimate near each multiple of 12K. This is actually more
2086 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
2087 * segment. For our purposes this is accurate out to 1M which is orders of
2088 * magnitude greater than our largest possible GSO size.
2089 *
2090 * This would then be implemented as:
2091 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
2092 *
2093 * Since multiplication and division are commutative, we can reorder
2094 * operations into:
2095 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2096 */
2097static unsigned int ice_txd_use_count(unsigned int size)
2098{
2099        return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2100}
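
    /* Worked example: size = 60000 gives (60000 * 85) >> 20 = 4, so the
     * function reports 4 + 1 = 5 descriptors (the +1 being the round-up
     * described above), matching ceil(60000 / 12K). Exactly at a 12K
     * multiple, e.g. size = 12288, the shift yields 0 and only one descriptor
     * is reported, which is still fine because 12288 fits within the
     * 16K - 1 bytes a single descriptor can actually hold.
     */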
2101
2102/**
2103 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2104 * @skb: send buffer
2105 *
2106 * Returns number of data descriptors needed for this skb.
2107 */
2108static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2109{
2110        const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2111        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2112        unsigned int count = 0, size = skb_headlen(skb);
2113
2114        for (;;) {
2115                count += ice_txd_use_count(size);
2116
2117                if (!nr_frags--)
2118                        break;
2119
2120                size = skb_frag_size(frag++);
2121        }
2122
2123        return count;
2124}
2125
2126/**
2127 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2128 * @skb: send buffer
2129 *
2130 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2131 * and so we need to figure out the cases where we need to linearize the skb.
2132 *
2133 * For TSO we need to count the TSO header and segment payload separately.
2134 * As such we need to check cases where we have 7 fragments or more as we
2135 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2136 * the segment payload in the first descriptor, and another 7 for the
2137 * fragments.
2138 */
2139static bool __ice_chk_linearize(struct sk_buff *skb)
2140{
2141        const skb_frag_t *frag, *stale;
2142        int nr_frags, sum;
2143
2144        /* no need to check if number of frags is less than 7 */
2145        nr_frags = skb_shinfo(skb)->nr_frags;
2146        if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2147                return false;
2148
2149        /* We need to walk through the list and validate that each group
2150         * of 6 fragments totals at least gso_size.
2151         */
2152        nr_frags -= ICE_MAX_BUF_TXD - 2;
2153        frag = &skb_shinfo(skb)->frags[0];
2154
2155        /* Initialize sum to 1 - gso_size, i.e. -(gso_size - 1). We
2156         * use this as the worst case scenario in which the frag ahead
2157         * of us only provides one byte which is why we are limited to 6
2158         * descriptors for a single transmit as the header and previous
2159         * fragment are already consuming 2 descriptors.
2160         */
2161        sum = 1 - skb_shinfo(skb)->gso_size;
2162
2163        /* Add size of frags 0 through 4 to create our initial sum */
2164        sum += skb_frag_size(frag++);
2165        sum += skb_frag_size(frag++);
2166        sum += skb_frag_size(frag++);
2167        sum += skb_frag_size(frag++);
2168        sum += skb_frag_size(frag++);
2169
2170        /* Walk through fragments adding latest fragment, testing it, and
2171         * then removing stale fragments from the sum.
2172         */
2173        for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2174                int stale_size = skb_frag_size(stale);
2175
2176                sum += skb_frag_size(frag++);
2177
2178                /* The stale fragment may present us with a smaller
2179                 * descriptor than the actual fragment size. To account
2180                 * for that we need to remove all the data on the front and
2181                 * figure out what the remainder would be in the last
2182                 * descriptor associated with the fragment.
2183                 */
2184                if (stale_size > ICE_MAX_DATA_PER_TXD) {
2185                        int align_pad = -(skb_frag_off(stale)) &
2186                                        (ICE_MAX_READ_REQ_SIZE - 1);
2187
2188                        sum -= align_pad;
2189                        stale_size -= align_pad;
2190
2191                        do {
2192                                sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2193                                stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2194                        } while (stale_size > ICE_MAX_DATA_PER_TXD);
2195                }
2196
2197                /* if sum is negative we failed to make sufficient progress */
2198                if (sum < 0)
2199                        return true;
2200
2201                if (!nr_frags--)
2202                        break;
2203
2204                sum -= stale_size;
2205        }
2206
2207        return false;
2208}
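
    /* Worked example: with gso_size = 7000 and eight 1000-byte fragments, no
     * window of six consecutive fragments adds up to a full segment, so the
     * running sum goes negative and the caller must linearize the skb; with
     * gso_size = 6000 the same layout passes, since every six-fragment window
     * covers exactly one segment.
     */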
2209
2210/**
2211 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2212 * @skb:      send buffer
2213 * @count:    number of buffers used
2214 *
2215 * Note: Our HW can't scatter-gather more than 8 fragments to build
2216 * a packet on the wire and so we need to figure out the cases where we
2217 * need to linearize the skb.
2218 */
2219static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2220{
2221        /* Both TSO and single send will work if count is less than 8 */
2222        if (likely(count < ICE_MAX_BUF_TXD))
2223                return false;
2224
2225        if (skb_is_gso(skb))
2226                return __ice_chk_linearize(skb);
2227
2228        /* we can support up to 8 data buffers for a single send */
2229        return count != ICE_MAX_BUF_TXD;
2230}
2231
2232/**
2233 * ice_tstamp - set up context descriptor for hardware timestamp
2234 * @tx_ring: pointer to the Tx ring to send buffer on
2235 * @skb: pointer to the SKB we're sending
2236 * @first: Tx buffer
2237 * @off: Tx offload parameters
2238 */
2239static void
2240ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
2241           struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2242{
2243        s8 idx;
2244
2245        /* only timestamp the outbound packet if the user has requested it */
2246        if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2247                return;
2248
2249        if (!tx_ring->ptp_tx)
2250                return;
2251
2252        /* Tx timestamps cannot be sampled when doing TSO */
2253        if (first->tx_flags & ICE_TX_FLAGS_TSO)
2254                return;
2255
2256        /* Grab an open timestamp slot */
2257        idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
2258        if (idx < 0)
2259                return;
2260
2261        off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2262                             (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
2263                             ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
2264        first->tx_flags |= ICE_TX_FLAGS_TSYN;
2265}
2266
2267/**
2268 * ice_xmit_frame_ring - Sends buffer on Tx ring
2269 * @skb: send buffer
2270 * @tx_ring: ring to send buffer on
2271 *
2272 * Returns NETDEV_TX_OK if sent, else an error code
2273 */
2274static netdev_tx_t
2275ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
2276{
2277        struct ice_tx_offload_params offload = { 0 };
2278        struct ice_vsi *vsi = tx_ring->vsi;
2279        struct ice_tx_buf *first;
2280        struct ethhdr *eth;
2281        unsigned int count;
2282        int tso, csum;
2283
2284        ice_trace(xmit_frame_ring, tx_ring, skb);
2285
2286        count = ice_xmit_desc_count(skb);
2287        if (ice_chk_linearize(skb, count)) {
2288                if (__skb_linearize(skb))
2289                        goto out_drop;
2290                count = ice_txd_use_count(skb->len);
2291                tx_ring->tx_stats.tx_linearize++;
2292        }
2293
2294        /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2295         *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2296         *       + 4 desc gap to avoid the cache line where head is,
2297         *       + 1 desc for context descriptor,
2298         * otherwise try next time
2299         */
2300        if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2301                              ICE_DESCS_FOR_CTX_DESC)) {
2302                tx_ring->tx_stats.tx_busy++;
2303                return NETDEV_TX_BUSY;
2304        }
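            /* e.g. a packet needing count = 5 data descriptors only goes
             * ahead when, per the budget spelled out above, at least
             * 5 + 4 + 1 = 10 descriptors are free; otherwise the queue is
             * stopped and NETDEV_TX_BUSY tells the stack to retry.
             */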
2305
2306        /* prefetch for bql data which is infrequently used */
2307        netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));
2308
2309        offload.tx_ring = tx_ring;
2310
2311        /* record the location of the first descriptor for this packet */
2312        first = &tx_ring->tx_buf[tx_ring->next_to_use];
2313        first->skb = skb;
2314        first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2315        first->gso_segs = 1;
2316        first->tx_flags = 0;
2317
2318        /* prepare the VLAN tagging flags for Tx */
2319        ice_tx_prepare_vlan_flags(tx_ring, first);
2320        if (first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) {
2321                offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2322                                        (ICE_TX_CTX_DESC_IL2TAG2 <<
2323                                        ICE_TXD_CTX_QW1_CMD_S));
2324                offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
2325                        ICE_TX_FLAGS_VLAN_S;
2326        }
2327
2328        /* set up TSO offload */
2329        tso = ice_tso(first, &offload);
2330        if (tso < 0)
2331                goto out_drop;
2332
2333        /* always set up Tx checksum offload */
2334        csum = ice_tx_csum(first, &offload);
2335        if (csum < 0)
2336                goto out_drop;
2337
2338        /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2339        eth = (struct ethhdr *)skb_mac_header(skb);
2340        if (unlikely((skb->priority == TC_PRIO_CONTROL ||
2341                      eth->h_proto == htons(ETH_P_LLDP)) &&
2342                     vsi->type == ICE_VSI_PF &&
2343                     vsi->port_info->qos_cfg.is_sw_lldp))
2344                offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2345                                        ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2346                                        ICE_TXD_CTX_QW1_CMD_S);
2347
2348        ice_tstamp(tx_ring, skb, first, &offload);
2349        if (ice_is_switchdev_running(vsi->back))
2350                ice_eswitch_set_target_vsi(skb, &offload);
2351
2352        if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2353                struct ice_tx_ctx_desc *cdesc;
2354                u16 i = tx_ring->next_to_use;
2355
2356                /* grab the next descriptor */
2357                cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2358                i++;
2359                tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2360
2361                /* setup context descriptor */
2362                cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2363                cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2364                cdesc->rsvd = cpu_to_le16(0);
2365                cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2366        }
2367
2368        ice_tx_map(tx_ring, first, &offload);
2369        return NETDEV_TX_OK;
2370
2371out_drop:
2372        ice_trace(xmit_frame_ring_drop, tx_ring, skb);
2373        dev_kfree_skb_any(skb);
2374        return NETDEV_TX_OK;
2375}
2376
2377/**
2378 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2379 * @skb: send buffer
2380 * @netdev: network interface device structure
2381 *
2382 * Returns NETDEV_TX_OK if sent, else an error code
2383 */
2384netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2385{
2386        struct ice_netdev_priv *np = netdev_priv(netdev);
2387        struct ice_vsi *vsi = np->vsi;
2388        struct ice_tx_ring *tx_ring;
2389
2390        tx_ring = vsi->tx_rings[skb->queue_mapping];
2391
2392        /* hardware can't handle really short frames; hardware padding works
2393         * beyond this point
2394         */
2395        if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2396                return NETDEV_TX_OK;
2397
2398        return ice_xmit_frame_ring(skb, tx_ring);
2399}
2400
2401/**
2402 * ice_get_dscp_up - return the UP/TC value for a SKB
2403 * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
2404 * @skb: SKB to query for info to determine UP/TC
2405 *
2406 * This function is to only be called when the PF is in L3 DSCP PFC mode
2407 */
2408static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
2409{
2410        u8 dscp = 0;
2411
2412        if (skb->protocol == htons(ETH_P_IP))
2413                dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
2414        else if (skb->protocol == htons(ETH_P_IPV6))
2415                dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
2416
2417        return dcbcfg->dscp_map[dscp];
2418}
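
    /* For example, an IPv4 packet marked with DSCP 46 (EF, dsfield 0xb8)
     * resolves to whatever UP/TC value dscp_map[46] holds in the active DCB
     * configuration.
     */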
2419
2420u16
2421ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
2422                 struct net_device *sb_dev)
2423{
2424        struct ice_pf *pf = ice_netdev_to_pf(netdev);
2425        struct ice_dcbx_cfg *dcbcfg;
2426
2427        dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
2428        if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
2429                skb->priority = ice_get_dscp_up(dcbcfg, skb);
2430
2431        return netdev_pick_tx(netdev, skb, sb_dev);
2432}
2433
2434/**
2435 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2436 * @tx_ring: tx_ring to clean
2437 */
2438void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
2439{
2440        struct ice_vsi *vsi = tx_ring->vsi;
2441        s16 i = tx_ring->next_to_clean;
2442        int budget = ICE_DFLT_IRQ_WORK;
2443        struct ice_tx_desc *tx_desc;
2444        struct ice_tx_buf *tx_buf;
2445
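            /* Each Flow Director programming request occupies two descriptors,
             * the filter descriptor followed by the dummy-packet data
             * descriptor, so every iteration of the loop below retires one
             * such pair.
             */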
2446        tx_buf = &tx_ring->tx_buf[i];
2447        tx_desc = ICE_TX_DESC(tx_ring, i);
2448        i -= tx_ring->count;
2449
2450        do {
2451                struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2452
2453                /* if next_to_watch is not set then there is no pending work */
2454                if (!eop_desc)
2455                        break;
2456
2457                /* prevent any other reads prior to eop_desc */
2458                smp_rmb();
2459
2460                /* if the descriptor isn't done, no work to do */
2461                if (!(eop_desc->cmd_type_offset_bsz &
2462                      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2463                        break;
2464
2465                /* clear next_to_watch to prevent false hangs */
2466                tx_buf->next_to_watch = NULL;
2467                tx_desc->buf_addr = 0;
2468                tx_desc->cmd_type_offset_bsz = 0;
2469
2470                /* move past filter desc */
2471                tx_buf++;
2472                tx_desc++;
2473                i++;
2474                if (unlikely(!i)) {
2475                        i -= tx_ring->count;
2476                        tx_buf = tx_ring->tx_buf;
2477                        tx_desc = ICE_TX_DESC(tx_ring, 0);
2478                }
2479
2480                /* unmap the data header */
2481                if (dma_unmap_len(tx_buf, len))
2482                        dma_unmap_single(tx_ring->dev,
2483                                         dma_unmap_addr(tx_buf, dma),
2484                                         dma_unmap_len(tx_buf, len),
2485                                         DMA_TO_DEVICE);
2486                if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
2487                        devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2488
2489                /* reset the buffer and clear next_to_watch to prevent false hangs */
2490                tx_buf->raw_buf = NULL;
2491                tx_buf->tx_flags = 0;
2492                tx_buf->next_to_watch = NULL;
2493                dma_unmap_len_set(tx_buf, len, 0);
2494                tx_desc->buf_addr = 0;
2495                tx_desc->cmd_type_offset_bsz = 0;
2496
2497                /* move past eop_desc for start of next FD desc */
2498                tx_buf++;
2499                tx_desc++;
2500                i++;
2501                if (unlikely(!i)) {
2502                        i -= tx_ring->count;
2503                        tx_buf = tx_ring->tx_buf;
2504                        tx_desc = ICE_TX_DESC(tx_ring, 0);
2505                }
2506
2507                budget--;
2508        } while (likely(budget));
2509
2510        i += tx_ring->count;
2511        tx_ring->next_to_clean = i;
2512
2513        /* re-enable interrupt if needed */
2514        ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
2515}
2516