linux/drivers/net/ethernet/intel/ice/ice_txrx.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (c) 2018, Intel Corporation. */
   3
   4/* The driver transmit and receive code */
   5
   6#include <linux/prefetch.h>
   7#include <linux/mm.h>
   8#include <linux/bpf_trace.h>
   9#include <net/xdp.h>
  10#include "ice_txrx_lib.h"
  11#include "ice_lib.h"
  12#include "ice.h"
  13#include "ice_trace.h"
  14#include "ice_dcb_lib.h"
  15#include "ice_xsk.h"
  16
  17#define ICE_RX_HDR_SIZE         256
  18
  19#define FDIR_DESC_RXDID 0x40
  20#define ICE_FDIR_CLEAN_DELAY 10
  21
  22/**
  23 * ice_prgm_fdir_fltr - Program a Flow Director filter
   24 * @vsi: VSI to send the dummy packet on
  25 * @fdir_desc: flow director descriptor
  26 * @raw_packet: allocated buffer for flow director
  27 */
  28int
  29ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
  30                   u8 *raw_packet)
  31{
  32        struct ice_tx_buf *tx_buf, *first;
  33        struct ice_fltr_desc *f_desc;
  34        struct ice_tx_desc *tx_desc;
  35        struct ice_ring *tx_ring;
  36        struct device *dev;
  37        dma_addr_t dma;
  38        u32 td_cmd;
  39        u16 i;
  40
  41        /* VSI and Tx ring */
  42        if (!vsi)
  43                return -ENOENT;
  44        tx_ring = vsi->tx_rings[0];
  45        if (!tx_ring || !tx_ring->desc)
  46                return -ENOENT;
  47        dev = tx_ring->dev;
  48
  49        /* we are using two descriptors to add/del a filter and we can wait */
  50        for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
  51                if (!i)
  52                        return -EAGAIN;
  53                msleep_interruptible(1);
  54        }
  55
  56        dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
  57                             DMA_TO_DEVICE);
  58
  59        if (dma_mapping_error(dev, dma))
  60                return -EINVAL;
  61
  62        /* grab the next descriptor */
  63        i = tx_ring->next_to_use;
  64        first = &tx_ring->tx_buf[i];
  65        f_desc = ICE_TX_FDIRDESC(tx_ring, i);
  66        memcpy(f_desc, fdir_desc, sizeof(*f_desc));
  67
  68        i++;
  69        i = (i < tx_ring->count) ? i : 0;
  70        tx_desc = ICE_TX_DESC(tx_ring, i);
  71        tx_buf = &tx_ring->tx_buf[i];
  72
  73        i++;
  74        tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
  75
  76        memset(tx_buf, 0, sizeof(*tx_buf));
  77        dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
  78        dma_unmap_addr_set(tx_buf, dma, dma);
  79
  80        tx_desc->buf_addr = cpu_to_le64(dma);
  81        td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
  82                 ICE_TX_DESC_CMD_RE;
  83
  84        tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
  85        tx_buf->raw_buf = raw_packet;
  86
  87        tx_desc->cmd_type_offset_bsz =
  88                ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
  89
  90        /* Force memory write to complete before letting h/w know
  91         * there are new descriptors to fetch.
  92         */
  93        wmb();
  94
  95        /* mark the data descriptor to be watched */
  96        first->next_to_watch = tx_desc;
  97
  98        writel(tx_ring->next_to_use, tx_ring->tail);
  99
 100        return 0;
 101}
 102
 103/**
 104 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 105 * @ring: the ring that owns the buffer
 106 * @tx_buf: the buffer to free
 107 */
 108static void
 109ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
 110{
 111        if (tx_buf->skb) {
 112                if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
 113                        devm_kfree(ring->dev, tx_buf->raw_buf);
 114                else if (ice_ring_is_xdp(ring))
 115                        page_frag_free(tx_buf->raw_buf);
 116                else
 117                        dev_kfree_skb_any(tx_buf->skb);
 118                if (dma_unmap_len(tx_buf, len))
 119                        dma_unmap_single(ring->dev,
 120                                         dma_unmap_addr(tx_buf, dma),
 121                                         dma_unmap_len(tx_buf, len),
 122                                         DMA_TO_DEVICE);
 123        } else if (dma_unmap_len(tx_buf, len)) {
 124                dma_unmap_page(ring->dev,
 125                               dma_unmap_addr(tx_buf, dma),
 126                               dma_unmap_len(tx_buf, len),
 127                               DMA_TO_DEVICE);
 128        }
 129
 130        tx_buf->next_to_watch = NULL;
 131        tx_buf->skb = NULL;
 132        dma_unmap_len_set(tx_buf, len, 0);
 133        /* tx_buf must be completely set up in the transmit path */
 134}
 135
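     /**
      * txring_txq - Return the netdev Tx queue that backs a Tx ring
      * @ring: Tx ring to look up the netdev queue for
      */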
 136static struct netdev_queue *txring_txq(const struct ice_ring *ring)
 137{
 138        return netdev_get_tx_queue(ring->netdev, ring->q_index);
 139}
 140
 141/**
  142 * ice_clean_tx_ring - Free any pending Tx buffers
 143 * @tx_ring: ring to be cleaned
 144 */
 145void ice_clean_tx_ring(struct ice_ring *tx_ring)
 146{
 147        u16 i;
 148
 149        if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
 150                ice_xsk_clean_xdp_ring(tx_ring);
 151                goto tx_skip_free;
 152        }
 153
 154        /* ring already cleared, nothing to do */
 155        if (!tx_ring->tx_buf)
 156                return;
 157
 158        /* Free all the Tx ring sk_buffs */
 159        for (i = 0; i < tx_ring->count; i++)
 160                ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
 161
 162tx_skip_free:
 163        memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
 164
 165        /* Zero out the descriptor ring */
 166        memset(tx_ring->desc, 0, tx_ring->size);
 167
 168        tx_ring->next_to_use = 0;
 169        tx_ring->next_to_clean = 0;
 170
 171        if (!tx_ring->netdev)
 172                return;
 173
 174        /* cleanup Tx queue statistics */
 175        netdev_tx_reset_queue(txring_txq(tx_ring));
 176}
 177
 178/**
 179 * ice_free_tx_ring - Free Tx resources per queue
 180 * @tx_ring: Tx descriptor ring for a specific queue
 181 *
 182 * Free all transmit software resources
 183 */
 184void ice_free_tx_ring(struct ice_ring *tx_ring)
 185{
 186        ice_clean_tx_ring(tx_ring);
 187        devm_kfree(tx_ring->dev, tx_ring->tx_buf);
 188        tx_ring->tx_buf = NULL;
 189
 190        if (tx_ring->desc) {
 191                dmam_free_coherent(tx_ring->dev, tx_ring->size,
 192                                   tx_ring->desc, tx_ring->dma);
 193                tx_ring->desc = NULL;
 194        }
 195}
 196
 197/**
 198 * ice_clean_tx_irq - Reclaim resources after transmit completes
 199 * @tx_ring: Tx ring to clean
 200 * @napi_budget: Used to determine if we are in netpoll
 201 *
  202 * Returns true if there's any budget left (i.e. the clean is finished)
 203 */
 204static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
 205{
 206        unsigned int total_bytes = 0, total_pkts = 0;
 207        unsigned int budget = ICE_DFLT_IRQ_WORK;
 208        struct ice_vsi *vsi = tx_ring->vsi;
 209        s16 i = tx_ring->next_to_clean;
 210        struct ice_tx_desc *tx_desc;
 211        struct ice_tx_buf *tx_buf;
 212
 213        tx_buf = &tx_ring->tx_buf[i];
 214        tx_desc = ICE_TX_DESC(tx_ring, i);
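             /* track i as a negative offset from the end of the ring so the
              * wrap checks below reduce to a simple test against zero
              */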
 215        i -= tx_ring->count;
 216
 217        prefetch(&vsi->state);
 218
 219        do {
 220                struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
 221
 222                /* if next_to_watch is not set then there is no work pending */
 223                if (!eop_desc)
 224                        break;
 225
 226                smp_rmb();      /* prevent any other reads prior to eop_desc */
 227
 228                ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
 229                /* if the descriptor isn't done, no work yet to do */
 230                if (!(eop_desc->cmd_type_offset_bsz &
 231                      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
 232                        break;
 233
 234                /* clear next_to_watch to prevent false hangs */
 235                tx_buf->next_to_watch = NULL;
 236
 237                /* update the statistics for this packet */
 238                total_bytes += tx_buf->bytecount;
 239                total_pkts += tx_buf->gso_segs;
 240
 241                if (ice_ring_is_xdp(tx_ring))
 242                        page_frag_free(tx_buf->raw_buf);
 243                else
 244                        /* free the skb */
 245                        napi_consume_skb(tx_buf->skb, napi_budget);
 246
 247                /* unmap skb header data */
 248                dma_unmap_single(tx_ring->dev,
 249                                 dma_unmap_addr(tx_buf, dma),
 250                                 dma_unmap_len(tx_buf, len),
 251                                 DMA_TO_DEVICE);
 252
 253                /* clear tx_buf data */
 254                tx_buf->skb = NULL;
 255                dma_unmap_len_set(tx_buf, len, 0);
 256
 257                /* unmap remaining buffers */
 258                while (tx_desc != eop_desc) {
 259                        ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
 260                        tx_buf++;
 261                        tx_desc++;
 262                        i++;
 263                        if (unlikely(!i)) {
 264                                i -= tx_ring->count;
 265                                tx_buf = tx_ring->tx_buf;
 266                                tx_desc = ICE_TX_DESC(tx_ring, 0);
 267                        }
 268
 269                        /* unmap any remaining paged data */
 270                        if (dma_unmap_len(tx_buf, len)) {
 271                                dma_unmap_page(tx_ring->dev,
 272                                               dma_unmap_addr(tx_buf, dma),
 273                                               dma_unmap_len(tx_buf, len),
 274                                               DMA_TO_DEVICE);
 275                                dma_unmap_len_set(tx_buf, len, 0);
 276                        }
 277                }
 278                ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);
 279
 280                /* move us one more past the eop_desc for start of next pkt */
 281                tx_buf++;
 282                tx_desc++;
 283                i++;
 284                if (unlikely(!i)) {
 285                        i -= tx_ring->count;
 286                        tx_buf = tx_ring->tx_buf;
 287                        tx_desc = ICE_TX_DESC(tx_ring, 0);
 288                }
 289
 290                prefetch(tx_desc);
 291
 292                /* update budget accounting */
 293                budget--;
 294        } while (likely(budget));
 295
 296        i += tx_ring->count;
 297        tx_ring->next_to_clean = i;
 298
 299        ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
 300
 301        if (ice_ring_is_xdp(tx_ring))
 302                return !!budget;
 303
 304        netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
 305                                  total_bytes);
 306
 307#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
 308        if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
 309                     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
 310                /* Make sure that anybody stopping the queue after this
 311                 * sees the new next_to_clean.
 312                 */
 313                smp_mb();
 314                if (__netif_subqueue_stopped(tx_ring->netdev,
 315                                             tx_ring->q_index) &&
 316                    !test_bit(ICE_VSI_DOWN, vsi->state)) {
 317                        netif_wake_subqueue(tx_ring->netdev,
 318                                            tx_ring->q_index);
 319                        ++tx_ring->tx_stats.restart_q;
 320                }
 321        }
 322
 323        return !!budget;
 324}
 325
 326/**
 327 * ice_setup_tx_ring - Allocate the Tx descriptors
 328 * @tx_ring: the Tx ring to set up
 329 *
 330 * Return 0 on success, negative on error
 331 */
 332int ice_setup_tx_ring(struct ice_ring *tx_ring)
 333{
 334        struct device *dev = tx_ring->dev;
 335
 336        if (!dev)
 337                return -ENOMEM;
 338
 339        /* warn if we are about to overwrite the pointer */
 340        WARN_ON(tx_ring->tx_buf);
 341        tx_ring->tx_buf =
 342                devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
 343                             GFP_KERNEL);
 344        if (!tx_ring->tx_buf)
 345                return -ENOMEM;
 346
 347        /* round up to nearest page */
 348        tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
 349                              PAGE_SIZE);
 350        tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
 351                                            GFP_KERNEL);
 352        if (!tx_ring->desc) {
 353                dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
 354                        tx_ring->size);
 355                goto err;
 356        }
 357
 358        tx_ring->next_to_use = 0;
 359        tx_ring->next_to_clean = 0;
 360        tx_ring->tx_stats.prev_pkt = -1;
 361        return 0;
 362
 363err:
 364        devm_kfree(dev, tx_ring->tx_buf);
 365        tx_ring->tx_buf = NULL;
 366        return -ENOMEM;
 367}
 368
 369/**
 370 * ice_clean_rx_ring - Free Rx buffers
 371 * @rx_ring: ring to be cleaned
 372 */
 373void ice_clean_rx_ring(struct ice_ring *rx_ring)
 374{
 375        struct device *dev = rx_ring->dev;
 376        u16 i;
 377
 378        /* ring already cleared, nothing to do */
 379        if (!rx_ring->rx_buf)
 380                return;
 381
 382        if (rx_ring->skb) {
 383                dev_kfree_skb(rx_ring->skb);
 384                rx_ring->skb = NULL;
 385        }
 386
 387        if (rx_ring->xsk_pool) {
 388                ice_xsk_clean_rx_ring(rx_ring);
 389                goto rx_skip_free;
 390        }
 391
 392        /* Free all the Rx ring sk_buffs */
 393        for (i = 0; i < rx_ring->count; i++) {
 394                struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
 395
 396                if (!rx_buf->page)
 397                        continue;
 398
 399                /* Invalidate cache lines that may have been written to by
 400                 * device so that we avoid corrupting memory.
 401                 */
 402                dma_sync_single_range_for_cpu(dev, rx_buf->dma,
 403                                              rx_buf->page_offset,
 404                                              rx_ring->rx_buf_len,
 405                                              DMA_FROM_DEVICE);
 406
 407                /* free resources associated with mapping */
 408                dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
 409                                     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
 410                __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
 411
 412                rx_buf->page = NULL;
 413                rx_buf->page_offset = 0;
 414        }
 415
 416rx_skip_free:
 417        memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
 418
 419        /* Zero out the descriptor ring */
 420        memset(rx_ring->desc, 0, rx_ring->size);
 421
 422        rx_ring->next_to_alloc = 0;
 423        rx_ring->next_to_clean = 0;
 424        rx_ring->next_to_use = 0;
 425}
 426
 427/**
 428 * ice_free_rx_ring - Free Rx resources
 429 * @rx_ring: ring to clean the resources from
 430 *
 431 * Free all receive software resources
 432 */
 433void ice_free_rx_ring(struct ice_ring *rx_ring)
 434{
 435        ice_clean_rx_ring(rx_ring);
 436        if (rx_ring->vsi->type == ICE_VSI_PF)
 437                if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
 438                        xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
 439        rx_ring->xdp_prog = NULL;
 440        devm_kfree(rx_ring->dev, rx_ring->rx_buf);
 441        rx_ring->rx_buf = NULL;
 442
 443        if (rx_ring->desc) {
 444                dmam_free_coherent(rx_ring->dev, rx_ring->size,
 445                                   rx_ring->desc, rx_ring->dma);
 446                rx_ring->desc = NULL;
 447        }
 448}
 449
 450/**
 451 * ice_setup_rx_ring - Allocate the Rx descriptors
 452 * @rx_ring: the Rx ring to set up
 453 *
 454 * Return 0 on success, negative on error
 455 */
 456int ice_setup_rx_ring(struct ice_ring *rx_ring)
 457{
 458        struct device *dev = rx_ring->dev;
 459
 460        if (!dev)
 461                return -ENOMEM;
 462
 463        /* warn if we are about to overwrite the pointer */
 464        WARN_ON(rx_ring->rx_buf);
 465        rx_ring->rx_buf =
 466                devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
 467                             GFP_KERNEL);
 468        if (!rx_ring->rx_buf)
 469                return -ENOMEM;
 470
 471        /* round up to nearest page */
 472        rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
 473                              PAGE_SIZE);
 474        rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
 475                                            GFP_KERNEL);
 476        if (!rx_ring->desc) {
 477                dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
 478                        rx_ring->size);
 479                goto err;
 480        }
 481
 482        rx_ring->next_to_use = 0;
 483        rx_ring->next_to_clean = 0;
 484
 485        if (ice_is_xdp_ena_vsi(rx_ring->vsi))
 486                WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
 487
 488        if (rx_ring->vsi->type == ICE_VSI_PF &&
 489            !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
 490                if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
 491                                     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
 492                        goto err;
 493        return 0;
 494
 495err:
 496        devm_kfree(dev, rx_ring->rx_buf);
 497        rx_ring->rx_buf = NULL;
 498        return -ENOMEM;
 499}
 500
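     /**
      * ice_rx_frame_truesize - compute the truesize of an Rx frame
      * @rx_ring: Rx ring the buffer comes from
      * @size: packet length, only used when PAGE_SIZE is larger than 4K
      *
      * On 4K-page systems every buffer owns half a page, so truesize is a
      * fixed power of two; on larger pages it is the aligned frame length,
      * plus headroom and skb_shared_info overhead when an Rx offset is in use.
      */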
 501static unsigned int
 502ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
 503{
 504        unsigned int truesize;
 505
 506#if (PAGE_SIZE < 8192)
 507        truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
 508#else
 509        truesize = rx_ring->rx_offset ?
 510                SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
 511                SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
 512                SKB_DATA_ALIGN(size);
 513#endif
 514        return truesize;
 515}
 516
 517/**
 518 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 519 * @rx_ring: Rx ring
 520 * @xdp: xdp_buff used as input to the XDP program
 521 * @xdp_prog: XDP program to run
 522 *
 523 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 524 */
 525static int
 526ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
 527            struct bpf_prog *xdp_prog)
 528{
 529        struct ice_ring *xdp_ring;
 530        int err, result;
 531        u32 act;
 532
 533        act = bpf_prog_run_xdp(xdp_prog, xdp);
 534        switch (act) {
 535        case XDP_PASS:
 536                return ICE_XDP_PASS;
 537        case XDP_TX:
 538                xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
 539                result = ice_xmit_xdp_buff(xdp, xdp_ring);
 540                if (result == ICE_XDP_CONSUMED)
 541                        goto out_failure;
 542                return result;
 543        case XDP_REDIRECT:
 544                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
 545                if (err)
 546                        goto out_failure;
 547                return ICE_XDP_REDIR;
 548        default:
 549                bpf_warn_invalid_xdp_action(act);
 550                fallthrough;
 551        case XDP_ABORTED:
 552out_failure:
 553                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
 554                fallthrough;
 555        case XDP_DROP:
 556                return ICE_XDP_CONSUMED;
 557        }
 558}
 559
 560/**
 561 * ice_xdp_xmit - submit packets to XDP ring for transmission
 562 * @dev: netdev
 563 * @n: number of XDP frames to be transmitted
 564 * @frames: XDP frames to be transmitted
 565 * @flags: transmit flags
 566 *
  567 * Returns the number of frames successfully sent. Failed frames
  568 * will be freed by the XDP core.
  569 * For error cases, a negative errno code is returned and no frames
  570 * are transmitted (the caller must handle freeing the frames).
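      *
      * As a rough illustration of that contract, a hypothetical caller could
      * release whatever was not sent roughly as follows:
      *
      *	sent = ice_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);
      *	if (sent < 0)
      *		sent = 0;
      *	for (i = sent; i < n; i++)
      *		xdp_return_frame(frames[i]);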
 571 */
 572int
 573ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 574             u32 flags)
 575{
 576        struct ice_netdev_priv *np = netdev_priv(dev);
 577        unsigned int queue_index = smp_processor_id();
 578        struct ice_vsi *vsi = np->vsi;
 579        struct ice_ring *xdp_ring;
 580        int nxmit = 0, i;
 581
 582        if (test_bit(ICE_VSI_DOWN, vsi->state))
 583                return -ENETDOWN;
 584
 585        if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
 586                return -ENXIO;
 587
 588        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 589                return -EINVAL;
 590
 591        xdp_ring = vsi->xdp_rings[queue_index];
 592        for (i = 0; i < n; i++) {
 593                struct xdp_frame *xdpf = frames[i];
 594                int err;
 595
 596                err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
 597                if (err != ICE_XDP_TX)
 598                        break;
 599                nxmit++;
 600        }
 601
 602        if (unlikely(flags & XDP_XMIT_FLUSH))
 603                ice_xdp_ring_update_tail(xdp_ring);
 604
 605        return nxmit;
 606}
 607
 608/**
 609 * ice_alloc_mapped_page - recycle or make a new page
 610 * @rx_ring: ring to use
 611 * @bi: rx_buf struct to modify
 612 *
 613 * Returns true if the page was successfully allocated or
 614 * reused.
 615 */
 616static bool
 617ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
 618{
 619        struct page *page = bi->page;
 620        dma_addr_t dma;
 621
 622        /* since we are recycling buffers we should seldom need to alloc */
 623        if (likely(page))
 624                return true;
 625
 626        /* alloc new page for storage */
 627        page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
 628        if (unlikely(!page)) {
 629                rx_ring->rx_stats.alloc_page_failed++;
 630                return false;
 631        }
 632
 633        /* map page for use */
 634        dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
 635                                 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
 636
 637        /* if mapping failed free memory back to system since
 638         * there isn't much point in holding memory we can't use
 639         */
 640        if (dma_mapping_error(rx_ring->dev, dma)) {
 641                __free_pages(page, ice_rx_pg_order(rx_ring));
 642                rx_ring->rx_stats.alloc_page_failed++;
 643                return false;
 644        }
 645
 646        bi->dma = dma;
 647        bi->page = page;
 648        bi->page_offset = rx_ring->rx_offset;
 649        page_ref_add(page, USHRT_MAX - 1);
 650        bi->pagecnt_bias = USHRT_MAX;
 651
 652        return true;
 653}
 654
 655/**
 656 * ice_alloc_rx_bufs - Replace used receive buffers
 657 * @rx_ring: ring to place buffers on
 658 * @cleaned_count: number of buffers to replace
 659 *
 660 * Returns false if all allocations were successful, true if any fail. Returning
 661 * true signals to the caller that we didn't replace cleaned_count buffers and
 662 * there is more work to do.
 663 *
 664 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 665 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 666 * multiple tail writes per call.
 667 */
 668bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
 669{
 670        union ice_32b_rx_flex_desc *rx_desc;
 671        u16 ntu = rx_ring->next_to_use;
 672        struct ice_rx_buf *bi;
 673
 674        /* do nothing if no valid netdev defined */
 675        if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
 676            !cleaned_count)
 677                return false;
 678
 679        /* get the Rx descriptor and buffer based on next_to_use */
 680        rx_desc = ICE_RX_DESC(rx_ring, ntu);
 681        bi = &rx_ring->rx_buf[ntu];
 682
 683        do {
 684                /* if we fail here, we have work remaining */
 685                if (!ice_alloc_mapped_page(rx_ring, bi))
 686                        break;
 687
 688                /* sync the buffer for use by the device */
 689                dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
 690                                                 bi->page_offset,
 691                                                 rx_ring->rx_buf_len,
 692                                                 DMA_FROM_DEVICE);
 693
 694                /* Refresh the desc even if buffer_addrs didn't change
 695                 * because each write-back erases this info.
 696                 */
 697                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
 698
 699                rx_desc++;
 700                bi++;
 701                ntu++;
 702                if (unlikely(ntu == rx_ring->count)) {
 703                        rx_desc = ICE_RX_DESC(rx_ring, 0);
 704                        bi = rx_ring->rx_buf;
 705                        ntu = 0;
 706                }
 707
 708                /* clear the status bits for the next_to_use descriptor */
 709                rx_desc->wb.status_error0 = 0;
 710
 711                cleaned_count--;
 712        } while (cleaned_count);
 713
 714        if (rx_ring->next_to_use != ntu)
 715                ice_release_rx_desc(rx_ring, ntu);
 716
 717        return !!cleaned_count;
 718}
 719
 720/**
 721 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 722 * @rx_buf: Rx buffer to adjust
 723 * @size: Size of adjustment
 724 *
 725 * Update the offset within page so that Rx buf will be ready to be reused.
 726 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
  727 * so the second half of the page assigned to the Rx buffer will be used;
  728 * otherwise the offset is moved forward by "size" bytes.
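      *
      * For example, assuming 4K pages and the half-page truesize of 2048 used
      * by the callers in this file, the XOR simply toggles the buffer between
      * the two 2048-byte halves of the page on every use.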
 729 */
 730static void
 731ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
 732{
 733#if (PAGE_SIZE < 8192)
 734        /* flip page offset to other buffer */
 735        rx_buf->page_offset ^= size;
 736#else
 737        /* move offset up to the next cache line */
 738        rx_buf->page_offset += size;
 739#endif
 740}
 741
 742/**
 743 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 744 * @rx_buf: buffer containing the page
 745 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
 746 *
 747 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 748 * which will assign the current buffer to the buffer that next_to_alloc is
 749 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 750 * page freed
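      *
      * On PAGE_SIZE < 8192 systems the reuse test is arithmetic on the page
      * refcount and pagecnt_bias: ice_alloc_mapped_page() seeds both at
      * USHRT_MAX, ice_get_rx_buf() decrements the bias for every buffer handed
      * up the stack, and the page is reusable only while the difference stays
      * at most one, i.e. nothing beyond the buffer currently being processed
      * still holds a piece of the page.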
 751 */
 752static bool
 753ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
 754{
 755        unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
 756        struct page *page = rx_buf->page;
 757
 758        /* avoid re-using remote and pfmemalloc pages */
 759        if (!dev_page_is_reusable(page))
 760                return false;
 761
 762#if (PAGE_SIZE < 8192)
 763        /* if we are only owner of page we can reuse it */
 764        if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
 765                return false;
 766#else
 767#define ICE_LAST_OFFSET \
 768        (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
 769        if (rx_buf->page_offset > ICE_LAST_OFFSET)
 770                return false;
  771#endif /* PAGE_SIZE < 8192 */
 772
 773        /* If we have drained the page fragment pool we need to update
 774         * the pagecnt_bias and page count so that we fully restock the
 775         * number of references the driver holds.
 776         */
 777        if (unlikely(pagecnt_bias == 1)) {
 778                page_ref_add(page, USHRT_MAX - 1);
 779                rx_buf->pagecnt_bias = USHRT_MAX;
 780        }
 781
 782        return true;
 783}
 784
 785/**
 786 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 787 * @rx_ring: Rx descriptor ring to transact packets on
 788 * @rx_buf: buffer containing page to add
 789 * @skb: sk_buff to place the data into
 790 * @size: packet length from rx_desc
 791 *
 792 * This function will add the data contained in rx_buf->page to the skb.
 793 * It will just attach the page as a frag to the skb.
 794 * The function will then update the page offset.
 795 */
 796static void
 797ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 798                struct sk_buff *skb, unsigned int size)
 799{
 800#if (PAGE_SIZE >= 8192)
 801        unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
 802#else
 803        unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
 804#endif
 805
 806        if (!size)
 807                return;
 808        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
 809                        rx_buf->page_offset, size, truesize);
 810
 811        /* page is being used so we must update the page offset */
 812        ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 813}
 814
 815/**
 816 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 817 * @rx_ring: Rx descriptor ring to store buffers on
 818 * @old_buf: donor buffer to have page reused
 819 *
 820 * Synchronizes page for reuse by the adapter
 821 */
 822static void
 823ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
 824{
 825        u16 nta = rx_ring->next_to_alloc;
 826        struct ice_rx_buf *new_buf;
 827
 828        new_buf = &rx_ring->rx_buf[nta];
 829
 830        /* update, and store next to alloc */
 831        nta++;
 832        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 833
 834        /* Transfer page from old buffer to new buffer.
 835         * Move each member individually to avoid possible store
 836         * forwarding stalls and unnecessary copy of skb.
 837         */
 838        new_buf->dma = old_buf->dma;
 839        new_buf->page = old_buf->page;
 840        new_buf->page_offset = old_buf->page_offset;
 841        new_buf->pagecnt_bias = old_buf->pagecnt_bias;
 842}
 843
 844/**
 845 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 846 * @rx_ring: Rx descriptor ring to transact packets on
 847 * @size: size of buffer to add to skb
 848 * @rx_buf_pgcnt: rx_buf page refcount
 849 *
 850 * This function will pull an Rx buffer from the ring and synchronize it
 851 * for use by the CPU.
 852 */
 853static struct ice_rx_buf *
 854ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
 855               int *rx_buf_pgcnt)
 856{
 857        struct ice_rx_buf *rx_buf;
 858
 859        rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
 860        *rx_buf_pgcnt =
 861#if (PAGE_SIZE < 8192)
 862                page_count(rx_buf->page);
 863#else
 864                0;
 865#endif
 866        prefetchw(rx_buf->page);
 867
 868        if (!size)
 869                return rx_buf;
 870        /* we are reusing so sync this buffer for CPU use */
 871        dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
 872                                      rx_buf->page_offset, size,
 873                                      DMA_FROM_DEVICE);
 874
 875        /* We have pulled a buffer for use, so decrement pagecnt_bias */
 876        rx_buf->pagecnt_bias--;
 877
 878        return rx_buf;
 879}
 880
 881/**
 882 * ice_build_skb - Build skb around an existing buffer
 883 * @rx_ring: Rx descriptor ring to transact packets on
 884 * @rx_buf: Rx buffer to pull data from
 885 * @xdp: xdp_buff pointing to the data
 886 *
 887 * This function builds an skb around an existing Rx buffer, taking care
 888 * to set up the skb correctly and avoid any memcpy overhead.
 889 */
 890static struct sk_buff *
 891ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 892              struct xdp_buff *xdp)
 893{
 894        u8 metasize = xdp->data - xdp->data_meta;
 895#if (PAGE_SIZE < 8192)
 896        unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
 897#else
 898        unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
 899                                SKB_DATA_ALIGN(xdp->data_end -
 900                                               xdp->data_hard_start);
 901#endif
 902        struct sk_buff *skb;
 903
 904        /* Prefetch first cache line of first page. If xdp->data_meta
 905         * is unused, this points exactly as xdp->data, otherwise we
 906         * likely have a consumer accessing first few bytes of meta
 907         * data, and then actual data.
 908         */
 909        net_prefetch(xdp->data_meta);
 910        /* build an skb around the page buffer */
 911        skb = build_skb(xdp->data_hard_start, truesize);
 912        if (unlikely(!skb))
 913                return NULL;
 914
  915        /* must record the Rx queue, otherwise OS features such as
  916         * symmetric queues won't work
 917         */
 918        skb_record_rx_queue(skb, rx_ring->q_index);
 919
 920        /* update pointers within the skb to store the data */
 921        skb_reserve(skb, xdp->data - xdp->data_hard_start);
 922        __skb_put(skb, xdp->data_end - xdp->data);
 923        if (metasize)
 924                skb_metadata_set(skb, metasize);
 925
 926        /* buffer is used by skb, update page_offset */
 927        ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 928
 929        return skb;
 930}
 931
 932/**
 933 * ice_construct_skb - Allocate skb and populate it
 934 * @rx_ring: Rx descriptor ring to transact packets on
 935 * @rx_buf: Rx buffer to pull data from
 936 * @xdp: xdp_buff pointing to the data
 937 *
 938 * This function allocates an skb. It then populates it with the page
 939 * data from the current receive descriptor, taking care to set up the
 940 * skb correctly.
 941 */
 942static struct sk_buff *
 943ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 944                  struct xdp_buff *xdp)
 945{
 946        unsigned int size = xdp->data_end - xdp->data;
 947        unsigned int headlen;
 948        struct sk_buff *skb;
 949
 950        /* prefetch first cache line of first page */
 951        net_prefetch(xdp->data);
 952
 953        /* allocate a skb to store the frags */
 954        skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
 955                               GFP_ATOMIC | __GFP_NOWARN);
 956        if (unlikely(!skb))
 957                return NULL;
 958
 959        skb_record_rx_queue(skb, rx_ring->q_index);
 960        /* Determine available headroom for copy */
 961        headlen = size;
 962        if (headlen > ICE_RX_HDR_SIZE)
 963                headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
 964
 965        /* align pull length to size of long to optimize memcpy performance */
 966        memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
 967                                                         sizeof(long)));
 968
 969        /* if we exhaust the linear part then add what is left as a frag */
 970        size -= headlen;
 971        if (size) {
 972#if (PAGE_SIZE >= 8192)
 973                unsigned int truesize = SKB_DATA_ALIGN(size);
 974#else
 975                unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
 976#endif
 977                skb_add_rx_frag(skb, 0, rx_buf->page,
 978                                rx_buf->page_offset + headlen, size, truesize);
 979                /* buffer is used by skb, update page_offset */
 980                ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 981        } else {
 982                /* buffer is unused, reset bias back to rx_buf; data was copied
 983                 * onto skb's linear part so there's no need for adjusting
 984                 * page offset and we can reuse this buffer as-is
 985                 */
 986                rx_buf->pagecnt_bias++;
 987        }
 988
 989        return skb;
 990}
 991
 992/**
 993 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 994 * @rx_ring: Rx descriptor ring to transact packets on
 995 * @rx_buf: Rx buffer to pull data from
 996 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
 997 *
 998 * This function will update next_to_clean and then clean up the contents
 999 * of the rx_buf. It will either recycle the buffer or unmap it and free
1000 * the associated resources.
1001 */
1002static void
1003ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
1004               int rx_buf_pgcnt)
1005{
1006        u16 ntc = rx_ring->next_to_clean + 1;
1007
1008        /* fetch, update, and store next to clean */
1009        ntc = (ntc < rx_ring->count) ? ntc : 0;
1010        rx_ring->next_to_clean = ntc;
1011
1012        if (!rx_buf)
1013                return;
1014
1015        if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
1016                /* hand second half of page back to the ring */
1017                ice_reuse_rx_page(rx_ring, rx_buf);
1018        } else {
1019                /* we are not reusing the buffer so unmap it */
1020                dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1021                                     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1022                                     ICE_RX_DMA_ATTR);
1023                __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1024        }
1025
1026        /* clear contents of buffer_info */
1027        rx_buf->page = NULL;
1028}
1029
1030/**
1031 * ice_is_non_eop - process handling of non-EOP buffers
1032 * @rx_ring: Rx ring being processed
1033 * @rx_desc: Rx descriptor for current buffer
1034 *
1035 * If the buffer is an EOP buffer, this function exits returning false,
 1036 * otherwise it returns true, indicating that this is in fact a non-EOP buffer.
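      *
      * For example, assuming 2048-byte Rx buffers, a 9000-byte jumbo frame
      * spans five descriptors; only the last one has EOF set, so the first
      * four are counted here as non-EOP buffers.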
1037 */
1038static bool
1039ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
1040{
1041        /* if we are the last buffer then there is nothing else to do */
1042#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
1043        if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
1044                return false;
1045
1046        rx_ring->rx_stats.non_eop_descs++;
1047
1048        return true;
1049}
1050
1051/**
1052 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1053 * @rx_ring: Rx descriptor ring to transact packets on
1054 * @budget: Total limit on number of packets to process
1055 *
1056 * This function provides a "bounce buffer" approach to Rx interrupt
1057 * processing. The advantage to this is that on systems that have
1058 * expensive overhead for IOMMU access this provides a means of avoiding
1059 * it by maintaining the mapping of the page to the system.
1060 *
1061 * Returns amount of work completed
1062 */
1063int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
1064{
1065        unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
1066        u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1067        unsigned int offset = rx_ring->rx_offset;
1068        unsigned int xdp_res, xdp_xmit = 0;
1069        struct sk_buff *skb = rx_ring->skb;
1070        struct bpf_prog *xdp_prog = NULL;
1071        struct xdp_buff xdp;
1072        bool failure;
1073
 1074        /* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
1075#if (PAGE_SIZE < 8192)
1076        frame_sz = ice_rx_frame_truesize(rx_ring, 0);
1077#endif
1078        xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
1079
1080        /* start the loop to process Rx packets bounded by 'budget' */
1081        while (likely(total_rx_pkts < (unsigned int)budget)) {
1082                union ice_32b_rx_flex_desc *rx_desc;
1083                struct ice_rx_buf *rx_buf;
1084                unsigned char *hard_start;
1085                unsigned int size;
1086                u16 stat_err_bits;
1087                int rx_buf_pgcnt;
1088                u16 vlan_tag = 0;
1089                u16 rx_ptype;
1090
1091                /* get the Rx desc from Rx ring based on 'next_to_clean' */
1092                rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1093
1094                /* status_error_len will always be zero for unused descriptors
1095                 * because it's cleared in cleanup, and overlaps with hdr_addr
 1096                 * which is always zero because packet split isn't used; if the
 1097                 * hardware wrote the DD bit then it will be non-zero
1098                 */
1099                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1100                if (!ice_test_staterr(rx_desc, stat_err_bits))
1101                        break;
1102
1103                /* This memory barrier is needed to keep us from reading
1104                 * any other fields out of the rx_desc until we know the
1105                 * DD bit is set.
1106                 */
1107                dma_rmb();
1108
1109                ice_trace(clean_rx_irq, rx_ring, rx_desc);
1110                if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1111                        struct ice_vsi *ctrl_vsi = rx_ring->vsi;
1112
1113                        if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
1114                            ctrl_vsi->vf_id != ICE_INVAL_VFID)
1115                                ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
1116                        ice_put_rx_buf(rx_ring, NULL, 0);
1117                        cleaned_count++;
1118                        continue;
1119                }
1120
1121                size = le16_to_cpu(rx_desc->wb.pkt_len) &
1122                        ICE_RX_FLX_DESC_PKT_LEN_M;
1123
1124                /* retrieve a buffer from the ring */
1125                rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
1126
1127                if (!size) {
1128                        xdp.data = NULL;
1129                        xdp.data_end = NULL;
1130                        xdp.data_hard_start = NULL;
1131                        xdp.data_meta = NULL;
1132                        goto construct_skb;
1133                }
1134
1135                hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1136                             offset;
1137                xdp_prepare_buff(&xdp, hard_start, offset, size, true);
1138#if (PAGE_SIZE > 4096)
 1139                /* At larger PAGE_SIZE, frame_sz depends on the frame length */
1140                xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1141#endif
1142
1143                xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1144                if (!xdp_prog)
1145                        goto construct_skb;
1146
1147                xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
1148                if (!xdp_res)
1149                        goto construct_skb;
1150                if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1151                        xdp_xmit |= xdp_res;
1152                        ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
1153                } else {
1154                        rx_buf->pagecnt_bias++;
1155                }
1156                total_rx_bytes += size;
1157                total_rx_pkts++;
1158
1159                cleaned_count++;
1160                ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1161                continue;
1162construct_skb:
1163                if (skb) {
1164                        ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1165                } else if (likely(xdp.data)) {
1166                        if (ice_ring_uses_build_skb(rx_ring))
1167                                skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1168                        else
1169                                skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1170                }
1171                /* exit if we failed to retrieve a buffer */
1172                if (!skb) {
1173                        rx_ring->rx_stats.alloc_buf_failed++;
1174                        if (rx_buf)
1175                                rx_buf->pagecnt_bias++;
1176                        break;
1177                }
1178
1179                ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1180                cleaned_count++;
1181
 1182                /* skip if it is a NOP desc */
1183                if (ice_is_non_eop(rx_ring, rx_desc))
1184                        continue;
1185
1186                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1187                if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1188                        dev_kfree_skb_any(skb);
1189                        continue;
1190                }
1191
1192                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1193                if (ice_test_staterr(rx_desc, stat_err_bits))
1194                        vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1195
1196                /* pad the skb if needed, to make a valid ethernet frame */
1197                if (eth_skb_pad(skb)) {
1198                        skb = NULL;
1199                        continue;
1200                }
1201
1202                /* probably a little skewed due to removing CRC */
1203                total_rx_bytes += skb->len;
1204
1205                /* populate checksum, VLAN, and protocol */
1206                rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1207                        ICE_RX_FLEX_DESC_PTYPE_M;
1208
1209                ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1210
1211                ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
1212                /* send completed skb up the stack */
1213                ice_receive_skb(rx_ring, skb, vlan_tag);
1214                skb = NULL;
1215
1216                /* update budget accounting */
1217                total_rx_pkts++;
1218        }
1219
1220        /* return up to cleaned_count buffers to hardware */
1221        failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1222
1223        if (xdp_prog)
1224                ice_finalize_xdp_rx(rx_ring, xdp_xmit);
1225        rx_ring->skb = skb;
1226
1227        ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
1228
1229        /* guarantee a trip back through this routine if there was a failure */
1230        return failure ? budget : (int)total_rx_pkts;
1231}
1232
1233/**
1234 * ice_net_dim - Update net DIM algorithm
1235 * @q_vector: the vector associated with the interrupt
1236 *
1237 * Create a DIM sample and notify net_dim() so that it can possibly decide
1238 * a new ITR value based on incoming packets, bytes, and interrupts.
1239 *
 1240 * This function is a no-op if the ring is not configured for dynamic ITR.
1241 */
1242static void ice_net_dim(struct ice_q_vector *q_vector)
1243{
1244        struct ice_ring_container *tx = &q_vector->tx;
1245        struct ice_ring_container *rx = &q_vector->rx;
1246
1247        if (ITR_IS_DYNAMIC(tx)) {
1248                struct dim_sample dim_sample = {};
1249                u64 packets = 0, bytes = 0;
1250                struct ice_ring *ring;
1251
1252                ice_for_each_ring(ring, q_vector->tx) {
1253                        packets += ring->stats.pkts;
1254                        bytes += ring->stats.bytes;
1255                }
1256
1257                dim_update_sample(q_vector->total_events, packets, bytes,
1258                                  &dim_sample);
1259
1260                net_dim(&tx->dim, dim_sample);
1261        }
1262
1263        if (ITR_IS_DYNAMIC(rx)) {
1264                struct dim_sample dim_sample = {};
1265                u64 packets = 0, bytes = 0;
1266                struct ice_ring *ring;
1267
1268                ice_for_each_ring(ring, q_vector->rx) {
1269                        packets += ring->stats.pkts;
1270                        bytes += ring->stats.bytes;
1271                }
1272
1273                dim_update_sample(q_vector->total_events, packets, bytes,
1274                                  &dim_sample);
1275
1276                net_dim(&rx->dim, dim_sample);
1277        }
1278}
1279
1280/**
1281 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1282 * @itr_idx: interrupt throttling index
1283 * @itr: interrupt throttling value in usecs
1284 */
1285static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1286{
1287        /* The ITR value is reported in microseconds, and the register value is
1288         * recorded in 2 microsecond units. For this reason we only need to
1289         * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1290         * granularity as a shift instead of division. The mask makes sure the
1291         * ITR value is never odd so we don't accidentally write into the field
1292         * prior to the ITR field.
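          *
          * Worked example, assuming the current register layout where the
          * INTERVAL field starts at bit 5 (GLINT_DYN_CTL_INTERVAL_S) and
          * ICE_ITR_GRAN_S is 1 (2 usec granularity): an ITR of 50 usecs is
          * shifted left by 4, which lands 50 / 2 = 25 interval units in the
          * INTERVAL field.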
1293         */
1294        itr &= ICE_ITR_MASK;
1295
1296        return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1297                (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1298                (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1299}
1300
1301/**
1302 * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt
1303 * @q_vector: the vector associated with the interrupt to enable
1304 *
1305 * Update the net_dim() algorithm and re-enable the interrupt associated with
1306 * this vector.
1307 *
1308 * If the VSI is down, the interrupt will not be re-enabled.
1309 */
1310static void ice_update_ena_itr(struct ice_q_vector *q_vector)
1311{
1312        struct ice_vsi *vsi = q_vector->vsi;
1313        bool wb_en = q_vector->wb_on_itr;
1314        u32 itr_val;
1315
1316        if (test_bit(ICE_DOWN, vsi->state))
1317                return;
1318
1319        /* When exiting WB_ON_ITR, let ITR resume its normal
1320         * interrupts-enabled path.
1321         */
1322        if (wb_en)
1323                q_vector->wb_on_itr = false;
1324
1325        /* This will do nothing if dynamic updates are not enabled. */
1326        ice_net_dim(q_vector);
1327
1328        /* net_dim() updates ITR out-of-band using a work item */
1329        itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1330        /* trigger an immediate software interrupt when exiting
1331         * busy poll, to make sure to catch any pending cleanups
1332         * that might have been missed due to interrupt state
1333         * transition.
1334         */
1335        if (wb_en) {
1336                itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
1337                           GLINT_DYN_CTL_SW_ITR_INDX_M |
1338                           GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
1339        }
1340        wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1341}
1342
1343/**
1344 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1345 * @q_vector: q_vector to set WB_ON_ITR on
1346 *
1347 * We need to tell hardware to write-back completed descriptors even when
1348 * interrupts are disabled. Descriptors will be written back on cache line
1349 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1350 * descriptors may not be written back if they don't fill a cache line until
1351 * the next interrupt.
1352 *
1353 * This sets the write-back frequency to whatever was set previously for the
1354 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
1355 * aren't meddling with the INTENA_M bit.
1356 */
1357static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1358{
1359        struct ice_vsi *vsi = q_vector->vsi;
1360
 1361        /* already in wb_on_itr mode, no need to change it */
1362        if (q_vector->wb_on_itr)
1363                return;
1364
1365        /* use previously set ITR values for all of the ITR indices by
1366         * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
1367         * be static in non-adaptive mode (user configured)
1368         */
1369        wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1370             ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
1371              GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
1372             GLINT_DYN_CTL_WB_ON_ITR_M);
1373
1374        q_vector->wb_on_itr = true;
1375}
1376
1377/**
1378 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 1379 * @napi: napi struct with our device's info in it
1380 * @budget: amount of work driver is allowed to do this pass, in packets
1381 *
1382 * This function will clean all queues associated with a q_vector.
1383 *
1384 * Returns the amount of work done
1385 */
1386int ice_napi_poll(struct napi_struct *napi, int budget)
1387{
1388        struct ice_q_vector *q_vector =
1389                                container_of(napi, struct ice_q_vector, napi);
1390        bool clean_complete = true;
1391        struct ice_ring *ring;
1392        int budget_per_ring;
1393        int work_done = 0;
1394
1395        /* Since the actual Tx work is minimal, we can give the Tx a larger
1396         * budget and be more aggressive about cleaning up the Tx descriptors.
1397         */
1398        ice_for_each_ring(ring, q_vector->tx) {
1399                bool wd = ring->xsk_pool ?
1400                          ice_clean_tx_irq_zc(ring, budget) :
1401                          ice_clean_tx_irq(ring, budget);
1402
1403                if (!wd)
1404                        clean_complete = false;
1405        }
1406
1407        /* Handle case where we are called by netpoll with a budget of 0 */
1408        if (unlikely(budget <= 0))
1409                return budget;
1410
1411        /* normally we have 1 Rx ring per q_vector */
1412        if (unlikely(q_vector->num_ring_rx > 1))
1413                /* We attempt to distribute budget to each Rx queue fairly, but
1414                 * don't allow the budget to go below 1 because that would exit
1415                 * polling early.
1416                 */
1417                budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1418        else
1419                /* Max of 1 Rx ring in this q_vector so give it the budget */
1420                budget_per_ring = budget;
1421
1422        ice_for_each_ring(ring, q_vector->rx) {
1423                int cleaned;
1424
1425                /* A dedicated path for zero-copy allows making a single
1426                 * comparison in the irq context instead of many inside the
1427                 * ice_clean_rx_irq function and makes the codebase cleaner.
1428                 */
1429                cleaned = ring->xsk_pool ?
1430                          ice_clean_rx_irq_zc(ring, budget_per_ring) :
1431                          ice_clean_rx_irq(ring, budget_per_ring);
1432                work_done += cleaned;
1433                /* if we clean as many as budgeted, we must not be done */
1434                if (cleaned >= budget_per_ring)
1435                        clean_complete = false;
1436        }
1437
1438        /* If work not completed, return budget and polling will return */
1439        if (!clean_complete) {
1440                /* Set the writeback on ITR so partial completions of
1441                 * cache-lines will still continue even if we're polling.
1442                 */
1443                ice_set_wb_on_itr(q_vector);
1444                return budget;
1445        }
1446
1447        /* Exit the polling mode, but don't re-enable interrupts if stack might
1448         * poll us due to busy-polling
1449         */
1450        if (likely(napi_complete_done(napi, work_done)))
1451                ice_update_ena_itr(q_vector);
1452        else
1453                ice_set_wb_on_itr(q_vector);
1454
1455        return min_t(int, work_done, budget - 1);
1456}
1457
1458/**
1459 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1460 * @tx_ring: the ring to be checked
 1461 * @size: the number of descriptors we want to ensure are available
1462 *
1463 * Returns -EBUSY if a stop is needed, else 0
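      *
      * The stop / barrier / re-check sequence below pairs with the smp_mb()
      * and netif_wake_subqueue() in ice_clean_tx_irq(), so a completion that
      * races with the stop cannot leave the queue stopped indefinitely.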
1464 */
1465static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1466{
1467        netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1468        /* Memory barrier before checking head and tail */
1469        smp_mb();
1470
1471        /* Check again in case another CPU has just made room available. */
1472        if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1473                return -EBUSY;
1474
1475        /* A reprieve! - use start_subqueue because it doesn't call schedule */
1476        netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1477        ++tx_ring->tx_stats.restart_q;
1478        return 0;
1479}
1480
1481/**
1482 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1483 * @tx_ring: the ring to be checked
1484 * @size:    the number of descriptors we want to assure is available
1485 *
1486 * Returns 0 if stop is not needed
1487 */
1488static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1489{
1490        if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1491                return 0;
1492
1493        return __ice_maybe_stop_tx(tx_ring, size);
1494}
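
/* ICE_DESC_UNUSED() is defined elsewhere in the driver headers; the
 * standalone sketch below shows the ring arithmetic it is assumed to perform:
 * the number of free descriptors between the software producer (next_to_use)
 * and consumer (next_to_clean), keeping one slot empty so the two indices
 * never alias when the ring is full. The function name is hypothetical.
 */
static inline unsigned int example_desc_unused(unsigned int next_to_clean,
					       unsigned int next_to_use,
					       unsigned int count)
{
	if (next_to_clean > next_to_use)
		return next_to_clean - next_to_use - 1;

	return count + next_to_clean - next_to_use - 1;
}

/* e.g. count = 256, next_to_use = 10, next_to_clean = 5 leaves 250 free slots */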
1495
1496/**
1497 * ice_tx_map - Build the Tx descriptor
1498 * @tx_ring: ring to send buffer on
1499 * @first: first Tx buffer info to use
1500 * @off: pointer to struct that holds offload parameters
1501 *
1502 * This function loops over the skb data pointed to by *first
1503 * and gets a physical address for each memory location and programs
1504 * it and the length into the transmit descriptor.
1505 */
1506static void
1507ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1508           struct ice_tx_offload_params *off)
1509{
1510        u64 td_offset, td_tag, td_cmd;
1511        u16 i = tx_ring->next_to_use;
1512        unsigned int data_len, size;
1513        struct ice_tx_desc *tx_desc;
1514        struct ice_tx_buf *tx_buf;
1515        struct sk_buff *skb;
1516        skb_frag_t *frag;
1517        dma_addr_t dma;
1518
1519        td_tag = off->td_l2tag1;
1520        td_cmd = off->td_cmd;
1521        td_offset = off->td_offset;
1522        skb = first->skb;
1523
1524        data_len = skb->data_len;
1525        size = skb_headlen(skb);
1526
1527        tx_desc = ICE_TX_DESC(tx_ring, i);
1528
1529        if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1530                td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1531                td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1532                          ICE_TX_FLAGS_VLAN_S;
1533        }
1534
1535        dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1536
1537        tx_buf = first;
1538
1539        for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1540                unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1541
1542                if (dma_mapping_error(tx_ring->dev, dma))
1543                        goto dma_error;
1544
1545                /* record length, and DMA address */
1546                dma_unmap_len_set(tx_buf, len, size);
1547                dma_unmap_addr_set(tx_buf, dma, dma);
1548
1549                /* align size to end of page */
1550                max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1551                tx_desc->buf_addr = cpu_to_le64(dma);
1552
1553                /* account for data chunks larger than the hardware
1554                 * can handle
1555                 */
1556                while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1557                        tx_desc->cmd_type_offset_bsz =
1558                                ice_build_ctob(td_cmd, td_offset, max_data,
1559                                               td_tag);
1560
1561                        tx_desc++;
1562                        i++;
1563
1564                        if (i == tx_ring->count) {
1565                                tx_desc = ICE_TX_DESC(tx_ring, 0);
1566                                i = 0;
1567                        }
1568
1569                        dma += max_data;
1570                        size -= max_data;
1571
1572                        max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1573                        tx_desc->buf_addr = cpu_to_le64(dma);
1574                }
1575
1576                if (likely(!data_len))
1577                        break;
1578
1579                tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1580                                                              size, td_tag);
1581
1582                tx_desc++;
1583                i++;
1584
1585                if (i == tx_ring->count) {
1586                        tx_desc = ICE_TX_DESC(tx_ring, 0);
1587                        i = 0;
1588                }
1589
1590                size = skb_frag_size(frag);
1591                data_len -= size;
1592
1593                dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1594                                       DMA_TO_DEVICE);
1595
1596                tx_buf = &tx_ring->tx_buf[i];
1597        }
1598
1599        /* record bytecount for BQL */
1600        netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1601
1602        /* record SW timestamp if HW timestamp is not available */
1603        skb_tx_timestamp(first->skb);
1604
1605        i++;
1606        if (i == tx_ring->count)
1607                i = 0;
1608
1609        /* write last descriptor with RS and EOP bits */
1610        td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1611        tx_desc->cmd_type_offset_bsz =
1612                        ice_build_ctob(td_cmd, td_offset, size, td_tag);
1613
1614        /* Force memory writes to complete before letting h/w know there
1615         * are new descriptors to fetch.
1616         *
1617         * We also use this memory barrier to make certain all of the
1618         * status bits have been updated before next_to_watch is written.
1619         */
1620        wmb();
1621
1622        /* set next_to_watch value indicating a packet is present */
1623        first->next_to_watch = tx_desc;
1624
1625        tx_ring->next_to_use = i;
1626
1627        ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1628
1629        /* notify HW of packet */
1630        if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
1631                writel(i, tx_ring->tail);
1632
1633        return;
1634
1635dma_error:
1636        /* clear DMA mappings for failed tx_buf map */
1637        for (;;) {
1638                tx_buf = &tx_ring->tx_buf[i];
1639                ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1640                if (tx_buf == first)
1641                        break;
1642                if (i == 0)
1643                        i = tx_ring->count;
1644                i--;
1645        }
1646
1647        tx_ring->next_to_use = i;
1648}
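
/* A simplified, standalone illustration of how ice_tx_map() above carves a
 * DMA buffer that is larger than one descriptor can carry. The two limits
 * below are placeholders for the driver's ICE_MAX_DATA_PER_TXD and
 * ICE_MAX_READ_REQ_SIZE macros (assumed here to be 16K - 1 and 4K); only the
 * carving logic is the point of the sketch, and the function name is
 * hypothetical.
 */
#define EX_MAX_DATA_PER_TXD	(16 * 1024 - 1)
#define EX_MAX_READ_REQ_SIZE	4096
#define EX_MAX_DATA_ALIGNED	(~(EX_MAX_READ_REQ_SIZE - 1) & EX_MAX_DATA_PER_TXD)

static inline unsigned int example_count_chunks(unsigned long long dma,
						unsigned int size)
{
	unsigned int chunks = 0;
	/* first chunk ends on a read-request boundary, like max_data above */
	unsigned int max_data = EX_MAX_DATA_ALIGNED +
				(-dma & (EX_MAX_READ_REQ_SIZE - 1));

	while (size > EX_MAX_DATA_PER_TXD) {
		dma += max_data;
		size -= max_data;
		max_data = EX_MAX_DATA_ALIGNED;
		chunks++;
	}

	/* whatever remains fits in one final descriptor */
	return chunks + 1;
}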
1649
1650/**
1651 * ice_tx_csum - Enable Tx checksum offloads
1652 * @first: pointer to the first descriptor
1653 * @off: pointer to struct that holds offload parameters
1654 *
1655 * Returns 1 if checksum offload was set up, 0 if it is not needed, or a negative error.
1656 */
1657static
1658int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1659{
1660        u32 l4_len = 0, l3_len = 0, l2_len = 0;
1661        struct sk_buff *skb = first->skb;
1662        union {
1663                struct iphdr *v4;
1664                struct ipv6hdr *v6;
1665                unsigned char *hdr;
1666        } ip;
1667        union {
1668                struct tcphdr *tcp;
1669                unsigned char *hdr;
1670        } l4;
1671        __be16 frag_off, protocol;
1672        unsigned char *exthdr;
1673        u32 offset, cmd = 0;
1674        u8 l4_proto = 0;
1675
1676        if (skb->ip_summed != CHECKSUM_PARTIAL)
1677                return 0;
1678
1679        ip.hdr = skb_network_header(skb);
1680        l4.hdr = skb_transport_header(skb);
1681
1682        /* compute outer L2 header size */
1683        l2_len = ip.hdr - skb->data;
1684        offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1685
1686        protocol = vlan_get_protocol(skb);
1687
1688        if (protocol == htons(ETH_P_IP))
1689                first->tx_flags |= ICE_TX_FLAGS_IPV4;
1690        else if (protocol == htons(ETH_P_IPV6))
1691                first->tx_flags |= ICE_TX_FLAGS_IPV6;
1692
1693        if (skb->encapsulation) {
1694                bool gso_ena = false;
1695                u32 tunnel = 0;
1696
1697                /* define outer network header type */
1698                if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1699                        tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1700                                  ICE_TX_CTX_EIPT_IPV4 :
1701                                  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1702                        l4_proto = ip.v4->protocol;
1703                } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1704                        int ret;
1705
1706                        tunnel |= ICE_TX_CTX_EIPT_IPV6;
1707                        exthdr = ip.hdr + sizeof(*ip.v6);
1708                        l4_proto = ip.v6->nexthdr;
1709                        ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
1710                                               &l4_proto, &frag_off);
1711                        if (ret < 0)
1712                                return -1;
1713                }
1714
1715                /* define outer transport */
1716                switch (l4_proto) {
1717                case IPPROTO_UDP:
1718                        tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1719                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1720                        break;
1721                case IPPROTO_GRE:
1722                        tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1723                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1724                        break;
1725                case IPPROTO_IPIP:
1726                case IPPROTO_IPV6:
1727                        first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1728                        l4.hdr = skb_inner_network_header(skb);
1729                        break;
1730                default:
1731                        if (first->tx_flags & ICE_TX_FLAGS_TSO)
1732                                return -1;
1733
1734                        skb_checksum_help(skb);
1735                        return 0;
1736                }
1737
1738                /* compute outer L3 header size */
1739                tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1740                          ICE_TXD_CTX_QW0_EIPLEN_S;
1741
1742                /* switch IP header pointer from outer to inner header */
1743                ip.hdr = skb_inner_network_header(skb);
1744
1745                /* compute tunnel header size */
1746                tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1747                           ICE_TXD_CTX_QW0_NATLEN_S;
1748
1749                gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1750                /* indicate if we need to offload outer UDP header */
1751                if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1752                    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1753                        tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1754
1755                /* record tunnel offload values */
1756                off->cd_tunnel_params |= tunnel;
1757
1758        /* set DTYP=1 to indicate that it's a Tx context descriptor
1759                 * in IPsec tunnel mode with Tx offloads in Quad word 1
1760                 */
1761                off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1762
1763                /* switch L4 header pointer from outer to inner */
1764                l4.hdr = skb_inner_transport_header(skb);
1765                l4_proto = 0;
1766
1767                /* reset type as we transition from outer to inner headers */
1768                first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1769                if (ip.v4->version == 4)
1770                        first->tx_flags |= ICE_TX_FLAGS_IPV4;
1771                if (ip.v6->version == 6)
1772                        first->tx_flags |= ICE_TX_FLAGS_IPV6;
1773        }
1774
1775        /* Enable IP checksum offloads */
1776        if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1777                l4_proto = ip.v4->protocol;
1778                /* the stack computes the IP header already, the only time we
1779                 * need the hardware to recompute it is in the case of TSO.
1780                 */
1781                if (first->tx_flags & ICE_TX_FLAGS_TSO)
1782                        cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1783                else
1784                        cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1785
1786        } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1787                cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1788                exthdr = ip.hdr + sizeof(*ip.v6);
1789                l4_proto = ip.v6->nexthdr;
1790                if (l4.hdr != exthdr)
1791                        ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1792                                         &frag_off);
1793        } else {
1794                return -1;
1795        }
1796
1797        /* compute inner L3 header size */
1798        l3_len = l4.hdr - ip.hdr;
1799        offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1800
1801        /* Enable L4 checksum offloads */
1802        switch (l4_proto) {
1803        case IPPROTO_TCP:
1804                /* enable checksum offloads */
1805                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1806                l4_len = l4.tcp->doff;
1807                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1808                break;
1809        case IPPROTO_UDP:
1810                /* enable UDP checksum offload */
1811                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1812                l4_len = (sizeof(struct udphdr) >> 2);
1813                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1814                break;
1815        case IPPROTO_SCTP:
1816                /* enable SCTP checksum offload */
1817                cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1818                l4_len = sizeof(struct sctphdr) >> 2;
1819                offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1820                break;
1821
1822        default:
1823                if (first->tx_flags & ICE_TX_FLAGS_TSO)
1824                        return -1;
1825                skb_checksum_help(skb);
1826                return 0;
1827        }
1828
1829        off->td_cmd |= cmd;
1830        off->td_offset |= offset;
1831        return 1;
1832}
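
/* A minimal sketch of how the offset word built in ice_tx_csum() packs the
 * header lengths. The shift positions below are illustrative placeholders
 * standing in for ICE_TX_DESC_LEN_MACLEN_S/IPLEN_S/L4_LEN_S, whose real
 * values live in the hardware headers; what matters is the unit conversion:
 * MACLEN is in 2-byte words, IPLEN and L4LEN in 4-byte words. All lengths
 * below are taken in bytes, and the helper name is hypothetical.
 */
#define EX_LEN_MACLEN_S	0	/* placeholder shift */
#define EX_LEN_IPLEN_S	7	/* placeholder shift */
#define EX_LEN_L4LEN_S	14	/* placeholder shift */

static inline unsigned int example_td_offset(unsigned int l2_len,
					     unsigned int l3_len,
					     unsigned int l4_len)
{
	return ((l2_len / 2) << EX_LEN_MACLEN_S) |
	       ((l3_len / 4) << EX_LEN_IPLEN_S) |
	       ((l4_len / 4) << EX_LEN_L4LEN_S);
}

/* e.g. a 14-byte Ethernet + 20-byte IPv4 + 20-byte TCP frame packs as
 * MACLEN = 7, IPLEN = 5, L4LEN = 5
 */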
1833
1834/**
1835 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1836 * @tx_ring: ring to send buffer on
1837 * @first: pointer to struct ice_tx_buf
1838 *
1839 * Checks the skb and sets up the corresponding generic transmit flags
1840 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1841 */
1842static void
1843ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1844{
1845        struct sk_buff *skb = first->skb;
1846
1847        /* nothing left to do, software offloaded VLAN */
1848        if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
1849                return;
1850
1851        /* currently, we always assume 802.1Q for VLAN insertion as VLAN
1852         * insertion for 802.1AD is not supported
1853         */
1854        if (skb_vlan_tag_present(skb)) {
1855                first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1856                first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1857        }
1858
1859        ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1860}
1861
1862/**
1863 * ice_tso - computes mss and TSO length to prepare for TSO
1864 * @first: pointer to struct ice_tx_buf
1865 * @off: pointer to struct that holds offload parameters
1866 *
1867 * Returns 1 if TSO was set up, 0 if it is not needed, or a negative error.
1868 */
1869static
1870int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1871{
1872        struct sk_buff *skb = first->skb;
1873        union {
1874                struct iphdr *v4;
1875                struct ipv6hdr *v6;
1876                unsigned char *hdr;
1877        } ip;
1878        union {
1879                struct tcphdr *tcp;
1880                struct udphdr *udp;
1881                unsigned char *hdr;
1882        } l4;
1883        u64 cd_mss, cd_tso_len;
1884        u32 paylen;
1885        u8 l4_start;
1886        int err;
1887
1888        if (skb->ip_summed != CHECKSUM_PARTIAL)
1889                return 0;
1890
1891        if (!skb_is_gso(skb))
1892                return 0;
1893
1894        err = skb_cow_head(skb, 0);
1895        if (err < 0)
1896                return err;
1897
1898        /* cppcheck-suppress unreadVariable */
1899        ip.hdr = skb_network_header(skb);
1900        l4.hdr = skb_transport_header(skb);
1901
1902        /* initialize outer IP header fields */
1903        if (ip.v4->version == 4) {
1904                ip.v4->tot_len = 0;
1905                ip.v4->check = 0;
1906        } else {
1907                ip.v6->payload_len = 0;
1908        }
1909
1910        if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
1911                                         SKB_GSO_GRE_CSUM |
1912                                         SKB_GSO_IPXIP4 |
1913                                         SKB_GSO_IPXIP6 |
1914                                         SKB_GSO_UDP_TUNNEL |
1915                                         SKB_GSO_UDP_TUNNEL_CSUM)) {
1916                if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
1917                    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
1918                        l4.udp->len = 0;
1919
1920                        /* determine offset of outer transport header */
1921                        l4_start = (u8)(l4.hdr - skb->data);
1922
1923                        /* remove payload length from outer checksum */
1924                        paylen = skb->len - l4_start;
1925                        csum_replace_by_diff(&l4.udp->check,
1926                                             (__force __wsum)htonl(paylen));
1927                }
1928
1929                /* reset pointers to inner headers */
1930
1931                /* cppcheck-suppress unreadVariable */
1932                ip.hdr = skb_inner_network_header(skb);
1933                l4.hdr = skb_inner_transport_header(skb);
1934
1935                /* initialize inner IP header fields */
1936                if (ip.v4->version == 4) {
1937                        ip.v4->tot_len = 0;
1938                        ip.v4->check = 0;
1939                } else {
1940                        ip.v6->payload_len = 0;
1941                }
1942        }
1943
1944        /* determine offset of transport header */
1945        l4_start = (u8)(l4.hdr - skb->data);
1946
1947        /* remove payload length from checksum */
1948        paylen = skb->len - l4_start;
1949
1950        if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1951                csum_replace_by_diff(&l4.udp->check,
1952                                     (__force __wsum)htonl(paylen));
1953                /* compute length of UDP segmentation header */
1954                off->header_len = (u8)sizeof(struct udphdr) + l4_start;
1955        } else {
1956                csum_replace_by_diff(&l4.tcp->check,
1957                                     (__force __wsum)htonl(paylen));
1958                /* compute length of TCP segmentation header */
1959                off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
1960        }
1961
1962        /* update gso_segs and bytecount */
1963        first->gso_segs = skb_shinfo(skb)->gso_segs;
1964        first->bytecount += (first->gso_segs - 1) * off->header_len;
1965
1966        cd_tso_len = skb->len - off->header_len;
1967        cd_mss = skb_shinfo(skb)->gso_size;
1968
1969        /* record cdesc_qw1 with TSO parameters */
1970        off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
1971                             (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1972                             (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1973                             (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
1974        first->tx_flags |= ICE_TX_FLAGS_TSO;
1975        return 1;
1976}
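
/* A small sketch of the byte accounting done at the end of ice_tso(). For a
 * GSO skb the stack hands the driver the headers once, but the wire carries
 * one copy per segment, so the bytecount used for BQL and stats is credited
 * accordingly (first->bytecount += (gso_segs - 1) * header_len above). The
 * helper name is hypothetical.
 */
static inline unsigned int example_tso_wire_bytes(unsigned int skb_len,
						  unsigned int header_len,
						  unsigned int gso_segs)
{
	/* skb_len already includes one set of headers; add the other copies */
	return skb_len + (gso_segs - 1) * header_len;
}

/* e.g. a 64000-byte TSO skb with 54-byte headers and mss 1448 carries
 * 63946 payload bytes, i.e. 45 segments, so 64000 + 44 * 54 = 66376 bytes
 * end up on the wire
 */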
1977
1978/**
1979 * ice_txd_use_count  - estimate the number of descriptors needed for Tx
1980 * @size: transmit request size in bytes
1981 *
1982 * Due to hardware alignment restrictions (4K alignment), we need to
1983 * assume that we can have no more than 12K of data per descriptor, even
1984 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
1985 * Thus, we need to divide by 12K. But division is slow! Instead,
1986 * we decompose the operation into shifts and one relatively cheap
1987 * multiply operation.
1988 *
1989 * To divide by 12K, we first divide by 4K, then divide by 3:
1990 *     To divide by 4K, shift right by 12 bits
1991 *     To divide by 3, multiply by 85, then divide by 256
1992 *     (Divide by 256 is done by shifting right by 8 bits)
1993 * Finally, we add one to round up. Because 256 isn't an exact multiple of
1994 * 3, we'll underestimate near each multiple of 12K. This is actually more
1995 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
1996 * segment. For our purposes this is accurate out to 1M which is orders of
1997 * magnitude greater than our largest possible GSO size.
1998 *
1999 * This would then be implemented as:
2000 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
2001 *
2002 * Since multiplication and division are commutative, we can reorder
2003 * operations into:
2004 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2005 */
2006static unsigned int ice_txd_use_count(unsigned int size)
2007{
2008        return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2009}
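
/* A standalone check of the shift/multiply trick documented above, assuming
 * ICE_DESCS_FOR_SKB_DATA_PTR (defined elsewhere) is 1. Both expressions agree,
 * to within the intended floor rounding, for any size up to the 1M bound the
 * comment gives. The helper names are hypothetical.
 */
static inline unsigned int example_txd_use_count_slow(unsigned int size)
{
	/* reference: one descriptor per 12K of data, plus one for the tail */
	return size / (12 * 1024) + 1;
}

static inline unsigned int example_txd_use_count_fast(unsigned int size)
{
	/* same result without a divide: /4K is >>12, /3 is *85 then >>8 */
	return ((size * 85) >> 20) + 1;
}

/* e.g. size = 65536: 65536 / 12288 = 5 and (65536 * 85) >> 20 = 5 */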
2010
2011/**
2012 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2013 * @skb: send buffer
2014 *
2015 * Returns number of data descriptors needed for this skb.
2016 */
2017static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2018{
2019        const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2020        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2021        unsigned int count = 0, size = skb_headlen(skb);
2022
2023        for (;;) {
2024                count += ice_txd_use_count(size);
2025
2026                if (!nr_frags--)
2027                        break;
2028
2029                size = skb_frag_size(frag++);
2030        }
2031
2032        return count;
2033}
2034
2035/**
2036 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2037 * @skb: send buffer
2038 *
2039 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2040 * and so we need to figure out the cases where we need to linearize the skb.
2041 *
2042 * For TSO we need to count the TSO header and segment payload separately.
2043 * As such we need to check cases where we have 7 fragments or more as we
2044 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2045 * the segment payload in the first descriptor, and another 7 for the
2046 * fragments.
2047 */
2048static bool __ice_chk_linearize(struct sk_buff *skb)
2049{
2050        const skb_frag_t *frag, *stale;
2051        int nr_frags, sum;
2052
2053        /* no need to check if number of frags is less than 7 */
2054        nr_frags = skb_shinfo(skb)->nr_frags;
2055        if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2056                return false;
2057
2058        /* We need to walk through the list and validate that each group
2059         * of 6 fragments totals at least gso_size.
2060         */
2061        nr_frags -= ICE_MAX_BUF_TXD - 2;
2062        frag = &skb_shinfo(skb)->frags[0];
2063
2064 * Initialize sum to the negative value of gso_size minus 1. We
2065         * use this as the worst case scenario in which the frag ahead
2066         * of us only provides one byte which is why we are limited to 6
2067         * descriptors for a single transmit as the header and previous
2068         * fragment are already consuming 2 descriptors.
2069         */
2070        sum = 1 - skb_shinfo(skb)->gso_size;
2071
2072        /* Add size of frags 0 through 4 to create our initial sum */
2073        sum += skb_frag_size(frag++);
2074        sum += skb_frag_size(frag++);
2075        sum += skb_frag_size(frag++);
2076        sum += skb_frag_size(frag++);
2077        sum += skb_frag_size(frag++);
2078
2079        /* Walk through fragments adding latest fragment, testing it, and
2080         * then removing stale fragments from the sum.
2081         */
2082        for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2083                int stale_size = skb_frag_size(stale);
2084
2085                sum += skb_frag_size(frag++);
2086
2087                /* The stale fragment may present us with a smaller
2088                 * descriptor than the actual fragment size. To account
2089                 * for that we need to remove all the data on the front and
2090                 * figure out what the remainder would be in the last
2091                 * descriptor associated with the fragment.
2092                 */
2093                if (stale_size > ICE_MAX_DATA_PER_TXD) {
2094                        int align_pad = -(skb_frag_off(stale)) &
2095                                        (ICE_MAX_READ_REQ_SIZE - 1);
2096
2097                        sum -= align_pad;
2098                        stale_size -= align_pad;
2099
2100                        do {
2101                                sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2102                                stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2103                        } while (stale_size > ICE_MAX_DATA_PER_TXD);
2104                }
2105
2106                /* if sum is negative we failed to make sufficient progress */
2107                if (sum < 0)
2108                        return true;
2109
2110                if (!nr_frags--)
2111                        break;
2112
2113                sum -= stale_size;
2114        }
2115
2116        return false;
2117}
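
/* A simplified standalone version of the window test above. It ignores the
 * correction for fragments larger than ICE_MAX_DATA_PER_TXD and takes a plain
 * array of fragment sizes; the function name is hypothetical. The idea: with
 * an 8-buffer DMA limit, every window of 6 consecutive fragments must cover a
 * segment, give or take the single byte the fragment just ahead of the window
 * is assumed to contribute.
 */
static inline int example_needs_linearize(const unsigned int *frag_sizes,
					  int nr_frags, unsigned int gso_size)
{
	int i, j;

	/* fewer than 7 fragments can always be sent without linearizing */
	if (nr_frags < 7)
		return 0;

	for (i = 0; i + 6 <= nr_frags; i++) {
		unsigned int window = 0;

		for (j = 0; j < 6; j++)
			window += frag_sizes[i + j];

		if (window + 1 < gso_size)
			return 1;	/* needs __skb_linearize() */
	}

	return 0;
}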
2118
2119/**
2120 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2121 * @skb:      send buffer
2122 * @count:    number of buffers used
2123 *
2124 * Note: Our HW can't scatter-gather more than 8 fragments to build
2125 * a packet on the wire and so we need to figure out the cases where we
2126 * need to linearize the skb.
2127 */
2128static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2129{
2130        /* Both TSO and single send will work if count is less than 8 */
2131        if (likely(count < ICE_MAX_BUF_TXD))
2132                return false;
2133
2134        if (skb_is_gso(skb))
2135                return __ice_chk_linearize(skb);
2136
2137        /* we can support up to 8 data buffers for a single send */
2138        return count != ICE_MAX_BUF_TXD;
2139}
2140
2141/**
2142 * ice_tstamp - set up context descriptor for hardware timestamp
2143 * @tx_ring: pointer to the Tx ring to send buffer on
2144 * @skb: pointer to the SKB we're sending
2145 * @first: Tx buffer
2146 * @off: Tx offload parameters
2147 */
2148static void
2149ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
2150           struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2151{
2152        s8 idx;
2153
2154        /* only timestamp the outbound packet if the user has requested it */
2155        if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
2156                return;
2157
2158        if (!tx_ring->ptp_tx)
2159                return;
2160
2161        /* Tx timestamps cannot be sampled when doing TSO */
2162        if (first->tx_flags & ICE_TX_FLAGS_TSO)
2163                return;
2164
2165        /* Grab an open timestamp slot */
2166        idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
2167        if (idx < 0)
2168                return;
2169
2170        off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2171                             (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
2172                             ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
2173        first->tx_flags |= ICE_TX_FLAGS_TSYN;
2174}
2175
2176/**
2177 * ice_xmit_frame_ring - Sends buffer on Tx ring
2178 * @skb: send buffer
2179 * @tx_ring: ring to send buffer on
2180 *
2181 * Returns NETDEV_TX_OK if sent, else an error code
2182 */
2183static netdev_tx_t
2184ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2185{
2186        struct ice_tx_offload_params offload = { 0 };
2187        struct ice_vsi *vsi = tx_ring->vsi;
2188        struct ice_tx_buf *first;
2189        struct ethhdr *eth;
2190        unsigned int count;
2191        int tso, csum;
2192
2193        ice_trace(xmit_frame_ring, tx_ring, skb);
2194
2195        count = ice_xmit_desc_count(skb);
2196        if (ice_chk_linearize(skb, count)) {
2197                if (__skb_linearize(skb))
2198                        goto out_drop;
2199                count = ice_txd_use_count(skb->len);
2200                tx_ring->tx_stats.tx_linearize++;
2201        }
2202
2203        /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2204         *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2205         *       + 4 desc gap to avoid the cache line where head is,
2206         *       + 1 desc for context descriptor,
2207         * otherwise try next time
2208         */
2209        if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2210                              ICE_DESCS_FOR_CTX_DESC)) {
2211                tx_ring->tx_stats.tx_busy++;
2212                return NETDEV_TX_BUSY;
2213        }
2214
2215        offload.tx_ring = tx_ring;
2216
2217        /* record the location of the first descriptor for this packet */
2218        first = &tx_ring->tx_buf[tx_ring->next_to_use];
2219        first->skb = skb;
2220        first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2221        first->gso_segs = 1;
2222        first->tx_flags = 0;
2223
2224        /* prepare the VLAN tagging flags for Tx */
2225        ice_tx_prepare_vlan_flags(tx_ring, first);
2226
2227        /* set up TSO offload */
2228        tso = ice_tso(first, &offload);
2229        if (tso < 0)
2230                goto out_drop;
2231
2232        /* always set up Tx checksum offload */
2233        csum = ice_tx_csum(first, &offload);
2234        if (csum < 0)
2235                goto out_drop;
2236
2237        /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2238        eth = (struct ethhdr *)skb_mac_header(skb);
2239        if (unlikely((skb->priority == TC_PRIO_CONTROL ||
2240                      eth->h_proto == htons(ETH_P_LLDP)) &&
2241                     vsi->type == ICE_VSI_PF &&
2242                     vsi->port_info->qos_cfg.is_sw_lldp))
2243                offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2244                                        ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2245                                        ICE_TXD_CTX_QW1_CMD_S);
2246
2247        ice_tstamp(tx_ring, skb, first, &offload);
2248
2249        if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2250                struct ice_tx_ctx_desc *cdesc;
2251                u16 i = tx_ring->next_to_use;
2252
2253                /* grab the next descriptor */
2254                cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2255                i++;
2256                tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2257
2258                /* setup context descriptor */
2259                cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2260                cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2261                cdesc->rsvd = cpu_to_le16(0);
2262                cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2263        }
2264
2265        ice_tx_map(tx_ring, first, &offload);
2266        return NETDEV_TX_OK;
2267
2268out_drop:
2269        ice_trace(xmit_frame_ring_drop, tx_ring, skb);
2270        dev_kfree_skb_any(skb);
2271        return NETDEV_TX_OK;
2272}
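
/* A minimal sketch of the headroom test at the top of ice_xmit_frame_ring().
 * The two constants stand in for ICE_DESCS_PER_CACHE_LINE and
 * ICE_DESCS_FOR_CTX_DESC, whose exact values are defined elsewhere; the
 * values used here are illustrative assumptions, as is the function name.
 */
#define EX_DESCS_PER_CACHE_LINE	4	/* assumed gap to stay off the head cache line */
#define EX_DESCS_FOR_CTX_DESC	1	/* one optional context descriptor */

static inline int example_enough_tx_room(unsigned int unused_descs,
					 unsigned int count)
{
	/* data descriptors + cache-line gap + possible context descriptor */
	return unused_descs >= count + EX_DESCS_PER_CACHE_LINE +
			       EX_DESCS_FOR_CTX_DESC;
}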
2273
2274/**
2275 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2276 * @skb: send buffer
2277 * @netdev: network interface device structure
2278 *
2279 * Returns NETDEV_TX_OK if sent, else an error code
2280 */
2281netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2282{
2283        struct ice_netdev_priv *np = netdev_priv(netdev);
2284        struct ice_vsi *vsi = np->vsi;
2285        struct ice_ring *tx_ring;
2286
2287        tx_ring = vsi->tx_rings[skb->queue_mapping];
2288
2289        /* hardware can't handle really short frames, hardware padding works
2290         * beyond this point
2291         */
2292        if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2293                return NETDEV_TX_OK;
2294
2295        return ice_xmit_frame_ring(skb, tx_ring);
2296}
2297
2298/**
2299 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2300 * @tx_ring: tx_ring to clean
2301 */
2302void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
2303{
2304        struct ice_vsi *vsi = tx_ring->vsi;
2305        s16 i = tx_ring->next_to_clean;
2306        int budget = ICE_DFLT_IRQ_WORK;
2307        struct ice_tx_desc *tx_desc;
2308        struct ice_tx_buf *tx_buf;
2309
2310        tx_buf = &tx_ring->tx_buf[i];
2311        tx_desc = ICE_TX_DESC(tx_ring, i);
2312        i -= tx_ring->count;
2313
2314        do {
2315                struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2316
2317                /* if next_to_watch is not set then there is no pending work */
2318                if (!eop_desc)
2319                        break;
2320
2321                /* prevent any other reads prior to eop_desc */
2322                smp_rmb();
2323
2324                /* if the descriptor isn't done, no work to do */
2325                if (!(eop_desc->cmd_type_offset_bsz &
2326                      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2327                        break;
2328
2329                /* clear next_to_watch to prevent false hangs */
2330                tx_buf->next_to_watch = NULL;
2331                tx_desc->buf_addr = 0;
2332                tx_desc->cmd_type_offset_bsz = 0;
2333
2334                /* move past filter desc */
2335                tx_buf++;
2336                tx_desc++;
2337                i++;
2338                if (unlikely(!i)) {
2339                        i -= tx_ring->count;
2340                        tx_buf = tx_ring->tx_buf;
2341                        tx_desc = ICE_TX_DESC(tx_ring, 0);
2342                }
2343
2344                /* unmap the data header */
2345                if (dma_unmap_len(tx_buf, len))
2346                        dma_unmap_single(tx_ring->dev,
2347                                         dma_unmap_addr(tx_buf, dma),
2348                                         dma_unmap_len(tx_buf, len),
2349                                         DMA_TO_DEVICE);
2350                if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
2351                        devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2352
2353                /* reset the Tx buffer and descriptor for the next filter */
2354                tx_buf->raw_buf = NULL;
2355                tx_buf->tx_flags = 0;
2356                tx_buf->next_to_watch = NULL;
2357                dma_unmap_len_set(tx_buf, len, 0);
2358                tx_desc->buf_addr = 0;
2359                tx_desc->cmd_type_offset_bsz = 0;
2360
2361                /* move past eop_desc for start of next FD desc */
2362                tx_buf++;
2363                tx_desc++;
2364                i++;
2365                if (unlikely(!i)) {
2366                        i -= tx_ring->count;
2367                        tx_buf = tx_ring->tx_buf;
2368                        tx_desc = ICE_TX_DESC(tx_ring, 0);
2369                }
2370
2371                budget--;
2372        } while (likely(budget));
2373
2374        i += tx_ring->count;
2375        tx_ring->next_to_clean = i;
2376
2377        /* re-enable interrupt if needed */
2378        ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
2379}
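
/* ice_clean_ctrl_tx_irq() above keeps the ring index biased by -count so the
 * wrap check is a cheap test against zero. A standalone sketch of the same
 * idiom, assuming only a ring of `count` entries; the function name is
 * hypothetical.
 */
static inline int example_biased_ring_walk(int next_to_clean, int count,
					   int steps)
{
	int i = next_to_clean;

	i -= count;		/* bias: i now runs from -count up to -1 */

	while (steps--) {
		i++;
		if (!i)		/* walked past the last entry: wrap */
			i -= count;
	}

	return i + count;	/* un-bias before writing back next_to_clean */
}

/* e.g. next_to_clean = 5 on an 8-entry ring advanced by 4 steps returns 1 */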
2380