linux/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"

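/**
 * ixgbe_xsk_umem - Retrieve the AF_XDP zero-copy umem bound to a ring
 * @adapter: board private structure
 * @ring: ring whose queue index is used for the lookup
 *
 * Returns the umem registered for the ring's queue id if an XDP program
 * is loaded and zero-copy has been enabled for that queue, NULL otherwise.
 **/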
struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *ring)
{
        bool xdp_on = READ_ONCE(adapter->xdp_prog);
        int qid = ring->ring_idx;

        if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
                return NULL;

        return xdp_get_umem_from_qid(adapter->netdev, qid);
}

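/**
 * ixgbe_xsk_umem_dma_map - DMA map all pages backing a umem
 * @adapter: board private structure
 * @umem: umem whose pages are to be mapped
 *
 * Maps every page of the umem for bidirectional DMA and stores the
 * resulting addresses in the umem page array.  On failure, any pages
 * that were already mapped are unmapped again.
 **/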
static int ixgbe_xsk_umem_dma_map(struct ixgbe_adapter *adapter,
                                  struct xdp_umem *umem)
{
        struct device *dev = &adapter->pdev->dev;
        unsigned int i, j;
        dma_addr_t dma;

        for (i = 0; i < umem->npgs; i++) {
                dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
                                         DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
                if (dma_mapping_error(dev, dma))
                        goto out_unmap;

                umem->pages[i].dma = dma;
        }

        return 0;

out_unmap:
        for (j = 0; j < i; j++) {
                dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
                                     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);
                umem->pages[j].dma = 0;
        }

        return -ENOMEM;
}

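/**
 * ixgbe_xsk_umem_dma_unmap - Undo the DMA mapping of a umem
 * @adapter: board private structure
 * @umem: umem whose pages are to be unmapped
 **/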
static void ixgbe_xsk_umem_dma_unmap(struct ixgbe_adapter *adapter,
                                     struct xdp_umem *umem)
{
        struct device *dev = &adapter->pdev->dev;
        unsigned int i;

        for (i = 0; i < umem->npgs; i++) {
                dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
                                     DMA_BIDIRECTIONAL, IXGBE_RX_DMA_ATTR);

                umem->pages[i].dma = 0;
        }
}

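/**
 * ixgbe_xsk_umem_enable - Enable AF_XDP zero-copy on a queue pair
 * @adapter: board private structure
 * @umem: umem to associate with the queue pair
 * @qid: queue pair index
 *
 * Validates the queue id, prepares the fill queue reuse buffer and DMA
 * maps the umem.  If the interface is running, the queue pair is
 * restarted and the NAPI context is kicked so Rx processing starts.
 **/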
static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
                                 struct xdp_umem *umem,
                                 u16 qid)
{
        struct net_device *netdev = adapter->netdev;
        struct xdp_umem_fq_reuse *reuseq;
        bool if_running;
        int err;

        if (qid >= adapter->num_rx_queues)
                return -EINVAL;

        if (qid >= netdev->real_num_rx_queues ||
            qid >= netdev->real_num_tx_queues)
                return -EINVAL;

        reuseq = xsk_reuseq_prepare(adapter->rx_ring[0]->count);
        if (!reuseq)
                return -ENOMEM;

        xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

        err = ixgbe_xsk_umem_dma_map(adapter, umem);
        if (err)
                return err;

        if_running = netif_running(adapter->netdev) &&
                     ixgbe_enabled_xdp_adapter(adapter);

        if (if_running)
                ixgbe_txrx_ring_disable(adapter, qid);

        set_bit(qid, adapter->af_xdp_zc_qps);

        if (if_running) {
                ixgbe_txrx_ring_enable(adapter, qid);

                /* Kick start the NAPI context so that receiving will start */
                err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
                if (err)
                        return err;
        }

        return 0;
}

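/**
 * ixgbe_xsk_umem_disable - Disable AF_XDP zero-copy on a queue pair
 * @adapter: board private structure
 * @qid: queue pair index
 *
 * Tears down the umem association for the queue pair and releases its
 * DMA mappings, temporarily stopping the queue pair if the interface
 * is running.
 **/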
static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
{
        struct xdp_umem *umem;
        bool if_running;

        umem = xdp_get_umem_from_qid(adapter->netdev, qid);
        if (!umem)
                return -EINVAL;

        if_running = netif_running(adapter->netdev) &&
                     ixgbe_enabled_xdp_adapter(adapter);

        if (if_running)
                ixgbe_txrx_ring_disable(adapter, qid);

        clear_bit(qid, adapter->af_xdp_zc_qps);
        ixgbe_xsk_umem_dma_unmap(adapter, umem);

        if (if_running)
                ixgbe_txrx_ring_enable(adapter, qid);

        return 0;
}

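/**
 * ixgbe_xsk_umem_setup - Enable or disable a umem on a queue pair
 * @adapter: board private structure
 * @umem: umem to enable, or NULL to disable the currently bound one
 * @qid: queue pair index
 **/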
int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
                         u16 qid)
{
        return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
                ixgbe_xsk_umem_disable(adapter, qid);
}

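/**
 * ixgbe_run_xdp_zc - Run the XDP program on a zero-copy Rx buffer
 * @adapter: board private structure
 * @rx_ring: Rx ring the buffer was received on
 * @xdp: xdp_buff describing the received frame
 *
 * Returns IXGBE_XDP_PASS, IXGBE_XDP_TX, IXGBE_XDP_REDIR or
 * IXGBE_XDP_CONSUMED, depending on the verdict of the XDP program.
 **/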
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
                            struct ixgbe_ring *rx_ring,
                            struct xdp_buff *xdp)
{
        struct xdp_umem *umem = rx_ring->xsk_umem;
        int err, result = IXGBE_XDP_PASS;
        struct bpf_prog *xdp_prog;
        struct xdp_frame *xdpf;
        u64 offset;
        u32 act;

        rcu_read_lock();
        xdp_prog = READ_ONCE(rx_ring->xdp_prog);
        act = bpf_prog_run_xdp(xdp_prog, xdp);
        offset = xdp->data - xdp->data_hard_start;

        xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);

        switch (act) {
        case XDP_PASS:
                break;
        case XDP_TX:
                xdpf = convert_to_xdp_frame(xdp);
                if (unlikely(!xdpf)) {
                        result = IXGBE_XDP_CONSUMED;
                        break;
                }
                result = ixgbe_xmit_xdp_ring(adapter, xdpf);
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
                result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                /* fallthrough */
        case XDP_ABORTED:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
                /* fallthrough -- handle aborts by dropping packet */
        case XDP_DROP:
                result = IXGBE_XDP_CONSUMED;
                break;
        }
        rcu_read_unlock();
        return result;
}

static struct
ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring,
                                        unsigned int size)
{
        struct ixgbe_rx_buffer *bi;

        bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

        /* we are reusing so sync this buffer for CPU use */
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      bi->dma, 0,
                                      size,
                                      DMA_BIDIRECTIONAL);

        return bi;
}

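/**
 * ixgbe_reuse_rx_buffer_zc - Recycle an Rx buffer into the next_to_alloc slot
 * @rx_ring: Rx ring the buffer belongs to
 * @obi: old buffer to recycle
 *
 * Transfers the DMA address, CPU address and umem handle of the old
 * buffer to the buffer at next_to_alloc so it can be handed back to
 * hardware, then advances next_to_alloc.
 **/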
static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
                                     struct ixgbe_rx_buffer *obi)
{
        u16 nta = rx_ring->next_to_alloc;
        struct ixgbe_rx_buffer *nbi;

        nbi = &rx_ring->rx_buffer_info[rx_ring->next_to_alloc];
        /* update, and store next to alloc */
        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

        /* transfer page from old buffer to new buffer */
        nbi->dma = obi->dma;
        nbi->addr = obi->addr;
        nbi->handle = obi->handle;

        obi->addr = NULL;
        obi->skb = NULL;
}

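/**
 * ixgbe_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations
 * @alloc: zero-copy allocator embedded in the Rx ring
 * @handle: umem handle of the buffer being returned
 *
 * Called by the XDP memory model when a zero-copy buffer is returned;
 * the buffer is placed back into the ring at the next_to_alloc slot.
 **/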
void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
{
        struct ixgbe_rx_buffer *bi;
        struct ixgbe_ring *rx_ring;
        u64 hr, mask;
        u16 nta;

        rx_ring = container_of(alloc, struct ixgbe_ring, zca);
        hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
        mask = rx_ring->xsk_umem->chunk_mask;

        nta = rx_ring->next_to_alloc;
        bi = &rx_ring->rx_buffer_info[nta];

        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

        handle &= mask;

        bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
        bi->dma += hr;

        bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
        bi->addr += hr;

        bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle,
                                            rx_ring->xsk_umem->headroom);
}

static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
                                  struct ixgbe_rx_buffer *bi)
{
        struct xdp_umem *umem = rx_ring->xsk_umem;
        void *addr = bi->addr;
        u64 handle, hr;

        if (addr)
                return true;

        if (!xsk_umem_peek_addr(umem, &handle)) {
                rx_ring->rx_stats.alloc_rx_page_failed++;
                return false;
        }

        hr = umem->headroom + XDP_PACKET_HEADROOM;

        bi->dma = xdp_umem_get_dma(umem, handle);
        bi->dma += hr;

        bi->addr = xdp_umem_get_data(umem, handle);
        bi->addr += hr;

        bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);

        xsk_umem_discard_addr(umem);
        return true;
}

static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
                                       struct ixgbe_rx_buffer *bi)
{
        struct xdp_umem *umem = rx_ring->xsk_umem;
        u64 handle, hr;

        if (!xsk_umem_peek_addr_rq(umem, &handle)) {
                rx_ring->rx_stats.alloc_rx_page_failed++;
                return false;
        }

        handle &= rx_ring->xsk_umem->chunk_mask;

        hr = umem->headroom + XDP_PACKET_HEADROOM;

        bi->dma = xdp_umem_get_dma(umem, handle);
        bi->dma += hr;

        bi->addr = xdp_umem_get_data(umem, handle);
        bi->addr += hr;

        bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);

        xsk_umem_discard_addr_rq(umem);
        return true;
}

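/**
 * __ixgbe_alloc_rx_buffers_zc - Refill Rx descriptors from the fill queue
 * @rx_ring: Rx ring to refill
 * @cleaned_count: number of descriptors to refill
 * @alloc: allocation callback, with or without fill queue reuse support
 *
 * Returns true if all @cleaned_count descriptors were refilled, false if
 * the fill queue ran dry.  The tail register is bumped whenever any
 * descriptors were written.
 **/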
static __always_inline bool
__ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 cleaned_count,
                            bool alloc(struct ixgbe_ring *rx_ring,
                                       struct ixgbe_rx_buffer *bi))
{
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
        u16 i = rx_ring->next_to_use;
        bool ok = true;

        /* nothing to do */
        if (!cleaned_count)
                return true;

        rx_desc = IXGBE_RX_DESC(rx_ring, i);
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;

        do {
                if (!alloc(rx_ring, bi)) {
                        ok = false;
                        break;
                }

                /* sync the buffer for use by the device */
                dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
                                                 bi->page_offset,
                                                 rx_ring->rx_buf_len,
                                                 DMA_BIDIRECTIONAL);

                /* Refresh the desc even if buffer_addrs didn't change
                 * because each write-back erases this info.
                 */
                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

                rx_desc++;
                bi++;
                i++;
                if (unlikely(!i)) {
                        rx_desc = IXGBE_RX_DESC(rx_ring, 0);
                        bi = rx_ring->rx_buffer_info;
                        i -= rx_ring->count;
                }

                /* clear the length for the next_to_use descriptor */
                rx_desc->wb.upper.length = 0;

                cleaned_count--;
        } while (cleaned_count);

        i += rx_ring->count;

        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;

                /* update next to alloc since we have filled the ring */
                rx_ring->next_to_alloc = i;

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                writel(i, rx_ring->tail);
        }

        return ok;
}

void ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
        __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
                                    ixgbe_alloc_buffer_slow_zc);
}

static bool ixgbe_alloc_rx_buffers_fast_zc(struct ixgbe_ring *rx_ring,
                                           u16 count)
{
        return __ixgbe_alloc_rx_buffers_zc(rx_ring, count,
                                           ixgbe_alloc_buffer_zc);
}

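/**
 * ixgbe_construct_skb_zc - Build an skb for the XDP_PASS path
 * @rx_ring: Rx ring the buffer was received on
 * @bi: Rx buffer holding the frame
 * @xdp: xdp_buff describing the frame
 *
 * The frame (and its metadata, if any) is copied into a freshly
 * allocated skb and the zero-copy buffer is recycled back to the ring.
 **/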
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
                                              struct ixgbe_rx_buffer *bi,
                                              struct xdp_buff *xdp)
{
        unsigned int metasize = xdp->data - xdp->data_meta;
        unsigned int datasize = xdp->data_end - xdp->data;
        struct sk_buff *skb;

        /* allocate a skb to store the frags */
        skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
                               xdp->data_end - xdp->data_hard_start,
                               GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;

        skb_reserve(skb, xdp->data - xdp->data_hard_start);
        memcpy(__skb_put(skb, datasize), xdp->data, datasize);
        if (metasize)
                skb_metadata_set(skb, metasize);

        ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
        return skb;
}

static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
        u32 ntc = rx_ring->next_to_clean + 1;

        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;
        prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}

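/**
 * ixgbe_clean_rx_irq_zc - Zero-copy variant of the Rx cleanup routine
 * @q_vector: q_vector the ring belongs to
 * @rx_ring: Rx ring to clean
 * @budget: NAPI budget
 *
 * Returns the number of packets processed, or the full budget when
 * buffer allocation failed and the need_wakeup feature is not in use.
 **/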
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
                          struct ixgbe_ring *rx_ring,
                          const int budget)
{
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        struct ixgbe_adapter *adapter = q_vector->adapter;
        u16 cleaned_count = ixgbe_desc_unused(rx_ring);
        unsigned int xdp_res, xdp_xmit = 0;
        bool failure = false;
        struct sk_buff *skb;
        struct xdp_buff xdp;

        xdp.rxq = &rx_ring->xdp_rxq;

        while (likely(total_rx_packets < budget)) {
                union ixgbe_adv_rx_desc *rx_desc;
                struct ixgbe_rx_buffer *bi;
                unsigned int size;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
                        failure = failure ||
                                  !ixgbe_alloc_rx_buffers_fast_zc(rx_ring,
                                                                 cleaned_count);
                        cleaned_count = 0;
                }

                rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
                size = le16_to_cpu(rx_desc->wb.upper.length);
                if (!size)
                        break;

                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we know the
                 * descriptor has been written back
                 */
                dma_rmb();

                bi = ixgbe_get_rx_buffer_zc(rx_ring, size);

                if (unlikely(!ixgbe_test_staterr(rx_desc,
                                                 IXGBE_RXD_STAT_EOP))) {
                        struct ixgbe_rx_buffer *next_bi;

                        ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
                        ixgbe_inc_ntc(rx_ring);
                        next_bi =
                               &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
                        next_bi->skb = ERR_PTR(-EINVAL);
                        continue;
                }

                if (unlikely(bi->skb)) {
                        ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
                        ixgbe_inc_ntc(rx_ring);
                        continue;
                }

                xdp.data = bi->addr;
                xdp.data_meta = xdp.data;
                xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
                xdp.data_end = xdp.data + size;
                xdp.handle = bi->handle;

                xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, &xdp);

                if (xdp_res) {
                        if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
                                xdp_xmit |= xdp_res;
                                bi->addr = NULL;
                                bi->skb = NULL;
                        } else {
                                ixgbe_reuse_rx_buffer_zc(rx_ring, bi);
                        }
                        total_rx_packets++;
                        total_rx_bytes += size;

                        cleaned_count++;
                        ixgbe_inc_ntc(rx_ring);
                        continue;
                }

                /* XDP_PASS path */
                skb = ixgbe_construct_skb_zc(rx_ring, bi, &xdp);
                if (!skb) {
                        rx_ring->rx_stats.alloc_rx_buff_failed++;
                        break;
                }

                cleaned_count++;
                ixgbe_inc_ntc(rx_ring);

                if (eth_skb_pad(skb))
                        continue;

                total_rx_bytes += skb->len;
                total_rx_packets++;

                ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
                ixgbe_rx_skb(q_vector, skb);
        }

        if (xdp_xmit & IXGBE_XDP_REDIR)
                xdp_do_flush_map();

        if (xdp_xmit & IXGBE_XDP_TX) {
                struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.
                 */
                wmb();
                writel(ring->next_to_use, ring->tail);
        }

        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);
        q_vector->rx.total_packets += total_rx_packets;
        q_vector->rx.total_bytes += total_rx_bytes;

        if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
                if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
                        xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
                else
                        xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);

                return (int)total_rx_packets;
        }
        return failure ? budget : (int)total_rx_packets;
}

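/**
 * ixgbe_xsk_clean_rx_ring - Return outstanding Rx buffers to the umem
 * @rx_ring: Rx ring being cleaned
 *
 * Walks the buffers between next_to_clean and next_to_alloc and hands
 * their handles back to the fill queue reuse buffer.
 **/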
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
        u16 i = rx_ring->next_to_clean;
        struct ixgbe_rx_buffer *bi = &rx_ring->rx_buffer_info[i];

        while (i != rx_ring->next_to_alloc) {
                xsk_umem_fq_reuse(rx_ring->xsk_umem, bi->handle);
                i++;
                bi++;
                if (i == rx_ring->count) {
                        i = 0;
                        bi = rx_ring->rx_buffer_info;
                }
        }
}

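/**
 * ixgbe_xmit_zc - Transmit frames from the AF_XDP Tx queue
 * @xdp_ring: XDP Tx ring to place the descriptors on
 * @budget: maximum number of frames to transmit
 *
 * Returns true when the budget was not exhausted and transmission was
 * not stopped by a full ring or a downed link, i.e. there is no more
 * work to do for now.
 **/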
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
        union ixgbe_adv_tx_desc *tx_desc = NULL;
        struct ixgbe_tx_buffer *tx_bi;
        bool work_done = true;
        struct xdp_desc desc;
        dma_addr_t dma;
        u32 cmd_type;

        while (budget-- > 0) {
                if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
                    !netif_carrier_ok(xdp_ring->netdev)) {
                        work_done = false;
                        break;
                }

                if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
                        break;

                dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);

                dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
                                           DMA_BIDIRECTIONAL);

                tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
                tx_bi->bytecount = desc.len;
                tx_bi->xdpf = NULL;
                tx_bi->gso_segs = 1;

                tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
                tx_desc->read.buffer_addr = cpu_to_le64(dma);

                /* put descriptor type bits */
                cmd_type = IXGBE_ADVTXD_DTYP_DATA |
                           IXGBE_ADVTXD_DCMD_DEXT |
                           IXGBE_ADVTXD_DCMD_IFCS;
                cmd_type |= desc.len | IXGBE_TXD_CMD;
                tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
                tx_desc->read.olinfo_status =
                        cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);

                xdp_ring->next_to_use++;
                if (xdp_ring->next_to_use == xdp_ring->count)
                        xdp_ring->next_to_use = 0;
        }

        if (tx_desc) {
                ixgbe_xdp_ring_update_tail(xdp_ring);
                xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
        }

        return !!budget && work_done;
}

static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
                                      struct ixgbe_tx_buffer *tx_bi)
{
        xdp_return_frame(tx_bi->xdpf);
        dma_unmap_single(tx_ring->dev,
                         dma_unmap_addr(tx_bi, dma),
                         dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
        dma_unmap_len_set(tx_bi, len, 0);
}

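/**
 * ixgbe_clean_xdp_tx_irq - Complete sent descriptors on the XDP Tx ring
 * @q_vector: q_vector the ring belongs to
 * @tx_ring: XDP Tx ring to clean
 * @napi_budget: NAPI budget
 *
 * Completes sent descriptors, returns AF_XDP frames to the completion
 * queue and then attempts to transmit more frames via ixgbe_xmit_zc().
 **/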
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
                            struct ixgbe_ring *tx_ring, int napi_budget)
{
        u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
        unsigned int total_packets = 0, total_bytes = 0;
        struct xdp_umem *umem = tx_ring->xsk_umem;
        union ixgbe_adv_tx_desc *tx_desc;
        struct ixgbe_tx_buffer *tx_bi;
        u32 xsk_frames = 0;

        tx_bi = &tx_ring->tx_buffer_info[ntc];
        tx_desc = IXGBE_TX_DESC(tx_ring, ntc);

        while (ntc != ntu) {
                if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
                        break;

                total_bytes += tx_bi->bytecount;
                total_packets += tx_bi->gso_segs;

                if (tx_bi->xdpf)
                        ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
                else
                        xsk_frames++;

                tx_bi->xdpf = NULL;

                tx_bi++;
                tx_desc++;
                ntc++;
                if (unlikely(ntc == tx_ring->count)) {
                        ntc = 0;
                        tx_bi = tx_ring->tx_buffer_info;
                        tx_desc = IXGBE_TX_DESC(tx_ring, 0);
                }

                /* issue prefetch for next Tx descriptor */
                prefetch(tx_desc);
        }

        tx_ring->next_to_clean = ntc;

        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;

        if (xsk_frames)
                xsk_umem_complete_tx(umem, xsk_frames);

        if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
                xsk_set_tx_need_wakeup(tx_ring->xsk_umem);

        return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}

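/**
 * ixgbe_xsk_wakeup - Wake up the NAPI context for an AF_XDP queue
 * @dev: network device
 * @qid: XDP queue pair index
 * @flags: wakeup reason (XDP_WAKEUP_RX and/or XDP_WAKEUP_TX)
 *
 * Schedules NAPI on the queue's vector by rearming its interrupt,
 * unless NAPI is already scheduled.
 **/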
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_ring *ring;

        if (test_bit(__IXGBE_DOWN, &adapter->state))
                return -ENETDOWN;

        if (!READ_ONCE(adapter->xdp_prog))
                return -ENXIO;

        if (qid >= adapter->num_xdp_queues)
                return -ENXIO;

        if (!adapter->xdp_ring[qid]->xsk_umem)
                return -ENXIO;

        ring = adapter->xdp_ring[qid];
        if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
                u64 eics = BIT_ULL(ring->q_vector->v_idx);

                ixgbe_irq_rearm_queues(adapter, eics);
        }

        return 0;
}

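/**
 * ixgbe_xsk_clean_tx_ring - Release outstanding buffers on the XDP Tx ring
 * @tx_ring: XDP Tx ring being cleaned
 *
 * Frees any not yet completed XDP frames and reports leftover AF_XDP
 * frames to the completion queue.
 **/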
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
        u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
        struct xdp_umem *umem = tx_ring->xsk_umem;
        struct ixgbe_tx_buffer *tx_bi;
        u32 xsk_frames = 0;

        while (ntc != ntu) {
                tx_bi = &tx_ring->tx_buffer_info[ntc];

                if (tx_bi->xdpf)
                        ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
                else
                        xsk_frames++;

                tx_bi->xdpf = NULL;

                ntc++;
                if (ntc == tx_ring->count)
                        ntc = 0;
        }

        if (xsk_frames)
                xsk_umem_complete_tx(umem, xsk_frames);
}