linux/drivers/net/ethernet/sfc/falcon/rx.c
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EF4_RX_PREFERRED_BATCH 8U

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EF4_RECYCLE_RING_SIZE_IOMMU 4096
#define EF4_RECYCLE_RING_SIZE_NOIOMMU (2 * EF4_RX_PREFERRED_BATCH)

/* Size of buffer allocated for skb header area. */
#define EF4_SKB_HEADERS  128u

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EF4_RX_MAX_FRAGS DIV_ROUND_UP(EF4_MAX_FRAME_LEN(EF4_MAX_MTU), \
                                      EF4_RX_USR_BUF_SIZE)

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EF4_RXD_HEAD_ROOM (1 + EF4_RX_MAX_FRAGS)
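
/* Illustrative sizing only (the numbers below are assumptions, not taken
 * from this driver's headers): if EF4_RX_USR_BUF_SIZE were 1824 bytes and
 * EF4_MAX_FRAME_LEN(EF4_MAX_MTU) came to 9234 bytes, then
 * EF4_RX_MAX_FRAGS = DIV_ROUND_UP(9234, 1824) = 6, giving
 * EF4_RXD_HEAD_ROOM = 1 + 6 = 7 descriptors of slack.  The real values
 * come from EF4_MAX_FRAME_LEN() and EF4_RX_USR_BUF_SIZE in net_driver.h.
 */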

static inline u8 *ef4_rx_buf_va(struct ef4_rx_buffer *buf)
{
        return page_address(buf->page) + buf->page_offset;
}

static inline u32 ef4_rx_buf_hash(struct ef4_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
        const u8 *data = eh + efx->rx_packet_hash_offset;
        return (u32)data[0]       |
               (u32)data[1] << 8  |
               (u32)data[2] << 16 |
               (u32)data[3] << 24;
#endif
}

static inline struct ef4_rx_buffer *
ef4_rx_buf_next(struct ef4_rx_queue *rx_queue, struct ef4_rx_buffer *rx_buf)
{
        if (unlikely(rx_buf == ef4_rx_buffer(rx_queue, rx_queue->ptr_mask)))
                return ef4_rx_buffer(rx_queue, 0);
        else
                return rx_buf + 1;
}

static inline void ef4_sync_rx_buffer(struct ef4_nic *efx,
                                      struct ef4_rx_buffer *rx_buf,
                                      unsigned int len)
{
        dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
                                DMA_FROM_DEVICE);
}

void ef4_rx_config_page_split(struct ef4_nic *efx)
{
        efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
                                      EF4_RX_BUF_ALIGNMENT);
        efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
                ((PAGE_SIZE - sizeof(struct ef4_rx_page_state)) /
                 efx->rx_page_buf_step);
        efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
                efx->rx_bufs_per_page;
        efx->rx_pages_per_batch = DIV_ROUND_UP(EF4_RX_PREFERRED_BATCH,
                                               efx->rx_bufs_per_page);
}
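
/* Worked example with illustrative numbers (assumed, not taken from this
 * file): with 4096-byte order-0 pages, and rx_dma_len + rx_ip_align
 * rounding up to an rx_page_buf_step of 1920 bytes, rx_bufs_per_page =
 * (4096 - sizeof(struct ef4_rx_page_state)) / 1920 = 2, so
 * rx_buffer_truesize = 4096 / 2 = 2048 and
 * rx_pages_per_batch = DIV_ROUND_UP(8, 2) = 4.
 */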

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
{
        struct ef4_nic *efx = rx_queue->efx;
        struct page *page;
        struct ef4_rx_page_state *state;
        unsigned index;

        index = rx_queue->page_remove & rx_queue->page_ptr_mask;
        page = rx_queue->page_ring[index];
        if (page == NULL)
                return NULL;

        rx_queue->page_ring[index] = NULL;
        /* page_remove cannot exceed page_add. */
        if (rx_queue->page_remove != rx_queue->page_add)
                ++rx_queue->page_remove;

        /* If page_count is 1 then we hold the only reference to this page. */
        if (page_count(page) == 1) {
                ++rx_queue->page_recycle_count;
                return page;
        } else {
                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
                ++rx_queue->page_recycle_failed;
        }

        return NULL;
}

/**
 * ef4_init_rx_buffers - create EF4_RX_PREFERRED_BATCH page-based RX buffers
 * @rx_queue:           Efx RX queue
 * @atomic:             control memory allocation flags
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct ef4_rx_buffer for each one. Returns a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int ef4_init_rx_buffers(struct ef4_rx_queue *rx_queue, bool atomic)
{
        struct ef4_nic *efx = rx_queue->efx;
        struct ef4_rx_buffer *rx_buf;
        struct page *page;
        unsigned int page_offset;
        struct ef4_rx_page_state *state;
        dma_addr_t dma_addr;
        unsigned index, count;

        count = 0;
        do {
                page = ef4_reuse_page(rx_queue);
                if (page == NULL) {
                        page = alloc_pages(__GFP_COLD | __GFP_COMP |
                                           (atomic ? GFP_ATOMIC : GFP_KERNEL),
                                           efx->rx_buffer_order);
                        if (unlikely(page == NULL))
                                return -ENOMEM;
                        dma_addr =
                                dma_map_page(&efx->pci_dev->dev, page, 0,
                                             PAGE_SIZE << efx->rx_buffer_order,
                                             DMA_FROM_DEVICE);
                        if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
                                                       dma_addr))) {
                                __free_pages(page, efx->rx_buffer_order);
                                return -EIO;
                        }
                        state = page_address(page);
                        state->dma_addr = dma_addr;
                } else {
                        state = page_address(page);
                        dma_addr = state->dma_addr;
                }

                dma_addr += sizeof(struct ef4_rx_page_state);
                page_offset = sizeof(struct ef4_rx_page_state);

                do {
                        index = rx_queue->added_count & rx_queue->ptr_mask;
                        rx_buf = ef4_rx_buffer(rx_queue, index);
                        rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
                        rx_buf->page = page;
                        rx_buf->page_offset = page_offset + efx->rx_ip_align;
                        rx_buf->len = efx->rx_dma_len;
                        rx_buf->flags = 0;
                        ++rx_queue->added_count;
                        get_page(page);
                        dma_addr += efx->rx_page_buf_step;
                        page_offset += efx->rx_page_buf_step;
                } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

                rx_buf->flags = EF4_RX_BUF_LAST_IN_PAGE;
        } while (++count < efx->rx_pages_per_batch);

        return 0;
}
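
/* Resulting page layout (sketch; exact offsets depend on rx_ip_align and
 * rx_page_buf_step):
 *
 *   +--------------------------+--------+-------+--------+-------+- - -
 *   | struct ef4_rx_page_state | IP pad | buf 0 | IP pad | buf 1 |
 *   +--------------------------+--------+-------+--------+-------+- - -
 *
 * Each buffer takes its own page reference via get_page(), and only the
 * last buffer carved from the page carries EF4_RX_BUF_LAST_IN_PAGE, so
 * the page is unmapped or recycled exactly once.
 */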
 209
 210/* Unmap a DMA-mapped page.  This function is only called for the final RX
 211 * buffer in a page.
 212 */
 213static void ef4_unmap_rx_buffer(struct ef4_nic *efx,
 214                                struct ef4_rx_buffer *rx_buf)
 215{
 216        struct page *page = rx_buf->page;
 217
 218        if (page) {
 219                struct ef4_rx_page_state *state = page_address(page);
 220                dma_unmap_page(&efx->pci_dev->dev,
 221                               state->dma_addr,
 222                               PAGE_SIZE << efx->rx_buffer_order,
 223                               DMA_FROM_DEVICE);
 224        }
 225}
 226
 227static void ef4_free_rx_buffers(struct ef4_rx_queue *rx_queue,
 228                                struct ef4_rx_buffer *rx_buf,
 229                                unsigned int num_bufs)
 230{
 231        do {
 232                if (rx_buf->page) {
 233                        put_page(rx_buf->page);
 234                        rx_buf->page = NULL;
 235                }
 236                rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
 237        } while (--num_bufs);
 238}
 239
 240/* Attempt to recycle the page if there is an RX recycle ring; the page can
 241 * only be added if this is the final RX buffer, to prevent pages being used in
 242 * the descriptor ring and appearing in the recycle ring simultaneously.
 243 */
 244static void ef4_recycle_rx_page(struct ef4_channel *channel,
 245                                struct ef4_rx_buffer *rx_buf)
 246{
 247        struct page *page = rx_buf->page;
 248        struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);
 249        struct ef4_nic *efx = rx_queue->efx;
 250        unsigned index;
 251
 252        /* Only recycle the page after processing the final buffer. */
 253        if (!(rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE))
 254                return;
 255
 256        index = rx_queue->page_add & rx_queue->page_ptr_mask;
 257        if (rx_queue->page_ring[index] == NULL) {
 258                unsigned read_index = rx_queue->page_remove &
 259                        rx_queue->page_ptr_mask;
 260
 261                /* The next slot in the recycle ring is available, but
 262                 * increment page_remove if the read pointer currently
 263                 * points here.
 264                 */
 265                if (read_index == index)
 266                        ++rx_queue->page_remove;
 267                rx_queue->page_ring[index] = page;
 268                ++rx_queue->page_add;
 269                return;
 270        }
 271        ++rx_queue->page_recycle_full;
 272        ef4_unmap_rx_buffer(efx, rx_buf);
 273        put_page(rx_buf->page);
 274}
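
/* Recycle-ring bookkeeping (behavioural note): page_add and page_remove
 * are free-running counters masked with page_ptr_mask on use, so the
 * ring is effectively full when the slot at (page_add & mask) is still
 * occupied; in that case the page is unmapped and released above rather
 * than recycled, and page_recycle_full counts the miss.
 */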

static void ef4_fini_rx_buffer(struct ef4_rx_queue *rx_queue,
                               struct ef4_rx_buffer *rx_buf)
{
        /* Release the page reference we hold for the buffer. */
        if (rx_buf->page)
                put_page(rx_buf->page);

        /* If this is the last buffer in a page, unmap and free it. */
        if (rx_buf->flags & EF4_RX_BUF_LAST_IN_PAGE) {
                ef4_unmap_rx_buffer(rx_queue->efx, rx_buf);
                ef4_free_rx_buffers(rx_queue, rx_buf, 1);
        }
        rx_buf->page = NULL;
}

/* Recycle the pages that are used by buffers that have just been received. */
static void ef4_recycle_rx_pages(struct ef4_channel *channel,
                                 struct ef4_rx_buffer *rx_buf,
                                 unsigned int n_frags)
{
        struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

        do {
                ef4_recycle_rx_page(channel, rx_buf);
                rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
        } while (--n_frags);
}

static void ef4_discard_rx_packet(struct ef4_channel *channel,
                                  struct ef4_rx_buffer *rx_buf,
                                  unsigned int n_frags)
{
        struct ef4_rx_queue *rx_queue = ef4_channel_get_rx_queue(channel);

        ef4_recycle_rx_pages(channel, rx_buf, n_frags);

        ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

/**
 * ef4_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:           RX descriptor queue
 * @atomic:             control memory allocation flags
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic)
{
        struct ef4_nic *efx = rx_queue->efx;
        unsigned int fill_level, batch_size;
        int space, rc = 0;

        if (!rx_queue->refill_enabled)
                return;

        /* Calculate current fill level, and exit if we don't need to fill */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EF4_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
        if (fill_level >= rx_queue->fast_fill_trigger)
                goto out;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        space = rx_queue->max_fill - fill_level;
        EF4_BUG_ON_PARANOID(space < batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filling descriptor ring from"
                   " level %d to level %d\n",
                   ef4_rx_queue_index(rx_queue), fill_level,
                   rx_queue->max_fill);

        do {
                rc = ef4_init_rx_buffers(rx_queue, atomic);
                if (unlikely(rc)) {
                        /* Ensure that we don't leave the rx queue empty */
                        if (rx_queue->added_count == rx_queue->removed_count)
                                ef4_schedule_slow_fill(rx_queue);
                        goto out;
                }
        } while ((space -= batch_size) >= batch_size);

        netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                   "RX queue %d fast-filled descriptor ring "
                   "to level %d\n", ef4_rx_queue_index(rx_queue),
                   rx_queue->added_count - rx_queue->removed_count);

 out:
        if (rx_queue->notified_count != rx_queue->added_count)
                ef4_nic_notify_rx_desc(rx_queue);
}
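
/* Refill example (illustrative numbers, continuing the page-split example
 * above): batch_size = 4 pages * 2 buffers = 8 buffers per loop pass.  A
 * queue with max_fill = 505 sitting at fill_level = 200 has space = 305
 * and pushes roughly 305 / 8 batches before notifying the NIC once at
 * "out:", so doorbell writes are coalesced per refill rather than per
 * buffer.
 */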

void ef4_rx_slow_fill(unsigned long context)
{
        struct ef4_rx_queue *rx_queue = (struct ef4_rx_queue *)context;

        /* Post an event to cause NAPI to run and refill the queue */
        ef4_nic_generate_fill_event(rx_queue);
        ++rx_queue->slow_fill_count;
}

static void ef4_rx_packet__check_len(struct ef4_rx_queue *rx_queue,
                                     struct ef4_rx_buffer *rx_buf,
                                     int len)
{
        struct ef4_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded, but this is only a fatal error
         * if the caller indicated it was
         */
        rx_buf->flags |= EF4_RX_PKT_DISCARD;

        if ((len > rx_buf->len) && EF4_WORKAROUND_8071(efx)) {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d seriously overlength "
                                  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                                  ef4_rx_queue_index(rx_queue), len, max_len,
                                  efx->type->rx_buffer_padding);
                ef4_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                if (net_ratelimit())
                        netif_err(efx, rx_err, efx->net_dev,
                                  " RX queue %d overlength RX event "
                                  "(0x%x > 0x%x)\n",
                                  ef4_rx_queue_index(rx_queue), len, max_len);
        }

        ef4_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
static void
ef4_rx_packet_gro(struct ef4_channel *channel, struct ef4_rx_buffer *rx_buf,
                  unsigned int n_frags, u8 *eh)
{
        struct napi_struct *napi = &channel->napi_str;
        gro_result_t gro_result;
        struct ef4_nic *efx = channel->efx;
        struct sk_buff *skb;

        skb = napi_get_frags(napi);
        if (unlikely(!skb)) {
                struct ef4_rx_queue *rx_queue;

                rx_queue = ef4_channel_get_rx_queue(channel);
                ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
                return;
        }

        if (efx->net_dev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, ef4_rx_buf_hash(efx, eh),
                             PKT_HASH_TYPE_L3);
        skb->ip_summed = ((rx_buf->flags & EF4_RX_PKT_CSUMMED) ?
                          CHECKSUM_UNNECESSARY : CHECKSUM_NONE);

        for (;;) {
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                   rx_buf->page, rx_buf->page_offset,
                                   rx_buf->len);
                rx_buf->page = NULL;
                skb->len += rx_buf->len;
                if (skb_shinfo(skb)->nr_frags == n_frags)
                        break;

                rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
        }

        skb->data_len = skb->len;
        skb->truesize += n_frags * efx->rx_buffer_truesize;

        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        gro_result = napi_gro_frags(napi);
        if (gro_result != GRO_DROP)
                channel->irq_mod_score += 2;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *ef4_rx_mk_skb(struct ef4_channel *channel,
                                     struct ef4_rx_buffer *rx_buf,
                                     unsigned int n_frags,
                                     u8 *eh, int hdr_len)
{
        struct ef4_nic *efx = channel->efx;
        struct sk_buff *skb;

        /* Allocate an SKB to store the headers */
        skb = netdev_alloc_skb(efx->net_dev,
                               efx->rx_ip_align + efx->rx_prefix_size +
                               hdr_len);
        if (unlikely(skb == NULL)) {
                atomic_inc(&efx->n_rx_noskb_drops);
                return NULL;
        }

        EF4_BUG_ON_PARANOID(rx_buf->len < hdr_len);

        memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
               efx->rx_prefix_size + hdr_len);
        skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
        __skb_put(skb, hdr_len);

        /* Append the remaining page(s) onto the frag list */
        if (rx_buf->len > hdr_len) {
                rx_buf->page_offset += hdr_len;
                rx_buf->len -= hdr_len;

                for (;;) {
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buf->page, rx_buf->page_offset,
                                           rx_buf->len);
                        rx_buf->page = NULL;
                        skb->len += rx_buf->len;
                        skb->data_len += rx_buf->len;
                        if (skb_shinfo(skb)->nr_frags == n_frags)
                                break;

                        rx_buf = ef4_rx_buf_next(&channel->rx_queue, rx_buf);
                }
        } else {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                rx_buf->page = NULL;
                n_frags = 0;
        }

        skb->truesize += n_frags * efx->rx_buffer_truesize;

        /* Move past the ethernet header */
        skb->protocol = eth_type_trans(skb, efx->net_dev);

        skb_mark_napi_id(skb, &channel->napi_str);

        return skb;
}

void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index,
                   unsigned int n_frags, unsigned int len, u16 flags)
{
        struct ef4_nic *efx = rx_queue->efx;
        struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
        struct ef4_rx_buffer *rx_buf;

        rx_queue->rx_packets++;

        rx_buf = ef4_rx_buffer(rx_queue, index);
        rx_buf->flags |= flags;

        /* Validate the number of fragments and completed length */
        if (n_frags == 1) {
                if (!(flags & EF4_RX_PKT_PREFIX_LEN))
                        ef4_rx_packet__check_len(rx_queue, rx_buf, len);
        } else if (unlikely(n_frags > EF4_RX_MAX_FRAGS) ||
                   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
                   unlikely(len > n_frags * efx->rx_dma_len) ||
                   unlikely(!efx->rx_scatter)) {
                /* If this isn't an explicit discard request, either
                 * the hardware or the driver is broken.
                 */
                WARN_ON(!(len == 0 && rx_buf->flags & EF4_RX_PKT_DISCARD));
                rx_buf->flags |= EF4_RX_PKT_DISCARD;
        }

        netif_vdbg(efx, rx_status, efx->net_dev,
                   "RX queue %d received ids %x-%x len %d %s%s\n",
                   ef4_rx_queue_index(rx_queue), index,
                   (index + n_frags - 1) & rx_queue->ptr_mask, len,
                   (rx_buf->flags & EF4_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
                   (rx_buf->flags & EF4_RX_PKT_DISCARD) ? " [DISCARD]" : "");

        /* Discard packet, if instructed to do so.  Process the
         * previous receive first.
         */
        if (unlikely(rx_buf->flags & EF4_RX_PKT_DISCARD)) {
                ef4_rx_flush_packet(channel);
                ef4_discard_rx_packet(channel, rx_buf, n_frags);
                return;
        }

        if (n_frags == 1 && !(flags & EF4_RX_PKT_PREFIX_LEN))
                rx_buf->len = len;

        /* Release and/or sync the DMA mapping - assumes all RX buffers
         * consumed in-order per RX queue.
         */
        ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(ef4_rx_buf_va(rx_buf));

        rx_buf->page_offset += efx->rx_prefix_size;
        rx_buf->len -= efx->rx_prefix_size;

        if (n_frags > 1) {
                /* Release/sync DMA mapping for additional fragments.
                 * Fix length for last fragment.
                 */
                unsigned int tail_frags = n_frags - 1;

                for (;;) {
                        rx_buf = ef4_rx_buf_next(rx_queue, rx_buf);
                        if (--tail_frags == 0)
                                break;
                        ef4_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
                }
                rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
                ef4_sync_rx_buffer(efx, rx_buf, rx_buf->len);
        }

        /* All fragments have been DMA-synced, so recycle pages. */
        rx_buf = ef4_rx_buffer(rx_queue, index);
        ef4_recycle_rx_pages(channel, rx_buf, n_frags);

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
        ef4_rx_flush_packet(channel);
        channel->rx_pkt_n_frags = n_frags;
        channel->rx_pkt_index = index;
}
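
/* Two-stage receive pipeline (behavioural note): ef4_rx_packet() only
 * stashes the current packet in channel->rx_pkt_index/rx_pkt_n_frags
 * after flushing the *previous* one via ef4_rx_flush_packet(), so the
 * prefetch issued above has a full event's worth of processing time to
 * pull this packet's headers into cache before __ef4_rx_packet() reads
 * them.
 */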

static void ef4_rx_deliver(struct ef4_channel *channel, u8 *eh,
                           struct ef4_rx_buffer *rx_buf,
                           unsigned int n_frags)
{
        struct sk_buff *skb;
        u16 hdr_len = min_t(u16, rx_buf->len, EF4_SKB_HEADERS);

        skb = ef4_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
        if (unlikely(skb == NULL)) {
                struct ef4_rx_queue *rx_queue;

                rx_queue = ef4_channel_get_rx_queue(channel);
                ef4_free_rx_buffers(rx_queue, rx_buf, n_frags);
                return;
        }
        skb_record_rx_queue(skb, channel->rx_queue.core_index);

        /* Set the SKB flags */
        skb_checksum_none_assert(skb);
        if (likely(rx_buf->flags & EF4_RX_PKT_CSUMMED))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (channel->type->receive_skb)
                if (channel->type->receive_skb(channel, skb))
                        return;

        /* Pass the packet up */
        netif_receive_skb(skb);
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __ef4_rx_packet(struct ef4_channel *channel)
{
        struct ef4_nic *efx = channel->efx;
        struct ef4_rx_buffer *rx_buf =
                ef4_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
        u8 *eh = ef4_rx_buf_va(rx_buf);

        /* Read length from the prefix if necessary.  This already
         * excludes the length of the prefix itself.
         */
        if (rx_buf->flags & EF4_RX_PKT_PREFIX_LEN)
                rx_buf->len = le16_to_cpup((__le16 *)
                                           (eh + efx->rx_packet_len_offset));

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                struct ef4_rx_queue *rx_queue;

                ef4_loopback_rx_packet(efx, eh, rx_buf->len);
                rx_queue = ef4_channel_get_rx_queue(channel);
                ef4_free_rx_buffers(rx_queue, rx_buf,
                                    channel->rx_pkt_n_frags);
                goto out;
        }

        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EF4_RX_PKT_CSUMMED;

        if ((rx_buf->flags & EF4_RX_PKT_TCP) && !channel->type->receive_skb)
                ef4_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
        else
                ef4_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
        channel->rx_pkt_n_frags = 0;
}

int ef4_probe_rx_queue(struct ef4_rx_queue *rx_queue)
{
        struct ef4_nic *efx = rx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->rxq_entries), EF4_MIN_DMAQ_SIZE);
        EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
        rx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating RX queue %d size %#x mask %#x\n",
                  ef4_rx_queue_index(rx_queue), efx->rxq_entries,
                  rx_queue->ptr_mask);

        /* Allocate RX buffers */
        rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
                                   GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;

        rc = ef4_nic_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }

        return rc;
}

static void ef4_init_rx_recycle_ring(struct ef4_nic *efx,
                                     struct ef4_rx_queue *rx_queue)
{
        unsigned int bufs_in_recycle_ring, page_ring_size;

        /* Set the RX recycle ring size */
#ifdef CONFIG_PPC64
        bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
#else
        if (iommu_present(&pci_bus_type))
                bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_IOMMU;
        else
                bufs_in_recycle_ring = EF4_RECYCLE_RING_SIZE_NOIOMMU;
#endif /* CONFIG_PPC64 */

        page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
                                            efx->rx_bufs_per_page);
        rx_queue->page_ring = kcalloc(page_ring_size,
                                      sizeof(*rx_queue->page_ring), GFP_KERNEL);
        rx_queue->page_ptr_mask = page_ring_size - 1;
}
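
/* Sizing example (illustrative, assuming 2 buffers per page as above):
 * with an IOMMU present the ring holds
 * roundup_pow_of_two(4096 / 2) = 2048 pages, while without one it drops
 * to roundup_pow_of_two(16 / 2) = 8 pages, since recycling mainly pays
 * for itself by avoiding expensive IOMMU map/unmap operations.
 */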

void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue)
{
        struct ef4_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, max_trigger;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "initialising RX queue %d\n", ef4_rx_queue_index(rx_queue));

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;
        ef4_init_rx_recycle_ring(efx, rx_queue);

        rx_queue->page_remove = 0;
        rx_queue->page_add = rx_queue->page_ptr_mask + 1;
        rx_queue->page_recycle_count = 0;
        rx_queue->page_recycle_failed = 0;
        rx_queue->page_recycle_full = 0;

        /* Initialise limit fields */
        max_fill = efx->rxq_entries - EF4_RXD_HEAD_ROOM;
        max_trigger =
                max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
        if (rx_refill_threshold != 0) {
                trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
                if (trigger > max_trigger)
                        trigger = max_trigger;
        } else {
                trigger = max_trigger;
        }

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;
        rx_queue->refill_enabled = true;

        /* Set up RX descriptor ring */
        ef4_nic_init_rx(rx_queue);
}
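
/* Threshold example (illustrative, continuing the numbers used above):
 * rxq_entries = 512 and EF4_RXD_HEAD_ROOM = 7 give max_fill = 505 and
 * max_trigger = 505 - 8 = 497.  Leaving the rx_refill_threshold module
 * parameter at 0 selects trigger = max_trigger, while e.g. a threshold
 * of 50 would arm the fast refill once the fill level drops below
 * 505 * 50 / 100 = 252 descriptors.
 */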

void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue)
{
        int i;
        struct ef4_nic *efx = rx_queue->efx;
        struct ef4_rx_buffer *rx_buf;

        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "shutting down RX queue %d\n", ef4_rx_queue_index(rx_queue));

        del_timer_sync(&rx_queue->slow_fill);

        /* Release RX buffers from the current read ptr to the write ptr */
        if (rx_queue->buffer) {
                for (i = rx_queue->removed_count; i < rx_queue->added_count;
                     i++) {
                        unsigned index = i & rx_queue->ptr_mask;
                        rx_buf = ef4_rx_buffer(rx_queue, index);
                        ef4_fini_rx_buffer(rx_queue, rx_buf);
                }
        }

        /* Unmap and release the pages in the recycle ring. Remove the ring. */
        for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
                struct page *page = rx_queue->page_ring[i];
                struct ef4_rx_page_state *state;

                if (page == NULL)
                        continue;

                state = page_address(page);
                dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
                               PAGE_SIZE << efx->rx_buffer_order,
                               DMA_FROM_DEVICE);
                put_page(page);
        }
        kfree(rx_queue->page_ring);
        rx_queue->page_ring = NULL;
}

void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue)
{
        netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                  "destroying RX queue %d\n", ef4_rx_queue_index(rx_queue));

        ef4_nic_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring refill threshold (%)");

#ifdef CONFIG_RFS_ACCEL

int ef4_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
                   u16 rxq_index, u32 flow_id)
{
        struct ef4_nic *efx = netdev_priv(net_dev);
        struct ef4_channel *channel;
        struct ef4_filter_spec spec;
        struct flow_keys fk;
        int rc;

        if (flow_id == RPS_FLOW_ID_INVALID)
                return -EINVAL;

        if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
                return -EPROTONOSUPPORT;

        if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
                return -EPROTONOSUPPORT;
        if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
                return -EPROTONOSUPPORT;

        ef4_filter_init_rx(&spec, EF4_FILTER_PRI_HINT,
                           efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0,
                           rxq_index);
        spec.match_flags =
                EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
                EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
                EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT;
        spec.ether_type = fk.basic.n_proto;
        spec.ip_proto = fk.basic.ip_proto;

        if (fk.basic.n_proto == htons(ETH_P_IP)) {
                spec.rem_host[0] = fk.addrs.v4addrs.src;
                spec.loc_host[0] = fk.addrs.v4addrs.dst;
        } else {
                memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
                memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
        }

        spec.rem_port = fk.ports.src;
        spec.loc_port = fk.ports.dst;

        rc = efx->type->filter_rfs_insert(efx, &spec);
        if (rc < 0)
                return rc;

        /* Remember this so we can check whether to expire the filter later */
        channel = ef4_get_channel(efx, rxq_index);
        channel->rps_flow_id[rc] = flow_id;
        ++channel->rfs_filters_added;

        if (spec.ether_type == htons(ETH_P_IP))
                netif_info(efx, rx_status, efx->net_dev,
                           "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
                           (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
                           spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
                           ntohs(spec.loc_port), rxq_index, flow_id, rc);
        else
                netif_info(efx, rx_status, efx->net_dev,
                           "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
                           (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
                           spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
                           ntohs(spec.loc_port), rxq_index, flow_id, rc);

        return rc;
}

bool __ef4_filter_rfs_expire(struct ef4_nic *efx, unsigned int quota)
{
        bool (*expire_one)(struct ef4_nic *efx, u32 flow_id, unsigned int index);
        unsigned int channel_idx, index, size;
        u32 flow_id;

        if (!spin_trylock_bh(&efx->filter_lock))
                return false;

        expire_one = efx->type->filter_rfs_expire_one;
        channel_idx = efx->rps_expire_channel;
        index = efx->rps_expire_index;
        size = efx->type->max_rx_ip_filters;
        while (quota--) {
                struct ef4_channel *channel = ef4_get_channel(efx, channel_idx);
                flow_id = channel->rps_flow_id[index];

                if (flow_id != RPS_FLOW_ID_INVALID &&
                    expire_one(efx, flow_id, index)) {
                        netif_info(efx, rx_status, efx->net_dev,
                                   "expired filter %d [queue %u flow %u]\n",
                                   index, channel_idx, flow_id);
                        channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
                }
                if (++index == size) {
                        if (++channel_idx == efx->n_channels)
                                channel_idx = 0;
                        index = 0;
                }
        }
        efx->rps_expire_channel = channel_idx;
        efx->rps_expire_index = index;

        spin_unlock_bh(&efx->filter_lock);
        return true;
}

#endif /* CONFIG_RFS_ACCEL */

/**
 * ef4_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool ef4_filter_is_mc_recipient(const struct ef4_filter_spec *spec)
{
        if (!(spec->flags & EF4_FILTER_FLAG_RX) ||
            spec->dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP)
                return false;

        if (spec->match_flags &
            (EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG) &&
            is_multicast_ether_addr(spec->loc_mac))
                return true;

        if ((spec->match_flags &
             (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) ==
            (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_LOC_HOST)) {
                if (spec->ether_type == htons(ETH_P_IP) &&
                    ipv4_is_multicast(spec->loc_host[0]))
                        return true;
                if (spec->ether_type == htons(ETH_P_IPV6) &&
                    ((const u8 *)spec->loc_host)[0] == 0xff)
                        return true;
        }

        return false;
}