linux/drivers/net/sfc/rx.c
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/ip.h>
#include <net/checksum.h>
#include "net_driver.h"
#include "rx.h"
#include "efx.h"
#include "falcon.h"
#include "selftest.h"
#include "workarounds.h"

/* Number of RX descriptors pushed at once. */
#define EFX_RX_BATCH  8

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS  64u

/*
 * rx_alloc_method - RX buffer allocation method
 *
 * This driver supports two methods for allocating and using RX buffers:
 * each RX buffer may be backed by an skb or by an order-n page.
 *
 * When LRO is in use, the second method has a lower overhead, since we
 * don't have to allocate then free skbs on reassembled frames.
 *
 * Values:
 *   - RX_ALLOC_METHOD_AUTO = 0
 *   - RX_ALLOC_METHOD_SKB  = 1
 *   - RX_ALLOC_METHOD_PAGE = 2
 *
 * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
 * controlled by the parameters below.
 *
 *   - Since pushing and popping descriptors are separated by the rx_queue
 *     size, the watermarks should be ~rxd_size.
 *   - The performance win from using page-based allocation for LRO is less
 *     than the performance hit of using page-based allocation for non-LRO
 *     traffic, so the watermarks should reflect this.
 *
 * Per channel we maintain a single variable, updated by each channel:
 *
 *   rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
 *                      RX_ALLOC_FACTOR_SKB)
 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
 * limits the hysteresis), and update the allocation strategy:
 *
 *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
 *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
 */
static int rx_alloc_method = RX_ALLOC_METHOD_PAGE;

#define RX_ALLOC_LEVEL_LRO 0x2000
#define RX_ALLOC_LEVEL_MAX 0x3000
#define RX_ALLOC_FACTOR_LRO 1
#define RX_ALLOC_FACTOR_SKB (-2)
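/* For example: starting from a level of 0, it takes roughly 0x2000 (8192)
 * net increments of RX_ALLOC_FACTOR_LRO (+1) from LRO-merged frames to
 * push rx_alloc_level past RX_ALLOC_LEVEL_LRO and switch the AUTO
 * strategy over to page-based allocation, while every skb delivery pulls
 * the level back down twice as fast (RX_ALLOC_FACTOR_SKB = -2).
 * efx_rx_strategy() clamps the level to 0..RX_ALLOC_LEVEL_MAX, so the
 * hysteresis window stays bounded. */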

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold = 90;

/* This is the percentage fill level to which an RX queue will be refilled
 * when the "RX refill threshold" is reached.
 */
static unsigned int rx_refill_limit = 95;
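/* Worked example (assuming a 4096-entry RX descriptor ring): the usable
 * fill ceiling is 4096 - EFX_RXD_HEAD_ROOM = 4094 descriptors, so
 * efx_init_rx_queue() computes fast_fill_trigger = 4094 * 90 / 100 = 3684
 * and fast_fill_limit = 4094 * 95 / 100 = 3889.  A refill therefore starts
 * once the fill level drops below 3684 and tops the ring back up to about
 * 3889. */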

/*
 * RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow and at least 2 to allow
 * pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM 2
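/* For example, with a head room of 2 the refill path may post at most
 * ring_size - 2 buffers, so the slot referenced by the completion event
 * currently being handled in efx_rx_packet() cannot be overwritten before
 * it has been consumed. */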

static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
{
        /* Offset is always within one page, so we don't need to consider
         * the page order.
         */
        return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
}

static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
{
        return PAGE_SIZE << efx->rx_buffer_order;
}

/**
 * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
 *
 * @rx_queue:           Efx RX queue
 * @rx_buf:             RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  Return a negative error code or 0 on success.
 */
static int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
                                  struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = rx_queue->efx;
        struct net_device *net_dev = efx->net_dev;
        int skb_len = efx->rx_buffer_len;

        rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
        if (unlikely(!rx_buf->skb))
                return -ENOMEM;

        /* Adjust the SKB for padding and checksum */
        skb_reserve(rx_buf->skb, NET_IP_ALIGN);
        rx_buf->len = skb_len - NET_IP_ALIGN;
        rx_buf->data = (char *)rx_buf->skb->data;
        rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;

        rx_buf->dma_addr = pci_map_single(efx->pci_dev,
                                          rx_buf->data, rx_buf->len,
                                          PCI_DMA_FROMDEVICE);

        if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
                dev_kfree_skb_any(rx_buf->skb);
                rx_buf->skb = NULL;
                return -EIO;
        }

        return 0;
}

/**
 * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
 *
 * @rx_queue:           Efx RX queue
 * @rx_buf:             RX buffer structure to populate
 *
 * This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.  Return a negative error code or 0 on success.
 */
static int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
                                   struct efx_rx_buffer *rx_buf)
{
        struct efx_nic *efx = rx_queue->efx;
        int bytes, space, offset;

        bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;

        /* If there is space left in the previously allocated page,
         * then use it. Otherwise allocate a new one */
        rx_buf->page = rx_queue->buf_page;
        if (rx_buf->page == NULL) {
                dma_addr_t dma_addr;

                rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
                                           efx->rx_buffer_order);
                if (unlikely(rx_buf->page == NULL))
                        return -ENOMEM;

                dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
                                        0, efx_rx_buf_size(efx),
                                        PCI_DMA_FROMDEVICE);

                if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
                        __free_pages(rx_buf->page, efx->rx_buffer_order);
                        rx_buf->page = NULL;
                        return -EIO;
                }

                rx_queue->buf_page = rx_buf->page;
                rx_queue->buf_dma_addr = dma_addr;
                rx_queue->buf_data = (page_address(rx_buf->page) +
                                      EFX_PAGE_IP_ALIGN);
        }

        rx_buf->len = bytes;
        rx_buf->data = rx_queue->buf_data;
        offset = efx_rx_buf_offset(rx_buf);
        rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;

        /* Try to pack multiple buffers per page */
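        /* (For instance, assuming 4 KB pages, an order-0 allocation and a
         * standard-MTU rx_buffer_len of roughly 1.7 KB, each buffer is
         * rounded up to a 2 KB slot below, so two RX buffers share one
         * page and the second merely takes an extra page reference.) */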
        if (efx->rx_buffer_order == 0) {
                /* The next buffer starts on the next 512 byte boundary */
                rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
                offset += ((bytes + 0x1ff) & ~0x1ff);

                space = efx_rx_buf_size(efx) - offset;
                if (space >= bytes) {
                        /* Refs dropped on kernel releasing each skb */
                        get_page(rx_queue->buf_page);
                        goto out;
                }
        }

        /* This is the final RX buffer for this page, so mark it for
         * unmapping */
        rx_queue->buf_page = NULL;
        rx_buf->unmap_addr = rx_queue->buf_dma_addr;

 out:
        return 0;
}

/* This allocates memory for a new receive buffer, maps it for DMA,
 * and populates a struct efx_rx_buffer with the relevant
 * information.
 */
static int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
                              struct efx_rx_buffer *new_rx_buf)
{
        int rc = 0;

        if (rx_queue->channel->rx_alloc_push_pages) {
                new_rx_buf->skb = NULL;
                rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
                rx_queue->alloc_page_count++;
        } else {
                new_rx_buf->page = NULL;
                rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
                rx_queue->alloc_skb_count++;
        }

        if (unlikely(rc < 0))
                EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
                           rx_queue->queue, rc);
        return rc;
}

static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                EFX_BUG_ON_PARANOID(rx_buf->skb);
                if (rx_buf->unmap_addr) {
                        pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
                                       efx_rx_buf_size(efx),
                                       PCI_DMA_FROMDEVICE);
                        rx_buf->unmap_addr = 0;
                }
        } else if (likely(rx_buf->skb)) {
                pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
                                 rx_buf->len, PCI_DMA_FROMDEVICE);
        }
}

static void efx_free_rx_buffer(struct efx_nic *efx,
                               struct efx_rx_buffer *rx_buf)
{
        if (rx_buf->page) {
                __free_pages(rx_buf->page, efx->rx_buffer_order);
                rx_buf->page = NULL;
        } else if (likely(rx_buf->skb)) {
                dev_kfree_skb_any(rx_buf->skb);
                rx_buf->skb = NULL;
        }
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                               struct efx_rx_buffer *rx_buf)
{
        efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
        efx_free_rx_buffer(rx_queue->efx, rx_buf);
}

/**
 * __efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:           RX descriptor queue
 * @retry:              Recheck the fill level
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->fast_fill_limit.  If there is insufficient atomic
 * memory to do so, the caller should retry.
 */
static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
                                          int retry)
{
        struct efx_rx_buffer *rx_buf;
        unsigned fill_level, index;
        int i, space, rc = 0;

        /* Calculate current fill level.  Do this outside the lock,
         * because most of the time we'll end up not wanting to do the
         * fill anyway.
         */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level >
                            rx_queue->efx->type->rxd_ring_mask + 1);

        /* Don't fill if we don't need to */
        if (fill_level >= rx_queue->fast_fill_trigger)
                return 0;

        /* Record minimum fill level */
        if (unlikely(fill_level < rx_queue->min_fill)) {
                if (fill_level)
                        rx_queue->min_fill = fill_level;
        }

        /* Acquire RX add lock.  If this lock is contended, then a fast
         * fill must already be in progress (e.g. in the refill
         * tasklet), so we don't need to do anything
         */
        if (!spin_trylock_bh(&rx_queue->add_lock))
                return -1;

 retry:
        /* Recalculate current fill level now that we have the lock */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
        EFX_BUG_ON_PARANOID(fill_level >
                            rx_queue->efx->type->rxd_ring_mask + 1);
        space = rx_queue->fast_fill_limit - fill_level;
        if (space < EFX_RX_BATCH)
                goto out_unlock;

        EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
                  " level %d to level %d using %s allocation\n",
                  rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
                  rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");

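        /* Example of the batching below: with space = 40 free slots the
         * loop runs five batches of EFX_RX_BATCH (8) buffers; it stops as
         * soon as fewer than a full batch would remain, so the queue is
         * never filled past fast_fill_limit. */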
        do {
                for (i = 0; i < EFX_RX_BATCH; ++i) {
                        index = (rx_queue->added_count &
                                 rx_queue->efx->type->rxd_ring_mask);
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        rc = efx_init_rx_buffer(rx_queue, rx_buf);
                        if (unlikely(rc))
                                goto out;
                        ++rx_queue->added_count;
                }
        } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);

        EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
                  "to level %d\n", rx_queue->queue,
                  rx_queue->added_count - rx_queue->removed_count);

 out:
        /* Send write pointer to card. */
        falcon_notify_rx_desc(rx_queue);

        /* If the fast fill is running from within the refill tasklet, then
         * for SMP systems it may be running on a different CPU to
         * RX event processing, which means that the fill level may now be
         * out of date. */
        if (unlikely(retry && (rc == 0)))
                goto retry;

 out_unlock:
        spin_unlock_bh(&rx_queue->add_lock);

        return rc;
}

/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:           RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->fast_fill_limit.  If there is insufficient memory to do so,
 * it will schedule a work item to continue the fast fill immediately.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
{
        int rc;

        rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
        if (unlikely(rc)) {
                /* Schedule the work item to run immediately. The hope is
                 * that work is immediately pending to free some memory
                 * (e.g. an RX event or TX completion)
                 */
                efx_schedule_slow_fill(rx_queue, 0);
        }
}

void efx_rx_work(struct work_struct *data)
{
        struct efx_rx_queue *rx_queue;
        int rc;

        rx_queue = container_of(data, struct efx_rx_queue, work.work);

        if (unlikely(!rx_queue->channel->enabled))
                return;

        EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
                  "%d\n", rx_queue->queue, raw_smp_processor_id());

        ++rx_queue->slow_fill_count;
        /* Push new RX descriptors, allowing at least 1 jiffy for
         * the kernel to free some more memory. */
        rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
        if (rc)
                efx_schedule_slow_fill(rx_queue, 1);
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *rx_buf,
                                     int len, bool *discard,
                                     bool *leak_packet)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

        if (likely(len <= max_len))
                return;

        /* The packet must be discarded, but this is only a fatal error
         * if the caller indicated it was
         */
        *discard = true;

        if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
                EFX_ERR_RL(efx, "RX queue %d seriously overlength "
                           "RX event (0x%x > 0x%x+0x%x). Leaking\n",
                           rx_queue->queue, len, max_len,
                           efx->type->rx_buffer_padding);
                /* If this buffer was skb-allocated, then the meta
                 * data at the end of the skb will be trashed. So
                 * we have no choice but to leak the fragment.
                 */
                *leak_packet = (rx_buf->skb != NULL);
                efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
        } else {
                EFX_ERR_RL(efx, "RX queue %d overlength RX event "
                           "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
        }

        rx_queue->channel->n_rx_overlength++;
}

/* Pass a received packet up through the generic GRO stack.
 *
 * Page-based buffers are handed to the stack via napi_gro_frags();
 * skb-based buffers go via napi_gro_receive().
 */
static void efx_rx_packet_lro(struct efx_channel *channel,
                              struct efx_rx_buffer *rx_buf,
                              bool checksummed)
{
        struct napi_struct *napi = &channel->napi_str;

        /* Pass the skb/page into the GRO engine */
        if (rx_buf->page) {
                struct sk_buff *skb = napi_get_frags(napi);

                if (!skb) {
                        put_page(rx_buf->page);
                        goto out;
                }

                skb_shinfo(skb)->frags[0].page = rx_buf->page;
                skb_shinfo(skb)->frags[0].page_offset =
                        efx_rx_buf_offset(rx_buf);
                skb_shinfo(skb)->frags[0].size = rx_buf->len;
                skb_shinfo(skb)->nr_frags = 1;

                skb->len = rx_buf->len;
                skb->data_len = rx_buf->len;
                skb->truesize += rx_buf->len;
                skb->ip_summed =
                        checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;

                napi_gro_frags(napi);

out:
                EFX_BUG_ON_PARANOID(rx_buf->skb);
                rx_buf->page = NULL;
        } else {
                EFX_BUG_ON_PARANOID(!rx_buf->skb);
                EFX_BUG_ON_PARANOID(!checksummed);

                napi_gro_receive(napi, rx_buf->skb);
                rx_buf->skb = NULL;
        }
}

void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                   unsigned int len, bool checksummed, bool discard)
{
        struct efx_nic *efx = rx_queue->efx;
        struct efx_rx_buffer *rx_buf;
        bool leak_packet = false;

        rx_buf = efx_rx_buffer(rx_queue, index);
        EFX_BUG_ON_PARANOID(!rx_buf->data);
        EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
        EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));

        /* This allows the refill path to post another buffer.
         * EFX_RXD_HEAD_ROOM ensures that the slot we are using
         * isn't overwritten yet.
         */
        rx_queue->removed_count++;

        /* Validate the length encoded in the event vs the descriptor pushed */
        efx_rx_packet__check_len(rx_queue, rx_buf, len,
                                 &discard, &leak_packet);

        EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
                  rx_queue->queue, index,
                  (unsigned long long)rx_buf->dma_addr, len,
                  (checksummed ? " [SUMMED]" : ""),
                  (discard ? " [DISCARD]" : ""));

        /* Discard packet, if instructed to do so */
        if (unlikely(discard)) {
                if (unlikely(leak_packet))
                        rx_queue->channel->n_skbuff_leaks++;
                else
                        /* We haven't called efx_unmap_rx_buffer yet,
                         * so fini the entire rx_buffer here */
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                return;
        }

        /* Release card resources - assumes all RX buffers consumed in-order
         * per RX queue
         */
        efx_unmap_rx_buffer(efx, rx_buf);

        /* Prefetch nice and early so data will (hopefully) be in cache by
         * the time we look at it.
         */
        prefetch(rx_buf->data);

        /* Pipeline receives so that we give time for packet headers to be
         * prefetched into cache.
         */
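        /* (The buffer for the current event is stashed in channel->rx_pkt
         * below and is handed to __efx_rx_packet() either when the next
         * packet arrives or when the NAPI poll for this channel
         * completes.) */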
        rx_buf->len = len;
        if (rx_queue->channel->rx_pkt)
                __efx_rx_packet(rx_queue->channel,
                                rx_queue->channel->rx_pkt,
                                rx_queue->channel->rx_pkt_csummed);
        rx_queue->channel->rx_pkt = rx_buf;
        rx_queue->channel->rx_pkt_csummed = checksummed;
}

/* Handle a received packet.  Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel,
                     struct efx_rx_buffer *rx_buf, bool checksummed)
{
        struct efx_nic *efx = channel->efx;
        struct sk_buff *skb;

        /* If we're in loopback test, then pass the packet directly to the
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
                efx_free_rx_buffer(efx, rx_buf);
                goto done;
        }

        if (rx_buf->skb) {
                prefetch(skb_shinfo(rx_buf->skb));

                skb_put(rx_buf->skb, rx_buf->len);

                /* Move past the ethernet header. rx_buf->data still points
                 * at the ethernet header */
                rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
                                                       efx->net_dev);
        }

        if (likely(checksummed || rx_buf->page)) {
                efx_rx_packet_lro(channel, rx_buf, checksummed);
                goto done;
        }

        /* We now own the SKB */
        skb = rx_buf->skb;
        rx_buf->skb = NULL;

        EFX_BUG_ON_PARANOID(rx_buf->page);
        EFX_BUG_ON_PARANOID(rx_buf->skb);
        EFX_BUG_ON_PARANOID(!skb);

        /* Set the SKB flags */
        skb->ip_summed = CHECKSUM_NONE;

        skb_record_rx_queue(skb, channel->channel);

        /* Pass the packet up */
        netif_receive_skb(skb);

        /* Update allocation strategy method */
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;

done:
        ;
}

void efx_rx_strategy(struct efx_channel *channel)
{
        enum efx_rx_alloc_method method = rx_alloc_method;

        /* Only makes sense to use page based allocation if GRO is enabled */
        if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
                method = RX_ALLOC_METHOD_SKB;
        } else if (method == RX_ALLOC_METHOD_AUTO) {
                /* Constrain the rx_alloc_level */
                if (channel->rx_alloc_level < 0)
                        channel->rx_alloc_level = 0;
                else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
                        channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

                /* Decide on the allocation method */
                method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
                          RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
        }

        /* Push the option */
        channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int rxq_size;
        int rc;

        EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);

        /* Allocate RX buffers */
        rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
        rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;

        rc = falcon_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
        }
        return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
        struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, limit;

        EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);

        /* Initialise ptr fields */
        rx_queue->added_count = 0;
        rx_queue->notified_count = 0;
        rx_queue->removed_count = 0;
        rx_queue->min_fill = -1U;
        rx_queue->min_overfill = -1U;

        /* Initialise limit fields */
        max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
        trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
        limit = max_fill * min(rx_refill_limit, 100U) / 100U;

        rx_queue->max_fill = max_fill;
        rx_queue->fast_fill_trigger = trigger;
        rx_queue->fast_fill_limit = limit;

        /* Set up RX descriptor ring */
        falcon_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
        int i;
        struct efx_rx_buffer *rx_buf;

        EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);

        falcon_fini_rx(rx_queue);

        /* Release RX buffers.  NB: start at index 0, not the current HW ptr */
        if (rx_queue->buffer) {
                for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
                        rx_buf = efx_rx_buffer(rx_queue, i);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
        }

        /* For a page that is part-way through splitting into RX buffers */
        if (rx_queue->buf_page != NULL) {
                pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
                               efx_rx_buf_size(rx_queue->efx),
                               PCI_DMA_FROMDEVICE);
                __free_pages(rx_queue->buf_page,
                             rx_queue->efx->rx_buffer_order);
                rx_queue->buf_page = NULL;
        }
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
        EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);

        falcon_remove_rx(rx_queue);

        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
}


module_param(rx_alloc_method, int, 0644);
MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");

module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
                 "RX descriptor ring fast/slow fill threshold (%)");
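
/* Example usage (parameter values per the allocation-method comment above;
 * the module name "sfc" is the driver's usual name and is assumed here):
 *
 *   modprobe sfc rx_alloc_method=1 rx_refill_threshold=80
 *
 * forces skb-based RX allocation and starts refilling once the ring drops
 * below 80% full.  rx_alloc_method (permissions 0644) can also be changed
 * at runtime via /sys/module/sfc/parameters/rx_alloc_method, whereas
 * rx_refill_threshold (0444) is read-only after load. */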