linux/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
   1/*
   2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
   3 * driver for Linux.
   4 *
   5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
   6 *
   7 * This software is available to you under a choice of one of two
   8 * licenses.  You may choose to be licensed under the terms of the GNU
   9 * General Public License (GPL) Version 2, available from the file
  10 * COPYING in the main directory of this source tree, or the
  11 * OpenIB.org BSD license below:
  12 *
  13 *     Redistribution and use in source and binary forms, with or
  14 *     without modification, are permitted provided that the following
  15 *     conditions are met:
  16 *
  17 *      - Redistributions of source code must retain the above
  18 *        copyright notice, this list of conditions and the following
  19 *        disclaimer.
  20 *
  21 *      - Redistributions in binary form must reproduce the above
  22 *        copyright notice, this list of conditions and the following
  23 *        disclaimer in the documentation and/or other materials
  24 *        provided with the distribution.
  25 *
  26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  33 * SOFTWARE.
  34 */
  35
  36#include <linux/skbuff.h>
  37#include <linux/netdevice.h>
  38#include <linux/etherdevice.h>
  39#include <linux/if_vlan.h>
  40#include <linux/ip.h>
  41#include <net/ipv6.h>
  42#include <net/tcp.h>
  43#include <linux/dma-mapping.h>
  44#include <linux/prefetch.h>
  45
  46#include "t4vf_common.h"
  47#include "t4vf_defs.h"
  48
  49#include "../cxgb4/t4_regs.h"
  50#include "../cxgb4/t4fw_api.h"
  51#include "../cxgb4/t4_msg.h"
  52
  53/*
  54 * Decoded Adapter Parameters.
  55 */
  56static u32 FL_PG_ORDER;         /* large page allocation size */
  57static u32 STAT_LEN;            /* length of status page at ring end */
  58static u32 PKTSHIFT;            /* padding between CPL and packet data */
  59static u32 FL_ALIGN;            /* response queue message alignment */
  60
  61/*
  62 * Constants ...
  63 */
  64enum {
  65        /*
  66         * Egress Queue sizes, producer and consumer indices are all in units
  67         * of Egress Context Units bytes.  Note that as far as the hardware is
  68         * concerned, the free list is an Egress Queue (the host produces free
  69         * buffers which the hardware consumes) and free list entries are
  70         * 64-bit PCI DMA addresses.
  71         */
  72        EQ_UNIT = SGE_EQ_IDXSIZE,
  73        FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
  74        TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
  75
  76        /*
  77         * Max number of TX descriptors we clean up at a time.  Should be
  78         * modest as freeing skbs isn't cheap and it happens while holding
   79         * locks.  We just need to free packets faster than they arrive; we
  80         * eventually catch up and keep the amortized cost reasonable.
  81         */
  82        MAX_TX_RECLAIM = 16,
  83
  84        /*
  85         * Max number of Rx buffers we replenish at a time.  Again keep this
  86         * modest, allocating buffers isn't cheap either.
  87         */
  88        MAX_RX_REFILL = 16,
  89
  90        /*
  91         * Period of the Rx queue check timer.  This timer is infrequent as it
  92         * has something to do only when the system experiences severe memory
  93         * shortage.
  94         */
  95        RX_QCHECK_PERIOD = (HZ / 2),
  96
  97        /*
  98         * Period of the TX queue check timer and the maximum number of TX
  99         * descriptors to be reclaimed by the TX timer.
 100         */
 101        TX_QCHECK_PERIOD = (HZ / 2),
 102        MAX_TIMER_TX_RECLAIM = 100,
 103
 104        /*
 105         * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
 106         * timer will attempt to refill it.
 107         */
 108        FL_STARVE_THRES = 4,
 109
 110        /*
 111         * Suspend an Ethernet TX queue with fewer available descriptors than
 112         * this.  We always want to have room for a maximum sized packet:
 113         * inline immediate data + MAX_SKB_FRAGS. This is the same as
 114         * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
 115         * (see that function and its helpers for a description of the
 116         * calculation).
 117         */
 118        ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
 119        ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
 120                                   ((ETHTXQ_MAX_FRAGS-1) & 1) +
 121                                   2),
 122        ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
 123                          sizeof(struct cpl_tx_pkt_lso_core) +
 124                          sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
 125        ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,
 126
 127        ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),
 128
 129        /*
 130         * Max TX descriptor space we allow for an Ethernet packet to be
 131         * inlined into a WR.  This is limited by the maximum value which
 132         * we can specify for immediate data in the firmware Ethernet TX
 133         * Work Request.
 134         */
 135        MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_MASK,
 136
 137        /*
 138         * Max size of a WR sent through a control TX queue.
 139         */
 140        MAX_CTRL_WR_LEN = 256,
 141
 142        /*
 143         * Maximum amount of data which we'll ever need to inline into a
 144         * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
 145         */
 146        MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
 147                          ? MAX_IMM_TX_PKT_LEN
 148                          : MAX_CTRL_WR_LEN),
 149
 150        /*
 151         * For incoming packets less than RX_COPY_THRES, we copy the data into
 152         * an skb rather than referencing the data.  We allocate enough
 153         * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
 154         * of the data (header).
 155         */
 156        RX_COPY_THRES = 256,
 157        RX_PULL_LEN = 128,
 158
 159        /*
 160         * Main body length for sk_buffs used for RX Ethernet packets with
 161         * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
 162         * pskb_may_pull() some room.
 163         */
 164        RX_SKB_LEN = 512,
 165};
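
     /*
      * An editorial note on the arithmetic above: ETHTXQ_MAX_SGL_LEN mirrors
      * sgl_len(ETHTXQ_MAX_FRAGS) below, and ETHTXQ_MAX_FLITS is that SGL plus
      * the largest WR/CPL header we ever emit.  Assuming the usual 64-byte
      * Egress Queue Unit (SGE_EQ_IDXSIZE == 64), TXD_PER_EQ_UNIT is 8, so
      * ETHTXQ_STOP_THRES is one spare descriptor plus however many 8-flit
      * descriptors a maximum-sized Work Request can span.
      */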
 166
 167/*
 168 * Software state per TX descriptor.
 169 */
 170struct tx_sw_desc {
 171        struct sk_buff *skb;            /* socket buffer of TX data source */
 172        struct ulptx_sgl *sgl;          /* scatter/gather list in TX Queue */
 173};
 174
 175/*
 176 * Software state per RX Free List descriptor.  We keep track of the allocated
 177 * FL page, its size, and its PCI DMA address (if the page is mapped).  The FL
 178 * page size and its PCI DMA mapped state are stored in the low bits of the
 179 * PCI DMA address as per below.
 180 */
 181struct rx_sw_desc {
 182        struct page *page;              /* Free List page buffer */
 183        dma_addr_t dma_addr;            /* PCI DMA address (if mapped) */
 184                                        /*   and flags (see below) */
 185};
 186
 187/*
 188 * The low bits of rx_sw_desc.dma_addr have special meaning.  Note that the
 189 * SGE also uses the low 4 bits to determine the size of the buffer.  It uses
 190 * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
 191 * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
 192 * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
 193 * to the SGE.  Thus, our software state of "is the buffer mapped for DMA" is
 194 * maintained in an inverse sense so the hardware never sees that bit high.
 195 */
 196enum {
 197        RX_LARGE_BUF    = 1 << 0,       /* buffer is SGE_FL_BUFFER_SIZE[1] */
 198        RX_UNMAPPED_BUF = 1 << 1,       /* buffer is not mapped */
 199};
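
     /*
      * A worked example of the encoding above (illustrative address): a
      * large-page buffer mapped at bus address 0x12340000 is stored in
      * dma_addr as 0x12340001 -- RX_LARGE_BUF set, RX_UNMAPPED_BUF clear
      * because the buffer *is* mapped for DMA.  get_buf_addr() below masks
      * both low bits back off before the address is handed to
      * dma_unmap_page().
      */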
 200
 201/**
 202 *      get_buf_addr - return DMA buffer address of software descriptor
 203 *      @sdesc: pointer to the software buffer descriptor
 204 *
 205 *      Return the DMA buffer address of a software descriptor (stripping out
 206 *      our low-order flag bits).
 207 */
 208static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
 209{
 210        return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
 211}
 212
 213/**
 214 *      is_buf_mapped - is buffer mapped for DMA?
 215 *      @sdesc: pointer to the software buffer descriptor
 216 *
  217 *      Determine whether the buffer associated with a software descriptor is
 218 *      mapped for DMA or not.
 219 */
 220static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
 221{
 222        return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
 223}
 224
 225/**
 226 *      need_skb_unmap - does the platform need unmapping of sk_buffs?
 227 *
  228 *      Returns true if the platform needs sk_buff unmapping.  The compiler
  229 *      optimizes away the unmapping code when this returns false.
 230 */
 231static inline int need_skb_unmap(void)
 232{
 233#ifdef CONFIG_NEED_DMA_MAP_STATE
 234        return 1;
 235#else
 236        return 0;
 237#endif
 238}
 239
 240/**
 241 *      txq_avail - return the number of available slots in a TX queue
 242 *      @tq: the TX queue
 243 *
 244 *      Returns the number of available descriptors in a TX queue.
 245 */
 246static inline unsigned int txq_avail(const struct sge_txq *tq)
 247{
 248        return tq->size - 1 - tq->in_use;
 249}
 250
 251/**
 252 *      fl_cap - return the capacity of a Free List
 253 *      @fl: the Free List
 254 *
 255 *      Returns the capacity of a Free List.  The capacity is less than the
 256 *      size because an Egress Queue Index Unit worth of descriptors needs to
 257 *      be left unpopulated, otherwise the Producer and Consumer indices PIDX
 258 *      and CIDX will match and the hardware will think the FL is empty.
 259 */
 260static inline unsigned int fl_cap(const struct sge_fl *fl)
 261{
 262        return fl->size - FL_PER_EQ_UNIT;
 263}
 264
 265/**
 266 *      fl_starving - return whether a Free List is starving.
 267 *      @fl: the Free List
 268 *
  269 *      Tests the specified Free List to see whether the number of buffers
  270 *      available to the hardware has fallen below our "starvation"
 271 *      threshold.
 272 */
 273static inline bool fl_starving(const struct sge_fl *fl)
 274{
 275        return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
 276}
 277
 278/**
 279 *      map_skb -  map an skb for DMA to the device
 280 *      @dev: the egress net device
 281 *      @skb: the packet to map
 282 *      @addr: a pointer to the base of the DMA mapping array
 283 *
 284 *      Map an skb for DMA to the device and return an array of DMA addresses.
 285 */
 286static int map_skb(struct device *dev, const struct sk_buff *skb,
 287                   dma_addr_t *addr)
 288{
 289        const skb_frag_t *fp, *end;
 290        const struct skb_shared_info *si;
 291
 292        *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
 293        if (dma_mapping_error(dev, *addr))
 294                goto out_err;
 295
 296        si = skb_shinfo(skb);
 297        end = &si->frags[si->nr_frags];
 298        for (fp = si->frags; fp < end; fp++) {
 299                *++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
 300                                           DMA_TO_DEVICE);
 301                if (dma_mapping_error(dev, *addr))
 302                        goto unwind;
 303        }
 304        return 0;
 305
 306unwind:
 307        while (fp-- > si->frags)
 308                dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
 309        dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
 310
 311out_err:
 312        return -ENOMEM;
 313}
 314
 315static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
 316                      const struct ulptx_sgl *sgl, const struct sge_txq *tq)
 317{
 318        const struct ulptx_sge_pair *p;
 319        unsigned int nfrags = skb_shinfo(skb)->nr_frags;
 320
 321        if (likely(skb_headlen(skb)))
 322                dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
 323                                 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
 324        else {
 325                dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
 326                               be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
 327                nfrags--;
 328        }
 329
 330        /*
 331         * the complexity below is because of the possibility of a wrap-around
 332         * in the middle of an SGL
 333         */
 334        for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
 335                if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
 336unmap:
 337                        dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
 338                                       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
 339                        dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
 340                                       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
 341                        p++;
 342                } else if ((u8 *)p == (u8 *)tq->stat) {
 343                        p = (const struct ulptx_sge_pair *)tq->desc;
 344                        goto unmap;
 345                } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
 346                        const __be64 *addr = (const __be64 *)tq->desc;
 347
 348                        dma_unmap_page(dev, be64_to_cpu(addr[0]),
 349                                       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
 350                        dma_unmap_page(dev, be64_to_cpu(addr[1]),
 351                                       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
 352                        p = (const struct ulptx_sge_pair *)&addr[2];
 353                } else {
 354                        const __be64 *addr = (const __be64 *)tq->desc;
 355
 356                        dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
 357                                       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
 358                        dma_unmap_page(dev, be64_to_cpu(addr[0]),
 359                                       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
 360                        p = (const struct ulptx_sge_pair *)&addr[1];
 361                }
 362        }
 363        if (nfrags) {
 364                __be64 addr;
 365
 366                if ((u8 *)p == (u8 *)tq->stat)
 367                        p = (const struct ulptx_sge_pair *)tq->desc;
 368                addr = ((u8 *)p + 16 <= (u8 *)tq->stat
 369                        ? p->addr[0]
 370                        : *(const __be64 *)tq->desc);
 371                dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
 372                               DMA_TO_DEVICE);
 373        }
 374}
 375
 376/**
 377 *      free_tx_desc - reclaims TX descriptors and their buffers
 378 *      @adapter: the adapter
 379 *      @tq: the TX queue to reclaim descriptors from
 380 *      @n: the number of descriptors to reclaim
 381 *      @unmap: whether the buffers should be unmapped for DMA
 382 *
 383 *      Reclaims TX descriptors from an SGE TX queue and frees the associated
 384 *      TX buffers.  Called with the TX queue lock held.
 385 */
 386static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
 387                         unsigned int n, bool unmap)
 388{
 389        struct tx_sw_desc *sdesc;
 390        unsigned int cidx = tq->cidx;
 391        struct device *dev = adapter->pdev_dev;
 392
 393        const int need_unmap = need_skb_unmap() && unmap;
 394
 395        sdesc = &tq->sdesc[cidx];
 396        while (n--) {
 397                /*
 398                 * If we kept a reference to the original TX skb, we need to
 399                 * unmap it from PCI DMA space (if required) and free it.
 400                 */
 401                if (sdesc->skb) {
 402                        if (need_unmap)
 403                                unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
 404                        kfree_skb(sdesc->skb);
 405                        sdesc->skb = NULL;
 406                }
 407
 408                sdesc++;
 409                if (++cidx == tq->size) {
 410                        cidx = 0;
 411                        sdesc = tq->sdesc;
 412                }
 413        }
 414        tq->cidx = cidx;
 415}
 416
 417/*
 418 * Return the number of reclaimable descriptors in a TX queue.
 419 */
 420static inline int reclaimable(const struct sge_txq *tq)
 421{
 422        int hw_cidx = be16_to_cpu(tq->stat->cidx);
 423        int reclaimable = hw_cidx - tq->cidx;
 424        if (reclaimable < 0)
 425                reclaimable += tq->size;
 426        return reclaimable;
 427}
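
     /*
      * For example, with a 1024-entry queue, a hardware CIDX of 5 and a
      * software CIDX of 1010 the subtraction above yields -1005, which the
      * wrap-around correction turns into 19 reclaimable descriptors.
      */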
 428
 429/**
 430 *      reclaim_completed_tx - reclaims completed TX descriptors
 431 *      @adapter: the adapter
 432 *      @tq: the TX queue to reclaim completed descriptors from
 433 *      @unmap: whether the buffers should be unmapped for DMA
 434 *
 435 *      Reclaims TX descriptors that the SGE has indicated it has processed,
 436 *      and frees the associated buffers if possible.  Called with the TX
 437 *      queue locked.
 438 */
 439static inline void reclaim_completed_tx(struct adapter *adapter,
 440                                        struct sge_txq *tq,
 441                                        bool unmap)
 442{
 443        int avail = reclaimable(tq);
 444
 445        if (avail) {
 446                /*
 447                 * Limit the amount of clean up work we do at a time to keep
 448                 * the TX lock hold time O(1).
 449                 */
 450                if (avail > MAX_TX_RECLAIM)
 451                        avail = MAX_TX_RECLAIM;
 452
 453                free_tx_desc(adapter, tq, avail, unmap);
 454                tq->in_use -= avail;
 455        }
 456}
 457
 458/**
 459 *      get_buf_size - return the size of an RX Free List buffer.
 460 *      @sdesc: pointer to the software buffer descriptor
 461 */
 462static inline int get_buf_size(const struct rx_sw_desc *sdesc)
 463{
 464        return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
 465                ? (PAGE_SIZE << FL_PG_ORDER)
 466                : PAGE_SIZE;
 467}
 468
 469/**
 470 *      free_rx_bufs - free RX buffers on an SGE Free List
 471 *      @adapter: the adapter
 472 *      @fl: the SGE Free List to free buffers from
 473 *      @n: how many buffers to free
 474 *
 475 *      Release the next @n buffers on an SGE Free List RX queue.   The
 476 *      buffers must be made inaccessible to hardware before calling this
 477 *      function.
 478 */
 479static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
 480{
 481        while (n--) {
 482                struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
 483
 484                if (is_buf_mapped(sdesc))
 485                        dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
 486                                       get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
 487                put_page(sdesc->page);
 488                sdesc->page = NULL;
 489                if (++fl->cidx == fl->size)
 490                        fl->cidx = 0;
 491                fl->avail--;
 492        }
 493}
 494
 495/**
 496 *      unmap_rx_buf - unmap the current RX buffer on an SGE Free List
 497 *      @adapter: the adapter
 498 *      @fl: the SGE Free List
 499 *
 500 *      Unmap the current buffer on an SGE Free List RX queue.   The
 501 *      buffer must be made inaccessible to HW before calling this function.
 502 *
 503 *      This is similar to @free_rx_bufs above but does not free the buffer.
 504 *      Do note that the FL still loses any further access to the buffer.
 505 *      This is used predominantly to "transfer ownership" of an FL buffer
 506 *      to another entity (typically an skb's fragment list).
 507 */
 508static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
 509{
 510        struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
 511
 512        if (is_buf_mapped(sdesc))
 513                dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
 514                               get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
 515        sdesc->page = NULL;
 516        if (++fl->cidx == fl->size)
 517                fl->cidx = 0;
 518        fl->avail--;
 519}
 520
 521/**
  522 *      ring_fl_db - ring doorbell on free list
 523 *      @adapter: the adapter
 524 *      @fl: the Free List whose doorbell should be rung ...
 525 *
 526 *      Tell the Scatter Gather Engine that there are new free list entries
 527 *      available.
 528 */
 529static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
 530{
 531        u32 val;
 532
 533        /*
 534         * The SGE keeps track of its Producer and Consumer Indices in terms
  535         * of Egress Queue Units so we can only tell it about whole multiples
  536         * of Free List Entries per Egress Queue Unit ...
 537         */
 538        if (fl->pend_cred >= FL_PER_EQ_UNIT) {
 539                val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
 540                if (!is_t4(adapter->chip))
 541                        val |= DBTYPE(1);
 542                wmb();
 543                t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
 544                             DBPRIO(1) |
 545                             QID(fl->cntxt_id) | val);
 546                fl->pend_cred %= FL_PER_EQ_UNIT;
 547        }
 548}
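
     /*
      * For instance, if FL_PER_EQ_UNIT is 8 and 19 credits are pending, the
      * code above rings the doorbell with PIDX(2) -- two whole Egress Queue
      * Units -- and leaves the remaining 3 credits pending for a later ring.
      */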
 549
 550/**
 551 *      set_rx_sw_desc - initialize software RX buffer descriptor
  552 *      @sdesc: pointer to the software RX buffer descriptor
 553 *      @page: pointer to the page data structure backing the RX buffer
 554 *      @dma_addr: PCI DMA address (possibly with low-bit flags)
 555 */
 556static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
 557                                  dma_addr_t dma_addr)
 558{
 559        sdesc->page = page;
 560        sdesc->dma_addr = dma_addr;
 561}
 562
 563/*
 564 * Support for poisoning RX buffers ...
 565 */
 566#define POISON_BUF_VAL -1
 567
 568static inline void poison_buf(struct page *page, size_t sz)
 569{
 570#if POISON_BUF_VAL >= 0
 571        memset(page_address(page), POISON_BUF_VAL, sz);
 572#endif
 573}
 574
 575/**
 576 *      refill_fl - refill an SGE RX buffer ring
 577 *      @adapter: the adapter
 578 *      @fl: the Free List ring to refill
 579 *      @n: the number of new buffers to allocate
 580 *      @gfp: the gfp flags for the allocations
 581 *
 582 *      (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 583 *      allocated with the supplied gfp flags.  The caller must assure that
 584 *      @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
 585 *      EGRESS QUEUE UNITS_ indicates an empty Free List!  Returns the number
 586 *      of buffers allocated.  If afterwards the queue is found critically low,
 587 *      mark it as starving in the bitmap of starving FLs.
 588 */
 589static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 590                              int n, gfp_t gfp)
 591{
 592        struct page *page;
 593        dma_addr_t dma_addr;
 594        unsigned int cred = fl->avail;
 595        __be64 *d = &fl->desc[fl->pidx];
 596        struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
 597
 598        /*
  599         * Sanity: ensure that adding n Free List buffers won't wrap the
  600         * SGE's Producer Index around to its Consumer Index, which would
  601         * indicate an empty Free List ...
 602         */
 603        BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
 604
 605        /*
 606         * If we support large pages, prefer large buffers and fail over to
 607         * small pages if we can't allocate large pages to satisfy the refill.
 608         * If we don't support large pages, drop directly into the small page
 609         * allocation code.
 610         */
 611        if (FL_PG_ORDER == 0)
 612                goto alloc_small_pages;
 613
 614        while (n) {
 615                page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
 616                                   FL_PG_ORDER);
 617                if (unlikely(!page)) {
 618                        /*
  619                         * We've failed in our attempt to allocate a "large
 620                         * page".  Fail over to the "small page" allocation
 621                         * below.
 622                         */
 623                        fl->large_alloc_failed++;
 624                        break;
 625                }
 626                poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
 627
 628                dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
 629                                        PAGE_SIZE << FL_PG_ORDER,
 630                                        PCI_DMA_FROMDEVICE);
 631                if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
 632                        /*
 633                         * We've run out of DMA mapping space.  Free up the
 634                         * buffer and return with what we've managed to put
 635                         * into the free list.  We don't want to fail over to
 636                         * the small page allocation below in this case
 637                         * because DMA mapping resources are typically
  638                         * critical resources once they become scarce.
 639                         */
 640                        __free_pages(page, FL_PG_ORDER);
 641                        goto out;
 642                }
 643                dma_addr |= RX_LARGE_BUF;
 644                *d++ = cpu_to_be64(dma_addr);
 645
 646                set_rx_sw_desc(sdesc, page, dma_addr);
 647                sdesc++;
 648
 649                fl->avail++;
 650                if (++fl->pidx == fl->size) {
 651                        fl->pidx = 0;
 652                        sdesc = fl->sdesc;
 653                        d = fl->desc;
 654                }
 655                n--;
 656        }
 657
 658alloc_small_pages:
 659        while (n--) {
 660                page = __skb_alloc_page(gfp | __GFP_NOWARN, NULL);
 661                if (unlikely(!page)) {
 662                        fl->alloc_failed++;
 663                        break;
 664                }
 665                poison_buf(page, PAGE_SIZE);
 666
 667                dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
 668                                       PCI_DMA_FROMDEVICE);
 669                if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
 670                        put_page(page);
 671                        break;
 672                }
 673                *d++ = cpu_to_be64(dma_addr);
 674
 675                set_rx_sw_desc(sdesc, page, dma_addr);
 676                sdesc++;
 677
 678                fl->avail++;
 679                if (++fl->pidx == fl->size) {
 680                        fl->pidx = 0;
 681                        sdesc = fl->sdesc;
 682                        d = fl->desc;
 683                }
 684        }
 685
 686out:
 687        /*
 688         * Update our accounting state to incorporate the new Free List
 689         * buffers, tell the hardware about them and return the number of
 690         * buffers which we were able to allocate.
 691         */
 692        cred = fl->avail - cred;
 693        fl->pend_cred += cred;
 694        ring_fl_db(adapter, fl);
 695
 696        if (unlikely(fl_starving(fl))) {
 697                smp_wmb();
 698                set_bit(fl->cntxt_id, adapter->sge.starving_fl);
 699        }
 700
 701        return cred;
 702}
 703
 704/*
 705 * Refill a Free List to its capacity or the Maximum Refill Increment,
 706 * whichever is smaller ...
 707 */
 708static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
 709{
 710        refill_fl(adapter, fl,
 711                  min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
 712                  GFP_ATOMIC);
 713}
 714
 715/**
 716 *      alloc_ring - allocate resources for an SGE descriptor ring
 717 *      @dev: the PCI device's core device
 718 *      @nelem: the number of descriptors
 719 *      @hwsize: the size of each hardware descriptor
 720 *      @swsize: the size of each software descriptor
 721 *      @busaddrp: the physical PCI bus address of the allocated ring
 722 *      @swringp: return address pointer for software ring
 723 *      @stat_size: extra space in hardware ring for status information
 724 *
 725 *      Allocates resources for an SGE descriptor ring, such as TX queues,
 726 *      free buffer lists, response queues, etc.  Each SGE ring requires
 727 *      space for its hardware descriptors plus, optionally, space for software
 728 *      state associated with each hardware entry (the metadata).  The function
 729 *      returns three values: the virtual address for the hardware ring (the
 730 *      return value of the function), the PCI bus address of the hardware
 731 *      ring (in *busaddrp), and the address of the software ring (in swringp).
 732 *      Both the hardware and software rings are returned zeroed out.
 733 */
 734static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
 735                        size_t swsize, dma_addr_t *busaddrp, void *swringp,
 736                        size_t stat_size)
 737{
 738        /*
  739         * Allocate the hardware ring and the PCI DMA bus address space for it.
 740         */
 741        size_t hwlen = nelem * hwsize + stat_size;
 742        void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
 743
 744        if (!hwring)
 745                return NULL;
 746
 747        /*
 748         * If the caller wants a software ring, allocate it and return a
 749         * pointer to it in *swringp.
 750         */
 751        BUG_ON((swsize != 0) != (swringp != NULL));
 752        if (swsize) {
 753                void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
 754
 755                if (!swring) {
 756                        dma_free_coherent(dev, hwlen, hwring, *busaddrp);
 757                        return NULL;
 758                }
 759                *(void **)swringp = swring;
 760        }
 761
 762        /*
 763         * Zero out the hardware ring and return its address as our function
 764         * value.
 765         */
 766        memset(hwring, 0, hwlen);
 767        return hwring;
 768}
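
     /*
      * A sketch of typical usage (the queue-allocation code later in this
      * file makes calls along these lines; the field names here are
      * illustrative, not a guarantee of the sge_fl layout):
      *
      *      fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
      *                            sizeof(__be64), sizeof(struct rx_sw_desc),
      *                            &fl->addr, &fl->sdesc, STAT_LEN);
      *      if (!fl->desc)
      *              return -ENOMEM;
      */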
 769
 770/**
 771 *      sgl_len - calculates the size of an SGL of the given capacity
 772 *      @n: the number of SGL entries
 773 *
 774 *      Calculates the number of flits (8-byte units) needed for a Direct
 775 *      Scatter/Gather List that can hold the given number of entries.
 776 */
 777static inline unsigned int sgl_len(unsigned int n)
 778{
 779        /*
 780         * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
 781         * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
 782         * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
 783         * repeated sequences of { Length[i], Length[i+1], Address[i],
 784         * Address[i+1] } (this ensures that all addresses are on 64-bit
 785         * boundaries).  If N is even, then Length[N+1] should be set to 0 and
 786         * Address[N+1] is omitted.
 787         *
 788         * The following calculation incorporates all of the above.  It's
  789         * somewhat hard to follow but, briefly: the "+2" accounts for the
  790         * first two flits which include the DSGL header, Length0 and
  791         * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
  792         * flits for every complete pair of the remaining N entries); and
  793         * finally the "+((n-1)&1)" adds the one remaining flit needed if
  794         * (n-1) is odd ...
 795         */
 796        n--;
 797        return (3 * n) / 2 + (n & 1) + 2;
 798}
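
     /*
      * A quick sanity example of the formula: for an skb with a linear area
      * plus two page fragments, n == 3 SGL entries, so sgl_len(3) is
      * (3*2)/2 + (2&1) + 2 == 5 flits, i.e. 40 bytes of DSGL.
      */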
 799
 800/**
 801 *      flits_to_desc - returns the num of TX descriptors for the given flits
 802 *      @flits: the number of flits
 803 *
 804 *      Returns the number of TX descriptors needed for the supplied number
 805 *      of flits.
 806 */
 807static inline unsigned int flits_to_desc(unsigned int flits)
 808{
 809        BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
 810        return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
 811}
 812
 813/**
 814 *      is_eth_imm - can an Ethernet packet be sent as immediate data?
 815 *      @skb: the packet
 816 *
 817 *      Returns whether an Ethernet packet is small enough to fit completely as
 818 *      immediate data.
 819 */
 820static inline int is_eth_imm(const struct sk_buff *skb)
 821{
 822        /*
 823         * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
 824         * which does not accommodate immediate data.  We could dike out all
 825         * of the support code for immediate data but that would tie our hands
  826         * too much if we ever want to enhance the firmware.  It would also
 827         * create more differences between the PF and VF Drivers.
 828         */
 829        return false;
 830}
 831
 832/**
 833 *      calc_tx_flits - calculate the number of flits for a packet TX WR
 834 *      @skb: the packet
 835 *
 836 *      Returns the number of flits needed for a TX Work Request for the
 837 *      given Ethernet packet, including the needed WR and CPL headers.
 838 */
 839static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
 840{
 841        unsigned int flits;
 842
 843        /*
 844         * If the skb is small enough, we can pump it out as a work request
 845         * with only immediate data.  In that case we just have to have the
 846         * TX Packet header plus the skb data in the Work Request.
 847         */
 848        if (is_eth_imm(skb))
 849                return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
 850                                    sizeof(__be64));
 851
 852        /*
  853         * Otherwise, we're going to have to construct a Scatter/Gather List
  854         * of the skb body and fragments.  We also include the flits necessary
  855         * for the TX Packet Work Request and CPL.  We always have a firmware
  856         * Work Request header (incorporated as part of the cpl_tx_pkt_lso and
  857         * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
  858         * message or, if we're doing a Large Send Offload, an LSO CPL message
  859         * with an embedded TX Packet Write CPL message.
 860         */
 861        flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
 862        if (skb_shinfo(skb)->gso_size)
 863                flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
 864                          sizeof(struct cpl_tx_pkt_lso_core) +
 865                          sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
 866        else
 867                flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
 868                          sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
 869        return flits;
 870}
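
     /*
      * As a concrete illustration: a non-TSO packet with a linear area plus
      * two page fragments needs sgl_len(3) == 5 flits of DSGL plus the
      * fw_eth_tx_pkt_vm_wr and cpl_tx_pkt_core header flits computed above.
      */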
 871
 872/**
 873 *      write_sgl - populate a Scatter/Gather List for a packet
 874 *      @skb: the packet
 875 *      @tq: the TX queue we are writing into
 876 *      @sgl: starting location for writing the SGL
 877 *      @end: points right after the end of the SGL
 878 *      @start: start offset into skb main-body data to include in the SGL
 879 *      @addr: the list of DMA bus addresses for the SGL elements
 880 *
 881 *      Generates a Scatter/Gather List for the buffers that make up a packet.
 882 *      The caller must provide adequate space for the SGL that will be written.
 883 *      The SGL includes all of the packet's page fragments and the data in its
  884 *      main body except for the first @start bytes.  @sgl must be 16-byte
  885 *      aligned and within a TX descriptor with available space.  @end points
  886 *      right after the end of the SGL but does not account for any potential
 887 *      wrap around, i.e., @end > @tq->stat.
 888 */
 889static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
 890                      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
 891                      const dma_addr_t *addr)
 892{
 893        unsigned int i, len;
 894        struct ulptx_sge_pair *to;
 895        const struct skb_shared_info *si = skb_shinfo(skb);
 896        unsigned int nfrags = si->nr_frags;
 897        struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
 898
 899        len = skb_headlen(skb) - start;
 900        if (likely(len)) {
 901                sgl->len0 = htonl(len);
 902                sgl->addr0 = cpu_to_be64(addr[0] + start);
 903                nfrags++;
 904        } else {
 905                sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
 906                sgl->addr0 = cpu_to_be64(addr[1]);
 907        }
 908
 909        sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
 910                              ULPTX_NSGE(nfrags));
 911        if (likely(--nfrags == 0))
 912                return;
 913        /*
 914         * Most of the complexity below deals with the possibility we hit the
 915         * end of the queue in the middle of writing the SGL.  For this case
 916         * only we create the SGL in a temporary buffer and then copy it.
 917         */
 918        to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
 919
 920        for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
 921                to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
 922                to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
 923                to->addr[0] = cpu_to_be64(addr[i]);
 924                to->addr[1] = cpu_to_be64(addr[++i]);
 925        }
 926        if (nfrags) {
 927                to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
 928                to->len[1] = cpu_to_be32(0);
 929                to->addr[0] = cpu_to_be64(addr[i + 1]);
 930        }
 931        if (unlikely((u8 *)end > (u8 *)tq->stat)) {
 932                unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
 933
 934                if (likely(part0))
 935                        memcpy(sgl->sge, buf, part0);
 936                part1 = (u8 *)end - (u8 *)tq->stat;
 937                memcpy(tq->desc, (u8 *)buf + part0, part1);
 938                end = (void *)tq->desc + part1;
 939        }
 940        if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
 941                *end = 0;
 942}
 943
 944/**
  945 *      ring_tx_db - check and potentially ring a TX queue's doorbell
 946 *      @adapter: the adapter
 947 *      @tq: the TX queue
 948 *      @n: number of new descriptors to give to HW
 949 *
  950 *      Ring the doorbell for a TX queue.
 951 */
 952static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
 953                              int n)
 954{
 955        /*
 956         * Warn if we write doorbells with the wrong priority and write
 957         * descriptors before telling HW.
 958         */
 959        WARN_ON((QID(tq->cntxt_id) | PIDX(n)) & DBPRIO(1));
 960        wmb();
 961        t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
 962                     QID(tq->cntxt_id) | PIDX(n));
 963}
 964
 965/**
 966 *      inline_tx_skb - inline a packet's data into TX descriptors
 967 *      @skb: the packet
 968 *      @tq: the TX queue where the packet will be inlined
 969 *      @pos: starting position in the TX queue to inline the packet
 970 *
 971 *      Inline a packet's contents directly into TX descriptors, starting at
 972 *      the given position within the TX DMA ring.
 973 *      Most of the complexity of this operation is dealing with wrap arounds
 974 *      in the middle of the packet we want to inline.
 975 */
 976static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
 977                          void *pos)
 978{
 979        u64 *p;
 980        int left = (void *)tq->stat - pos;
 981
 982        if (likely(skb->len <= left)) {
 983                if (likely(!skb->data_len))
 984                        skb_copy_from_linear_data(skb, pos, skb->len);
 985                else
 986                        skb_copy_bits(skb, 0, pos, skb->len);
 987                pos += skb->len;
 988        } else {
 989                skb_copy_bits(skb, 0, pos, left);
 990                skb_copy_bits(skb, left, tq->desc, skb->len - left);
 991                pos = (void *)tq->desc + (skb->len - left);
 992        }
 993
 994        /* 0-pad to multiple of 16 */
 995        p = PTR_ALIGN(pos, 8);
 996        if ((uintptr_t)p & 8)
 997                *p = 0;
 998}
 999
1000/*
1001 * Figure out what HW csum a packet wants and return the appropriate control
1002 * bits.
1003 */
1004static u64 hwcsum(const struct sk_buff *skb)
1005{
1006        int csum_type;
1007        const struct iphdr *iph = ip_hdr(skb);
1008
1009        if (iph->version == 4) {
1010                if (iph->protocol == IPPROTO_TCP)
1011                        csum_type = TX_CSUM_TCPIP;
1012                else if (iph->protocol == IPPROTO_UDP)
1013                        csum_type = TX_CSUM_UDPIP;
1014                else {
1015nocsum:
1016                        /*
1017                         * unknown protocol, disable HW csum
1018                         * and hope a bad packet is detected
1019                         */
1020                        return TXPKT_L4CSUM_DIS;
1021                }
1022        } else {
1023                /*
1024                 * this doesn't work with extension headers
1025                 */
1026                const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1027
1028                if (ip6h->nexthdr == IPPROTO_TCP)
1029                        csum_type = TX_CSUM_TCPIP6;
1030                else if (ip6h->nexthdr == IPPROTO_UDP)
1031                        csum_type = TX_CSUM_UDPIP6;
1032                else
1033                        goto nocsum;
1034        }
1035
1036        if (likely(csum_type >= TX_CSUM_TCPIP))
1037                return TXPKT_CSUM_TYPE(csum_type) |
1038                        TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
1039                        TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
1040        else {
1041                int start = skb_transport_offset(skb);
1042
1043                return TXPKT_CSUM_TYPE(csum_type) |
1044                        TXPKT_CSUM_START(start) |
1045                        TXPKT_CSUM_LOC(start + skb->csum_offset);
1046        }
1047}
1048
1049/*
1050 * Stop an Ethernet TX queue and record that state change.
1051 */
1052static void txq_stop(struct sge_eth_txq *txq)
1053{
1054        netif_tx_stop_queue(txq->txq);
1055        txq->q.stops++;
1056}
1057
1058/*
1059 * Advance our software state for a TX queue by adding n in use descriptors.
1060 */
1061static inline void txq_advance(struct sge_txq *tq, unsigned int n)
1062{
1063        tq->in_use += n;
1064        tq->pidx += n;
1065        if (tq->pidx >= tq->size)
1066                tq->pidx -= tq->size;
1067}
1068
1069/**
1070 *      t4vf_eth_xmit - add a packet to an Ethernet TX queue
1071 *      @skb: the packet
1072 *      @dev: the egress net device
1073 *
1074 *      Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
1075 */
1076int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1077{
1078        u32 wr_mid;
1079        u64 cntrl, *end;
1080        int qidx, credits;
1081        unsigned int flits, ndesc;
1082        struct adapter *adapter;
1083        struct sge_eth_txq *txq;
1084        const struct port_info *pi;
1085        struct fw_eth_tx_pkt_vm_wr *wr;
1086        struct cpl_tx_pkt_core *cpl;
1087        const struct skb_shared_info *ssi;
1088        dma_addr_t addr[MAX_SKB_FRAGS + 1];
1089        const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
1090                                        sizeof(wr->ethmacsrc) +
1091                                        sizeof(wr->ethtype) +
1092                                        sizeof(wr->vlantci));
1093
1094        /*
1095         * The chip minimum packet length is 10 octets but the firmware
1096         * command that we are using requires that we copy the Ethernet header
 1097         * (including the VLAN tag) into the Work Request, so we reject anything
 1098         * smaller than that ...
1099         */
1100        if (unlikely(skb->len < fw_hdr_copy_len))
1101                goto out_free;
1102
1103        /*
1104         * Figure out which TX Queue we're going to use.
1105         */
1106        pi = netdev_priv(dev);
1107        adapter = pi->adapter;
1108        qidx = skb_get_queue_mapping(skb);
1109        BUG_ON(qidx >= pi->nqsets);
1110        txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1111
1112        /*
1113         * Take this opportunity to reclaim any TX Descriptors whose DMA
1114         * transfers have completed.
1115         */
1116        reclaim_completed_tx(adapter, &txq->q, true);
1117
1118        /*
1119         * Calculate the number of flits and TX Descriptors we're going to
1120         * need along with how many TX Descriptors will be left over after
1121         * we inject our Work Request.
1122         */
1123        flits = calc_tx_flits(skb);
1124        ndesc = flits_to_desc(flits);
1125        credits = txq_avail(&txq->q) - ndesc;
1126
1127        if (unlikely(credits < 0)) {
1128                /*
1129                 * Not enough room for this packet's Work Request.  Stop the
1130                 * TX Queue and return a "busy" condition.  The queue will get
1131                 * started later on when the firmware informs us that space
1132                 * has opened up.
1133                 */
1134                txq_stop(txq);
1135                dev_err(adapter->pdev_dev,
1136                        "%s: TX ring %u full while queue awake!\n",
1137                        dev->name, qidx);
1138                return NETDEV_TX_BUSY;
1139        }
1140
1141        if (!is_eth_imm(skb) &&
1142            unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
1143                /*
1144                 * We need to map the skb into PCI DMA space (because it can't
1145                 * be in-lined directly into the Work Request) and the mapping
1146                 * operation failed.  Record the error and drop the packet.
1147                 */
1148                txq->mapping_err++;
1149                goto out_free;
1150        }
1151
1152        wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
1153        if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1154                /*
1155                 * After we're done injecting the Work Request for this
1156                 * packet, we'll be below our "stop threshold" so stop the TX
1157                 * Queue now and schedule a request for an SGE Egress Queue
1158                 * Update message.  The queue will get started later on when
1159                 * the firmware processes this Work Request and sends us an
1160                 * Egress Queue Status Update message indicating that space
1161                 * has opened up.
1162                 */
1163                txq_stop(txq);
1164                wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
1165        }
1166
1167        /*
1168         * Start filling in our Work Request.  Note that we do _not_ handle
1169         * the WR Header wrapping around the TX Descriptor Ring.  If our
1170         * maximum header size ever exceeds one TX Descriptor, we'll need to
1171         * do something else here.
1172         */
1173        BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1174        wr = (void *)&txq->q.desc[txq->q.pidx];
1175        wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1176        wr->r3[0] = cpu_to_be64(0);
1177        wr->r3[1] = cpu_to_be64(0);
1178        skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
1179        end = (u64 *)wr + flits;
1180
1181        /*
1182         * If this is a Large Send Offload packet we'll put in an LSO CPL
1183         * message with an encapsulated TX Packet CPL message.  Otherwise we
1184         * just use a TX Packet CPL message.
1185         */
1186        ssi = skb_shinfo(skb);
1187        if (ssi->gso_size) {
1188                struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1189                bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1190                int l3hdr_len = skb_network_header_len(skb);
1191                int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1192
1193                wr->op_immdlen =
1194                        cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
1195                                    FW_WR_IMMDLEN(sizeof(*lso) +
1196                                                  sizeof(*cpl)));
1197                /*
1198                 * Fill in the LSO CPL message.
1199                 */
1200                lso->lso_ctrl =
1201                        cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
1202                                    LSO_FIRST_SLICE |
1203                                    LSO_LAST_SLICE |
1204                                    LSO_IPV6(v6) |
1205                                    LSO_ETHHDR_LEN(eth_xtra_len/4) |
1206                                    LSO_IPHDR_LEN(l3hdr_len/4) |
1207                                    LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
1208                lso->ipid_ofst = cpu_to_be16(0);
1209                lso->mss = cpu_to_be16(ssi->gso_size);
1210                lso->seqno_offset = cpu_to_be32(0);
1211                lso->len = cpu_to_be32(skb->len);
1212
1213                /*
1214                 * Set up TX Packet CPL pointer, control word and perform
1215                 * accounting.
1216                 */
1217                cpl = (void *)(lso + 1);
1218                cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1219                         TXPKT_IPHDR_LEN(l3hdr_len) |
1220                         TXPKT_ETHHDR_LEN(eth_xtra_len));
1221                txq->tso++;
1222                txq->tx_cso += ssi->gso_segs;
1223        } else {
1224                int len;
1225
1226                len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
1227                wr->op_immdlen =
1228                        cpu_to_be32(FW_WR_OP(FW_ETH_TX_PKT_VM_WR) |
1229                                    FW_WR_IMMDLEN(len));
1230
1231                /*
1232                 * Set up TX Packet CPL pointer, control word and perform
1233                 * accounting.
1234                 */
1235                cpl = (void *)(wr + 1);
1236                if (skb->ip_summed == CHECKSUM_PARTIAL) {
1237                        cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
1238                        txq->tx_cso++;
1239                } else
1240                        cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
1241        }
1242
1243        /*
1244         * If there's a VLAN tag present, add that to the list of things to
1245         * do in this Work Request.
1246         */
1247        if (vlan_tx_tag_present(skb)) {
1248                txq->vlan_ins++;
1249                cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
1250        }
1251
1252        /*
1253         * Fill in the TX Packet CPL message header.
1254         */
1255        cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
1256                                 TXPKT_INTF(pi->port_id) |
1257                                 TXPKT_PF(0));
1258        cpl->pack = cpu_to_be16(0);
1259        cpl->len = cpu_to_be16(skb->len);
1260        cpl->ctrl1 = cpu_to_be64(cntrl);
1261
1262#ifdef T4_TRACE
1263        T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
1264                  "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
1265                  ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
1266#endif
1267
1268        /*
1269         * Fill in the body of the TX Packet CPL message with either in-lined
1270         * data or a Scatter/Gather List.
1271         */
1272        if (is_eth_imm(skb)) {
1273                /*
1274                 * In-line the packet's data and free the skb since we don't
1275                 * need it any longer.
1276                 */
1277                inline_tx_skb(skb, &txq->q, cpl + 1);
1278                dev_kfree_skb(skb);
1279        } else {
1280                /*
1281                 * Write the skb's Scatter/Gather list into the TX Packet CPL
1282                 * message and retain a pointer to the skb so we can free it
1283                 * later when its DMA completes.  (We store the skb pointer
1284                 * in the Software Descriptor corresponding to the last TX
1285                 * Descriptor used by the Work Request.)
1286                 *
1287                 * The retained skb will be freed when the corresponding TX
1288                 * Descriptors are reclaimed after their DMAs complete.
1289                 * However, this could take quite a while since, in general,
1290                 * the hardware is set up to be lazy about sending DMA
1291                 * completion notifications to us and we mostly perform TX
1292                 * reclaims in the transmit routine.
1293                 *
 1294                 * This is good for performance but means that we rely on new
1295                 * TX packets arriving to run the destructors of completed
1296                 * packets, which open up space in their sockets' send queues.
1297                 * Sometimes we do not get such new packets causing TX to
1298                 * stall.  A single UDP transmitter is a good example of this
1299                 * situation.  We have a clean up timer that periodically
1300                 * reclaims completed packets but it doesn't run often enough
1301                 * (nor do we want it to) to prevent lengthy stalls.  A
1302                 * solution to this problem is to run the destructor early,
1303                 * after the packet is queued but before it's DMAd.  A con is
1304                 * that we lie to socket memory accounting, but the amount of
1305                 * extra memory is reasonable (limited by the number of TX
1306                 * descriptors), the packets do actually get freed quickly by
1307                 * new packets almost always, and for protocols like TCP that
1308                 * wait for acks to really free up the data the extra memory
1309                 * is even less.  On the positive side we run the destructors
1310                 * on the sending CPU rather than on a potentially different
1311                 * completing CPU, usually a good thing.
1312                 *
1313                 * Run the destructor before telling the DMA engine about the
1314                 * packet to make sure it doesn't complete and get freed
1315                 * prematurely.
1316                 */
1317                struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
1318                struct sge_txq *tq = &txq->q;
1319                int last_desc;
1320
1321                /*
1322                 * If the Work Request header was an exact multiple of our TX
1323                 * Descriptor length, then it's possible that the starting SGL
1324                 * pointer lines up exactly with the end of our TX Descriptor
1325                 * ring.  If that's the case, wrap around to the beginning
1326                 * here ...
1327                 */
1328                if (unlikely((void *)sgl == (void *)tq->stat)) {
1329                        sgl = (void *)tq->desc;
1330                        end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
1331                }
1332
1333                write_sgl(skb, tq, sgl, end, 0, addr);
1334                skb_orphan(skb);
1335
1336                last_desc = tq->pidx + ndesc - 1;
1337                if (last_desc >= tq->size)
1338                        last_desc -= tq->size;
1339                tq->sdesc[last_desc].skb = skb;
1340                tq->sdesc[last_desc].sgl = sgl;
1341        }
1342
1343        /*
1344         * Advance our internal TX Queue state, tell the hardware about
1345         * the new TX descriptors and return success.
1346         */
1347        txq_advance(&txq->q, ndesc);
1348        dev->trans_start = jiffies;
1349        ring_tx_db(adapter, &txq->q, ndesc);
1350        return NETDEV_TX_OK;
1351
1352out_free:
1353        /*
1354         * An error of some sort happened.  Free the TX skb and tell the
1355         * OS that we've "dealt" with the packet ...
1356         */
1357        dev_kfree_skb(skb);
1358        return NETDEV_TX_OK;
1359}
1360
1361/**
1362 *      copy_frags - copy fragments from gather list into skb_shared_info
1363 *      @skb: destination skb
1364 *      @gl: source internal packet gather list
1365 *      @offset: packet start offset in first page
1366 *
1367 *      Copy an internal packet gather list into a Linux skb_shared_info
1368 *      structure.
1369 */
1370static inline void copy_frags(struct sk_buff *skb,
1371                              const struct pkt_gl *gl,
1372                              unsigned int offset)
1373{
1374        int i;
1375
1376        /* usually there's just one frag */
1377        __skb_fill_page_desc(skb, 0, gl->frags[0].page,
1378                             gl->frags[0].offset + offset,
1379                             gl->frags[0].size - offset);
1380        skb_shinfo(skb)->nr_frags = gl->nfrags;
1381        for (i = 1; i < gl->nfrags; i++)
1382                __skb_fill_page_desc(skb, i, gl->frags[i].page,
1383                                     gl->frags[i].offset,
1384                                     gl->frags[i].size);
1385
1386        /* get a reference to the last page, we don't own it */
1387        get_page(gl->frags[gl->nfrags - 1].page);
1388}
1389
1390/**
1391 *      t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
1392 *      @gl: the gather list
1393 *      @skb_len: size of sk_buff main body if it carries fragments
1394 *      @pull_len: amount of data to move to the sk_buff's main body
1395 *
1396 *      Builds an sk_buff from the given packet gather list.  Returns the
1397 *      sk_buff or %NULL if sk_buff allocation failed.
1398 */
1399struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
1400                                  unsigned int skb_len, unsigned int pull_len)
1401{
1402        struct sk_buff *skb;
1403
1404        /*
1405         * If the ingress packet is small enough, allocate an skb large enough
1406         * for all of the data and copy it inline.  Otherwise, allocate an skb
1407         * with enough room to pull in the header and reference the rest of
1408         * the data via the skb fragment list.
1409         *
1410         * Below we rely on RX_COPY_THRES being less than the smallest Rx
1411                 * buffer size, which is expected since buffers are at least
1412         * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
1413         * fragment.
1414         */
1415        if (gl->tot_len <= RX_COPY_THRES) {
1416                /* small packets have only one fragment */
1417                skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
1418                if (unlikely(!skb))
1419                        goto out;
1420                __skb_put(skb, gl->tot_len);
1421                skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1422        } else {
1423                skb = alloc_skb(skb_len, GFP_ATOMIC);
1424                if (unlikely(!skb))
1425                        goto out;
1426                __skb_put(skb, pull_len);
1427                skb_copy_to_linear_data(skb, gl->va, pull_len);
1428
1429                copy_frags(skb, gl, pull_len);
1430                skb->len = gl->tot_len;
1431                skb->data_len = skb->len - pull_len;
1432                skb->truesize += skb->data_len;
1433        }
1434
1435out:
1436        return skb;
1437}
1438
1439/**
1440 *      t4vf_pktgl_free - free a packet gather list
1441 *      @gl: the gather list
1442 *
1443 *      Releases the pages of a packet gather list.  We do not own the last
1444 *      page on the list and do not free it.
1445 */
1446void t4vf_pktgl_free(const struct pkt_gl *gl)
1447{
1448        int frag;
1449
1450        frag = gl->nfrags - 1;
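        /* Note: this releases frags[0..nfrags-2] only; the last page is not ours to free. */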
1451        while (frag--)
1452                put_page(gl->frags[frag].page);
1453}
1454
1455/**
1456 *      do_gro - perform Generic Receive Offload ingress packet processing
1457 *      @rxq: ingress RX Ethernet Queue
1458 *      @gl: gather list for ingress packet
1459 *      @pkt: CPL header for last packet fragment
1460 *
1461 *      Perform Generic Receive Offload (GRO) ingress packet processing.
1462 *      We use the standard Linux GRO interfaces for this.
1463 */
1464static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1465                   const struct cpl_rx_pkt *pkt)
1466{
1467        int ret;
1468        struct sk_buff *skb;
1469
1470        skb = napi_get_frags(&rxq->rspq.napi);
1471        if (unlikely(!skb)) {
1472                t4vf_pktgl_free(gl);
1473                rxq->stats.rx_drops++;
1474                return;
1475        }
1476
1477        copy_frags(skb, gl, PKTSHIFT);
1478        skb->len = gl->tot_len - PKTSHIFT;
1479        skb->data_len = skb->len;
1480        skb->truesize += skb->data_len;
1481        skb->ip_summed = CHECKSUM_UNNECESSARY;
1482        skb_record_rx_queue(skb, rxq->rspq.idx);
1483
1484        if (pkt->vlan_ex) {
1485                __vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1486                                        be16_to_cpu(pkt->vlan));
1487                rxq->stats.vlan_ex++;
1488        }
1489        ret = napi_gro_frags(&rxq->rspq.napi);
1490
1491        if (ret == GRO_HELD)
1492                rxq->stats.lro_pkts++;
1493        else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1494                rxq->stats.lro_merged++;
1495        rxq->stats.pkts++;
1496        rxq->stats.rx_cso++;
1497}
1498
1499/**
1500 *      t4vf_ethrx_handler - process an ingress ethernet packet
1501 *      @rspq: the response queue that received the packet
1502 *      @rsp: the response queue descriptor holding the RX_PKT message
1503 *      @gl: the gather list of packet fragments
1504 *
1505 *      Process an ingress ethernet packet and deliver it to the stack.
1506 */
1507int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1508                       const struct pkt_gl *gl)
1509{
1510        struct sk_buff *skb;
1511        const struct cpl_rx_pkt *pkt = (void *)rsp;
1512        bool csum_ok = pkt->csum_calc && !pkt->err_vec;
1513        struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1514
1515        /*
1516         * If this is a good TCP packet and we have Generic Receive Offload
1517         * enabled, handle the packet in the GRO path.
1518         */
1519        if ((pkt->l2info & cpu_to_be32(RXF_TCP)) &&
1520            (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
1521            !pkt->ip_frag) {
1522                do_gro(rxq, gl, pkt);
1523                return 0;
1524        }
1525
1526        /*
1527         * Convert the Packet Gather List into an skb.
1528         */
1529        skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
1530        if (unlikely(!skb)) {
1531                t4vf_pktgl_free(gl);
1532                rxq->stats.rx_drops++;
1533                return 0;
1534        }
1535        __skb_pull(skb, PKTSHIFT);
1536        skb->protocol = eth_type_trans(skb, rspq->netdev);
1537        skb_record_rx_queue(skb, rspq->idx);
1538        rxq->stats.pkts++;
1539
1540        if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
1541            !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
1542                if (!pkt->ip_frag)
1543                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1544                else {
1545                        __sum16 c = (__force __sum16)pkt->csum;
1546                        skb->csum = csum_unfold(c);
1547                        skb->ip_summed = CHECKSUM_COMPLETE;
1548                }
1549                rxq->stats.rx_cso++;
1550        } else
1551                skb_checksum_none_assert(skb);
1552
1553        if (pkt->vlan_ex) {
1554                rxq->stats.vlan_ex++;
1555                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
1556        }
1557
1558        netif_receive_skb(skb);
1559
1560        return 0;
1561}
1562
1563/**
1564 *      is_new_response - check if a response is newly written
1565 *      @rc: the response control descriptor
1566 *      @rspq: the response queue
1567 *
1568 *      Returns true if a response descriptor contains a yet unprocessed
1569 *      response.
1570 */
1571static inline bool is_new_response(const struct rsp_ctrl *rc,
1572                                   const struct sge_rspq *rspq)
1573{
1574        return RSPD_GEN(rc->type_gen) == rspq->gen;
1575}
1576
1577/**
1578 *      restore_rx_bufs - put back a packet's RX buffers
1579 *      @gl: the packet gather list
1580 *      @fl: the SGE Free List
1581 *      @frags: how many fragments in @gl
1582 *
1583 *      Called when we find out that the current packet, @gl, can't be
1584 *      processed right away for some reason.  This is a very rare event and
1585 *      there's no effort to make this suspension/resumption process
1586 *      particularly efficient.
1587 *
1588 *      We implement the suspension by putting all of the RX buffers associated
1589 *      with the current packet back on the original Free List.  The buffers
1590 *      have already been unmapped and are left unmapped; we mark them as
1591 *      unmapped in order to prevent further unmapping attempts.  (Effectively
1592 *      this function undoes the series of @unmap_rx_buf calls which were done
1593 *      to create the current packet's gather list.)  This leaves us ready to
1594 *      restart processing of the packet the next time we start processing the
1595 *      RX Queue ...
1596 */
1597static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
1598                            int frags)
1599{
1600        struct rx_sw_desc *sdesc;
1601
1602        while (frags--) {
1603                if (fl->cidx == 0)
1604                        fl->cidx = fl->size - 1;
1605                else
1606                        fl->cidx--;
1607                sdesc = &fl->sdesc[fl->cidx];
1608                sdesc->page = gl->frags[frags].page;
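                /* flag the address as unmapped so no further unmap attempt is made on it */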
1609                sdesc->dma_addr |= RX_UNMAPPED_BUF;
1610                fl->avail++;
1611        }
1612}
1613
1614/**
1615 *      rspq_next - advance to the next entry in a response queue
1616 *      @rspq: the queue
1617 *
1618 *      Updates the state of a response queue to advance it to the next entry.
1619 */
1620static inline void rspq_next(struct sge_rspq *rspq)
1621{
1622        rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
1623        if (unlikely(++rspq->cidx == rspq->size)) {
1624                rspq->cidx = 0;
1625                rspq->gen ^= 1;
1626                rspq->cur_desc = rspq->desc;
1627        }
1628}
1629
1630/**
1631 *      process_responses - process responses from an SGE response queue
1632 *      @rspq: the ingress response queue to process
1633 *      @budget: how many responses can be processed in this round
1634 *
1635 *      Process responses from a Scatter Gather Engine response queue up to
1636 *      the supplied budget.  Responses include received packets as well as
1637 *      control messages from firmware or hardware.
1638 *
1639 *      Additionally choose the interrupt holdoff time for the next interrupt
1640 *      on this queue.  If the system is under memory shortage, use a fairly
1641 *      long delay to help recovery.
1642 */
1643int process_responses(struct sge_rspq *rspq, int budget)
1644{
1645        struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1646        int budget_left = budget;
1647
1648        while (likely(budget_left)) {
1649                int ret, rsp_type;
1650                const struct rsp_ctrl *rc;
1651
1652                rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
1653                if (!is_new_response(rc, rspq))
1654                        break;
1655
1656                /*
1657                 * Figure out what kind of response we've received from the
1658                 * SGE.
1659                 */
1660                rmb();
1661                rsp_type = RSPD_TYPE(rc->type_gen);
1662                if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1663                        struct page_frag *fp;
1664                        struct pkt_gl gl;
1665                        const struct rx_sw_desc *sdesc;
1666                        u32 bufsz, frag;
1667                        u32 len = be32_to_cpu(rc->pldbuflen_qid);
1668
1669                        /*
1670                         * If we get a "new buffer" message from the SGE we
1671                         * need to move on to the next Free List buffer.
1672                         */
1673                        if (len & RSPD_NEWBUF) {
1674                                /*
1675                                 * We get one "new buffer" message when we
1676                                 * first start up a queue so we need to ignore
1677                                 * it when our offset into the buffer is 0.
1678                                 */
1679                                if (likely(rspq->offset > 0)) {
1680                                        free_rx_bufs(rspq->adapter, &rxq->fl,
1681                                                     1);
1682                                        rspq->offset = 0;
1683                                }
1684                                len = RSPD_LEN(len);
1685                        }
1686                        gl.tot_len = len;
1687
1688                        /*
1689                         * Gather packet fragments.
1690                         */
1691                        for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
1692                                BUG_ON(frag >= MAX_SKB_FRAGS);
1693                                BUG_ON(rxq->fl.avail == 0);
1694                                sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
1695                                bufsz = get_buf_size(sdesc);
1696                                fp->page = sdesc->page;
1697                                fp->offset = rspq->offset;
1698                                fp->size = min(bufsz, len);
1699                                len -= fp->size;
1700                                if (!len)
1701                                        break;
1702                                unmap_rx_buf(rspq->adapter, &rxq->fl);
1703                        }
1704                        gl.nfrags = frag+1;
1705
1706                        /*
1707                         * Last buffer remains mapped so explicitly make it
1708                         * coherent for CPU access and start preloading first
1709                         * cache line ...
1710                         */
1711                        dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
1712                                                get_buf_addr(sdesc),
1713                                                fp->size, DMA_FROM_DEVICE);
1714                        gl.va = (page_address(gl.frags[0].page) +
1715                                 gl.frags[0].offset);
1716                        prefetch(gl.va);
1717
1718                        /*
1719                         * Hand the new ingress packet to the handler for
1720                         * this Response Queue.
1721                         */
1722                        ret = rspq->handler(rspq, rspq->cur_desc, &gl);
1723                        if (likely(ret == 0))
1724                                rspq->offset += ALIGN(fp->size, FL_ALIGN);
1725                        else
1726                                restore_rx_bufs(&gl, &rxq->fl, frag);
1727                } else if (likely(rsp_type == RSP_TYPE_CPL)) {
1728                        ret = rspq->handler(rspq, rspq->cur_desc, NULL);
1729                } else {
1730                        WARN_ON(rsp_type > RSP_TYPE_CPL);
1731                        ret = 0;
1732                }
1733
1734                if (unlikely(ret)) {
1735                        /*
1736                         * Couldn't process descriptor, back off for recovery.
1737                         * We use the SGE's last timer which has the longest
1738                         * interrupt coalescing value ...
1739                         */
1740                        const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
1741                        rspq->next_intr_params =
1742                                QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
1743                        break;
1744                }
1745
1746                rspq_next(rspq);
1747                budget_left--;
1748        }
1749
1750        /*
1751         * If this is a Response Queue with an associated Free List and
1752         * at least two Egress Queue units available in the Free List
1753         * for new buffer pointers, refill the Free List.
1754         */
1755        if (rspq->offset >= 0 &&
1756            rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
1757                __refill_fl(rspq->adapter, &rxq->fl);
1758        return budget - budget_left;
1759}
1760
1761/**
1762 *      napi_rx_handler - the NAPI handler for RX processing
1763 *      @napi: the napi instance
1764 *      @budget: how many packets we can process in this round
1765 *
1766 *      Handler for new data events when using NAPI.  This does not need any
1767 *      locking or protection from interrupts as data interrupts are off at
1768 *      this point and other adapter interrupts do not interfere (the latter
1769 *      in not a concern at all with MSI-X as non-data interrupts then have
1770 *      is not a concern at all with MSI-X as non-data interrupts then have
1771 */
1772static int napi_rx_handler(struct napi_struct *napi, int budget)
1773{
1774        unsigned int intr_params;
1775        struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
1776        int work_done = process_responses(rspq, budget);
1777
1778        if (likely(work_done < budget)) {
1779                napi_complete(napi);
1780                intr_params = rspq->next_intr_params;
1781                rspq->next_intr_params = rspq->intr_params;
1782        } else
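                /* Out of budget: just update the CIDX; don't re-arm an
                 * interrupt holdoff timer since NAPI will poll us again shortly.
                 */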
1783                intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
1784
1785        if (unlikely(work_done == 0))
1786                rspq->unhandled_irqs++;
1787
1788        t4_write_reg(rspq->adapter,
1789                     T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1790                     CIDXINC(work_done) |
1791                     INGRESSQID((u32)rspq->cntxt_id) |
1792                     SEINTARM(intr_params));
1793        return work_done;
1794}
1795
1796/*
1797 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
1798 * (i.e., response queue serviced by NAPI polling).
1799 */
1800irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
1801{
1802        struct sge_rspq *rspq = cookie;
1803
1804        napi_schedule(&rspq->napi);
1805        return IRQ_HANDLED;
1806}
1807
1808/*
1809 * Process the indirect interrupt entries in the interrupt queue and kick off
1810 * NAPI for each queue that has generated an entry.
1811 */
1812static unsigned int process_intrq(struct adapter *adapter)
1813{
1814        struct sge *s = &adapter->sge;
1815        struct sge_rspq *intrq = &s->intrq;
1816        unsigned int work_done;
1817
1818        spin_lock(&adapter->sge.intrq_lock);
1819        for (work_done = 0; ; work_done++) {
1820                const struct rsp_ctrl *rc;
1821                unsigned int qid, iq_idx;
1822                struct sge_rspq *rspq;
1823
1824                /*
1825                 * Grab the next response from the interrupt queue and bail
1826                 * out if it's not a new response.
1827                 */
1828                rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
1829                if (!is_new_response(rc, intrq))
1830                        break;
1831
1832                /*
1833                 * If the response isn't a forwarded interrupt message, issue an
1834                 * error and go on to the next response message.  This should
1835                 * never happen ...
1836                 */
1837                rmb();
1838                if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
1839                        dev_err(adapter->pdev_dev,
1840                                "Unexpected INTRQ response type %d\n",
1841                                RSPD_TYPE(rc->type_gen));
1842                        continue;
1843                }
1844
1845                /*
1846                 * Extract the Queue ID from the interrupt message and perform
1847                 * sanity checking to make sure it really refers to one of our
1848                 * Ingress Queues which is active and matches the queue's ID.
1849                 * None of these error conditions should ever happen so we may
1850                 * want to make them fatal and/or conditionalize them under
1851                 * DEBUG.
1852                 */
1853                qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
1854                iq_idx = IQ_IDX(s, qid);
1855                if (unlikely(iq_idx >= MAX_INGQ)) {
1856                        dev_err(adapter->pdev_dev,
1857                                "Ingress QID %d out of range\n", qid);
1858                        continue;
1859                }
1860                rspq = s->ingr_map[iq_idx];
1861                if (unlikely(rspq == NULL)) {
1862                        dev_err(adapter->pdev_dev,
1863                                "Ingress QID %d RSPQ=NULL\n", qid);
1864                        continue;
1865                }
1866                if (unlikely(rspq->abs_id != qid)) {
1867                        dev_err(adapter->pdev_dev,
1868                                "Ingress QID %d refers to RSPQ %d\n",
1869                                qid, rspq->abs_id);
1870                        continue;
1871                }
1872
1873                /*
1874                 * Schedule NAPI processing on the indicated Response Queue
1875                 * and move on to the next entry in the Forwarded Interrupt
1876                 * Queue.
1877                 */
1878                napi_schedule(&rspq->napi);
1879                rspq_next(intrq);
1880        }
1881
1882        t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1883                     CIDXINC(work_done) |
1884                     INGRESSQID(intrq->cntxt_id) |
1885                     SEINTARM(intrq->intr_params));
1886
1887        spin_unlock(&adapter->sge.intrq_lock);
1888
1889        return work_done;
1890}
1891
1892/*
1893 * The MSI interrupt handler handles data events from SGE response queues as
1894 * well as error and other async events as they all use the same MSI vector.
1895 */
1896irqreturn_t t4vf_intr_msi(int irq, void *cookie)
1897{
1898        struct adapter *adapter = cookie;
1899
1900        process_intrq(adapter);
1901        return IRQ_HANDLED;
1902}
1903
1904/**
1905 *      t4vf_intr_handler - select the top-level interrupt handler
1906 *      @adapter: the adapter
1907 *
1908 *      Selects the top-level interrupt handler based on the type of interrupts
1909 *      (MSI-X or MSI).
1910 */
1911irq_handler_t t4vf_intr_handler(struct adapter *adapter)
1912{
1913        BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
1914        if (adapter->flags & USING_MSIX)
1915                return t4vf_sge_intr_msix;
1916        else
1917                return t4vf_intr_msi;
1918}
1919
1920/**
1921 *      sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
1922 *      @data: the adapter
1923 *
1924 *      Runs periodically from a timer to perform maintenance of SGE RX queues.
1925 *
1926 *      Replenishes RX queues that have run out due to memory shortage.
1927 *      Normally new RX buffers are added when existing ones are consumed but
1928 *      when out of memory a queue can become empty.  We schedule NAPI to do
1929 *      the actual refill.
1930 */
1931static void sge_rx_timer_cb(unsigned long data)
1932{
1933        struct adapter *adapter = (struct adapter *)data;
1934        struct sge *s = &adapter->sge;
1935        unsigned int i;
1936
1937        /*
1938         * Scan the "Starving Free Lists" flag array looking for any Free
1939         * Lists in need of more free buffers.  If we find one and it's not
1940         * being actively polled, then bump its "starving" counter and attempt
1941         * to refill it.  If we're successful in adding enough buffers to push
1942         * the Free List over the starving threshold, then we can clear its
1943         * "starving" status.
1944         */
1945        for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
1946                unsigned long m;
1947
1948                for (m = s->starving_fl[i]; m; m &= m - 1) {
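                        /* "m &= m - 1" clears the lowest set bit, so each
                         * iteration services exactly one starving Free List.
                         */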
1949                        unsigned int id = __ffs(m) + i * BITS_PER_LONG;
1950                        struct sge_fl *fl = s->egr_map[id];
1951
1952                        clear_bit(id, s->starving_fl);
1953                        smp_mb__after_clear_bit();
1954
1955                        /*
1956                         * Since we are accessing fl without a lock there's a
1957                         * small probability of a false positive where we
1958                         * schedule napi but the FL is no longer starving.
1959                         * No biggie.
1960                         */
1961                        if (fl_starving(fl)) {
1962                                struct sge_eth_rxq *rxq;
1963
1964                                rxq = container_of(fl, struct sge_eth_rxq, fl);
1965                                if (napi_reschedule(&rxq->rspq.napi))
1966                                        fl->starving++;
1967                                else
1968                                        set_bit(id, s->starving_fl);
1969                        }
1970                }
1971        }
1972
1973        /*
1974         * Reschedule the next scan for starving Free Lists ...
1975         */
1976        mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
1977}
1978
1979/**
1980 *      sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
1981 *      @data: the adapter
1982 *
1983 *      Runs periodically from a timer to perform maintenance of SGE TX queues.
1984 *
1985 *      Reclaims completed Tx packets for the Ethernet queues.  Normally
1986 *      packets are cleaned up by new Tx packets, this timer cleans up packets
1987 *      when no new packets are being submitted.  This is essential for pktgen,
1988 *      at least.
1989 */
1990static void sge_tx_timer_cb(unsigned long data)
1991{
1992        struct adapter *adapter = (struct adapter *)data;
1993        struct sge *s = &adapter->sge;
1994        unsigned int i, budget;
1995
1996        budget = MAX_TIMER_TX_RECLAIM;
1997        i = s->ethtxq_rover;
1998        do {
1999                struct sge_eth_txq *txq = &s->ethtxq[i];
2000
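                /* Only reclaim if we can take the TX lock without waiting;
                 * a busy queue's own transmit path will do the reclaim for us.
                 */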
2001                if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
2002                        int avail = reclaimable(&txq->q);
2003
2004                        if (avail > budget)
2005                                avail = budget;
2006
2007                        free_tx_desc(adapter, &txq->q, avail, true);
2008                        txq->q.in_use -= avail;
2009                        __netif_tx_unlock(txq->txq);
2010
2011                        budget -= avail;
2012                        if (!budget)
2013                                break;
2014                }
2015
2016                i++;
2017                if (i >= s->ethqsets)
2018                        i = 0;
2019        } while (i != s->ethtxq_rover);
2020        s->ethtxq_rover = i;
2021
2022        /*
2023         * If we found too many reclaimable packets schedule a timer in the
2024         * near future to continue where we left off.  Otherwise the next timer
2025         * will be at its normal interval.
2026         */
2027        mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2028}
2029
2030/**
2031 *      t4vf_sge_alloc_rxq - allocate an SGE RX Queue
2032 *      @adapter: the adapter
2033 *      @rspq: pointer to the new rxq's Response Queue to be filled in
2034 *      @iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
2035 *      @dev: the network device associated with the new rspq
2036 *      @intr_dest: MSI-X vector index (overridden in MSI mode)
2037 *      @fl: pointer to the new rxq's Free List to be filled in
2038 *      @hnd: the interrupt handler to invoke for the rspq
2039 */
2040int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2041                       bool iqasynch, struct net_device *dev,
2042                       int intr_dest,
2043                       struct sge_fl *fl, rspq_handler_t hnd)
2044{
2045        struct port_info *pi = netdev_priv(dev);
2046        struct fw_iq_cmd cmd, rpl;
2047        int ret, iqandst, flsz = 0;
2048
2049        /*
2050         * If we're using MSI interrupts and we're not initializing the
2051         * Forwarded Interrupt Queue itself, then set up this queue for
2052         * indirect interrupts to the Forwarded Interrupt Queue.  Obviously
2053         * the Forwarded Interrupt Queue must be set up before any other
2054         * ingress queue ...
2055         */
2056        if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
2057                iqandst = SGE_INTRDST_IQ;
2058                intr_dest = adapter->sge.intrq.abs_id;
2059        } else
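                /* MSI-X, or the Forwarded Interrupt Queue itself: interrupts
                 * are delivered directly to a PCI interrupt vector.
                 */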
2060                iqandst = SGE_INTRDST_PCI;
2061
2062        /*
2063         * Allocate the hardware ring for the Response Queue.  The size needs
2064         * to be a multiple of 16 which includes the mandatory status entry
2065         * (regardless of whether the Status Page capabilities are enabled or
2066         * not).
2067         */
2068        rspq->size = roundup(rspq->size, 16);
2069        rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
2070                                0, &rspq->phys_addr, NULL, 0);
2071        if (!rspq->desc)
2072                return -ENOMEM;
2073
2074        /*
2075         * Fill in the Ingress Queue Command.  Note: Ideally this code would
2076         * be in t4vf_hw.c but there are so many parameters and dependencies
2077         * on our Linux SGE state that we would end up having to pass tons of
2078         * parameters.  We'll have to think about how this might be migrated
2079         * into OS-independent common code ...
2080         */
2081        memset(&cmd, 0, sizeof(cmd));
2082        cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_IQ_CMD) |
2083                                    FW_CMD_REQUEST |
2084                                    FW_CMD_WRITE |
2085                                    FW_CMD_EXEC);
2086        cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC |
2087                                         FW_IQ_CMD_IQSTART(1) |
2088                                         FW_LEN16(cmd));
2089        cmd.type_to_iqandstindex =
2090                cpu_to_be32(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
2091                            FW_IQ_CMD_IQASYNCH(iqasynch) |
2092                            FW_IQ_CMD_VIID(pi->viid) |
2093                            FW_IQ_CMD_IQANDST(iqandst) |
2094                            FW_IQ_CMD_IQANUS(1) |
2095                            FW_IQ_CMD_IQANUD(SGE_UPDATEDEL_INTR) |
2096                            FW_IQ_CMD_IQANDSTINDEX(intr_dest));
2097        cmd.iqdroprss_to_iqesize =
2098                cpu_to_be16(FW_IQ_CMD_IQPCIECH(pi->port_id) |
2099                            FW_IQ_CMD_IQGTSMODE |
2100                            FW_IQ_CMD_IQINTCNTTHRESH(rspq->pktcnt_idx) |
2101                            FW_IQ_CMD_IQESIZE(ilog2(rspq->iqe_len) - 4));
2102        cmd.iqsize = cpu_to_be16(rspq->size);
2103        cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
2104
2105        if (fl) {
2106                /*
2107                 * Allocate the ring for the hardware free list (with space
2108                 * for its status page) along with the associated software
2109                 * descriptor ring.  The free list size needs to be a multiple
2110                 * of the Egress Queue Unit.
2111                 */
2112                fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
2113                fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
2114                                      sizeof(__be64), sizeof(struct rx_sw_desc),
2115                                      &fl->addr, &fl->sdesc, STAT_LEN);
2116                if (!fl->desc) {
2117                        ret = -ENOMEM;
2118                        goto err;
2119                }
2120
2121                /*
2122                 * Calculate the size of the hardware free list ring plus
2123                 * Status Page (which the SGE will place after the end of the
2124                 * free list ring) in Egress Queue Units.
2125                 */
2126                flsz = (fl->size / FL_PER_EQ_UNIT +
2127                        STAT_LEN / EQ_UNIT);
2128
2129                /*
2130                 * Fill in all the relevant firmware Ingress Queue Command
2131                 * fields for the free list.
2132                 */
2133                cmd.iqns_to_fl0congen =
2134                        cpu_to_be32(
2135                                FW_IQ_CMD_FL0HOSTFCMODE(SGE_HOSTFCMODE_NONE) |
2136                                FW_IQ_CMD_FL0PACKEN(1) |
2137                                FW_IQ_CMD_FL0PADEN(1));
2138                cmd.fl0dcaen_to_fl0cidxfthresh =
2139                        cpu_to_be16(
2140                                FW_IQ_CMD_FL0FBMIN(SGE_FETCHBURSTMIN_64B) |
2141                                FW_IQ_CMD_FL0FBMAX(SGE_FETCHBURSTMAX_512B));
2142                cmd.fl0size = cpu_to_be16(flsz);
2143                cmd.fl0addr = cpu_to_be64(fl->addr);
2144        }
2145
2146        /*
2147         * Issue the firmware Ingress Queue Command and extract the results if
2148         * it completes successfully.
2149         */
2150        ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2151        if (ret)
2152                goto err;
2153
2154        netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
2155        rspq->cur_desc = rspq->desc;
2156        rspq->cidx = 0;
2157        rspq->gen = 1;
2158        rspq->next_intr_params = rspq->intr_params;
2159        rspq->cntxt_id = be16_to_cpu(rpl.iqid);
2160        rspq->abs_id = be16_to_cpu(rpl.physiqid);
2161        rspq->size--;                   /* subtract status entry */
2162        rspq->adapter = adapter;
2163        rspq->netdev = dev;
2164        rspq->handler = hnd;
2165
2166        /* set offset to -1 to distinguish ingress queues without FL */
2167        rspq->offset = fl ? 0 : -1;
2168
2169        if (fl) {
2170                fl->cntxt_id = be16_to_cpu(rpl.fl0id);
2171                fl->avail = 0;
2172                fl->pend_cred = 0;
2173                fl->pidx = 0;
2174                fl->cidx = 0;
2175                fl->alloc_failed = 0;
2176                fl->large_alloc_failed = 0;
2177                fl->starving = 0;
2178                refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
2179        }
2180
2181        return 0;
2182
2183err:
2184        /*
2185         * An error occurred.  Clean up our partial allocation state and
2186         * return the error.
2187         */
2188        if (rspq->desc) {
2189                dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
2190                                  rspq->desc, rspq->phys_addr);
2191                rspq->desc = NULL;
2192        }
2193        if (fl && fl->desc) {
2194                kfree(fl->sdesc);
2195                fl->sdesc = NULL;
2196                dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
2197                                  fl->desc, fl->addr);
2198                fl->desc = NULL;
2199        }
2200        return ret;
2201}
2202
2203/**
2204 *      t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
2205 *      @adapter: the adapter
2206 *      @txq: pointer to the new txq to be filled in
     *      @dev: the network device that will own the new txq
2207 *      @devq: the network TX queue associated with the new txq
2208 *      @iqid: the relative ingress queue ID to which events relating to
2209 *              the new txq should be directed
2210 */
2211int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2212                           struct net_device *dev, struct netdev_queue *devq,
2213                           unsigned int iqid)
2214{
2215        int ret, nentries;
2216        struct fw_eq_eth_cmd cmd, rpl;
2217        struct port_info *pi = netdev_priv(dev);
2218
2219        /*
2220         * Calculate the size of the hardware TX Queue (including the Status
2221         * Page on the end of the TX Queue) in units of TX Descriptors.
2222         */
2223        nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2224
2225        /*
2226         * Allocate the hardware ring for the TX ring (with space for its
2227         * status page) along with the associated software descriptor ring.
2228         */
2229        txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
2230                                 sizeof(struct tx_desc),
2231                                 sizeof(struct tx_sw_desc),
2232                                 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
2233        if (!txq->q.desc)
2234                return -ENOMEM;
2235
2236        /*
2237         * Fill in the Egress Queue Command.  Note: As with the direct use of
2238 *      the firmware Ingress Queue Command above in our RXQ allocation
2239         * routine, ideally, this code would be in t4vf_hw.c.  Again, we'll
2240         * have to see if there's some reasonable way to parameterize it
2241         * into the common code ...
2242         */
2243        memset(&cmd, 0, sizeof(cmd));
2244        cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_EQ_ETH_CMD) |
2245                                    FW_CMD_REQUEST |
2246                                    FW_CMD_WRITE |
2247                                    FW_CMD_EXEC);
2248        cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
2249                                         FW_EQ_ETH_CMD_EQSTART |
2250                                         FW_LEN16(cmd));
2251        cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid));
2252        cmd.fetchszm_to_iqid =
2253                cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
2254                            FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
2255                            FW_EQ_ETH_CMD_IQID(iqid));
2256        cmd.dcaen_to_eqsize =
2257                cpu_to_be32(FW_EQ_ETH_CMD_FBMIN(SGE_FETCHBURSTMIN_64B) |
2258                            FW_EQ_ETH_CMD_FBMAX(SGE_FETCHBURSTMAX_512B) |
2259                            FW_EQ_ETH_CMD_CIDXFTHRESH(SGE_CIDXFLUSHTHRESH_32) |
2260                            FW_EQ_ETH_CMD_EQSIZE(nentries));
2261        cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
2262
2263        /*
2264         * Issue the firmware Egress Queue Command and extract the results if
2265         * it completes successfully.
2266         */
2267        ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2268        if (ret) {
2269                /*
2270                 * The firmware Egress Queue Command failed for some reason.
2271                 * Free up our partial allocation state and return the error.
2272                 */
2273                kfree(txq->q.sdesc);
2274                txq->q.sdesc = NULL;
2275                dma_free_coherent(adapter->pdev_dev,
2276                                  nentries * sizeof(struct tx_desc),
2277                                  txq->q.desc, txq->q.phys_addr);
2278                txq->q.desc = NULL;
2279                return ret;
2280        }
2281
2282        txq->q.in_use = 0;
2283        txq->q.cidx = 0;
2284        txq->q.pidx = 0;
2285        txq->q.stat = (void *)&txq->q.desc[txq->q.size];
2286        txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_GET(be32_to_cpu(rpl.eqid_pkd));
2287        txq->q.abs_id =
2288                FW_EQ_ETH_CMD_PHYSEQID_GET(be32_to_cpu(rpl.physeqid_pkd));
2289        txq->txq = devq;
2290        txq->tso = 0;
2291        txq->tx_cso = 0;
2292        txq->vlan_ins = 0;
2293        txq->q.stops = 0;
2294        txq->q.restarts = 0;
2295        txq->mapping_err = 0;
2296        return 0;
2297}
2298
2299/*
2300 * Free the DMA map resources associated with a TX queue.
2301 */
2302static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2303{
2304        dma_free_coherent(adapter->pdev_dev,
2305                          tq->size * sizeof(*tq->desc) + STAT_LEN,
2306                          tq->desc, tq->phys_addr);
2307        tq->cntxt_id = 0;
2308        tq->sdesc = NULL;
2309        tq->desc = NULL;
2310}
2311
2312/*
2313 * Free the resources associated with a response queue (possibly including a
2314 * free list).
2315 */
2316static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
2317                         struct sge_fl *fl)
2318{
2319        unsigned int flid = fl ? fl->cntxt_id : 0xffff;
2320
2321        t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
2322                     rspq->cntxt_id, flid, 0xffff);
2323        dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
2324                          rspq->desc, rspq->phys_addr);
2325        netif_napi_del(&rspq->napi);
2326        rspq->netdev = NULL;
2327        rspq->cntxt_id = 0;
2328        rspq->abs_id = 0;
2329        rspq->desc = NULL;
2330
2331        if (fl) {
2332                free_rx_bufs(adapter, fl, fl->avail);
2333                dma_free_coherent(adapter->pdev_dev,
2334                                  fl->size * sizeof(*fl->desc) + STAT_LEN,
2335                                  fl->desc, fl->addr);
2336                kfree(fl->sdesc);
2337                fl->sdesc = NULL;
2338                fl->cntxt_id = 0;
2339                fl->desc = NULL;
2340        }
2341}
2342
2343/**
2344 *      t4vf_free_sge_resources - free SGE resources
2345 *      @adapter: the adapter
2346 *
2347 *      Frees resources used by the SGE queue sets.
2348 */
2349void t4vf_free_sge_resources(struct adapter *adapter)
2350{
2351        struct sge *s = &adapter->sge;
2352        struct sge_eth_rxq *rxq = s->ethrxq;
2353        struct sge_eth_txq *txq = s->ethtxq;
2354        struct sge_rspq *evtq = &s->fw_evtq;
2355        struct sge_rspq *intrq = &s->intrq;
2356        int qs;
2357
2358        for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
2359                if (rxq->rspq.desc)
2360                        free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
2361                if (txq->q.desc) {
2362                        t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
2363                        free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
2364                        kfree(txq->q.sdesc);
2365                        free_txq(adapter, &txq->q);
2366                }
2367        }
2368        if (evtq->desc)
2369                free_rspq_fl(adapter, evtq, NULL);
2370        if (intrq->desc)
2371                free_rspq_fl(adapter, intrq, NULL);
2372}
2373
2374/**
2375 *      t4vf_sge_start - enable SGE operation
2376 *      @adapter: the adapter
2377 *
2378 *      Start tasklets and timers associated with the DMA engine.
2379 */
2380void t4vf_sge_start(struct adapter *adapter)
2381{
2382        adapter->sge.ethtxq_rover = 0;
2383        mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2384        mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2385}
2386
2387/**
2388 *      t4vf_sge_stop - disable SGE operation
2389 *      @adapter: the adapter
2390 *
2391 *      Stop tasklets and timers associated with the DMA engine.  Note that
2392 *      this is effective only if measures have been taken to disable any HW
2393 *      events that may restart them.
2394 */
2395void t4vf_sge_stop(struct adapter *adapter)
2396{
2397        struct sge *s = &adapter->sge;
2398
2399        if (s->rx_timer.function)
2400                del_timer_sync(&s->rx_timer);
2401        if (s->tx_timer.function)
2402                del_timer_sync(&s->tx_timer);
2403}
2404
2405/**
2406 *      t4vf_sge_init - initialize SGE
2407 *      @adapter: the adapter
2408 *
2409 *      Performs SGE initialization needed every time after a chip reset.
2410 *      We do not initialize any of the queue sets here; instead, the driver
2411 *      top-level must request those individually.  We also do not enable DMA
2412 *      here, that should be done after the queues have been set up.
2413 */
2414int t4vf_sge_init(struct adapter *adapter)
2415{
2416        struct sge_params *sge_params = &adapter->params.sge;
2417        u32 fl0 = sge_params->sge_fl_buffer_size[0];
2418        u32 fl1 = sge_params->sge_fl_buffer_size[1];
2419        struct sge *s = &adapter->sge;
2420
2421        /*
2422         * Start by vetting the basic SGE parameters which have been set up by
2423         * the Physical Function Driver.  Ideally we should be able to deal
2424         * with _any_ configuration.  Practice is different ...
2425         */
2426        if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
2427                dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
2428                        fl0, fl1);
2429                return -EINVAL;
2430        }
2431        if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) {
2432                dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2433                return -EINVAL;
2434        }
2435
2436        /*
2437         * Now translate the adapter parameters into our internal forms.
2438         */
2439        if (fl1)
2440                FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
2441        STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
2442                    ? 128 : 64);
2443        PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
2444        FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
2445                         SGE_INGPADBOUNDARY_SHIFT);
2446
2447        /*
2448         * Set up tasklet timers.
2449         */
2450        setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
2451        setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
2452
2453        /*
2454         * Initialize Forwarded Interrupt Queue lock.
2455         */
2456        spin_lock_init(&s->intrq_lock);
2457
2458        return 0;
2459}
2460