linux/drivers/net/cxgb4/cxgb4_main.c
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#define DRV_VERSION "1.3.0-ko"
#define DRV_DESC "Chelsio T4 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

#ifdef CONFIG_PCI_IOV
/*
 * Virtual Function provisioning constants.  We need two extra Ingress Queues
 * with Interrupt capability to serve as the VF's Firmware Event Queue and
 * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
 * Lists associated with them.  For each Ethernet/Control Egress Queue and
 * for each Free List, we need an Egress Context.
 */
enum {
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */

        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
        VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */

        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
};

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
                                  unsigned int pf, unsigned int vf)
{
        unsigned int portn, portvec;

        /*
         * Give PF's access to all of the ports.
         */
        if (vf == 0)
                return FW_PFVF_CMD_PMASK_MASK;

        /*
         * For VFs, we'll assign them access to the ports based purely on the
         * PF.  We assign active ports in order, wrapping around if there are
         * fewer active ports than PFs: e.g. active port[pf % nports].
         * Unfortunately the adapter's port_info structs haven't been
         * initialized yet so we have to compute this.
         */
        if (adapter->params.nports == 0)
                return 0;

        portn = pf % adapter->params.nports;
        portvec = adapter->params.portvec;
        for (;;) {
                /*
                 * Isolate the lowest set bit in the port vector.  If we're at
                 * the port number that we want, return that as the pmask;
                 * otherwise mask that bit out of the port vector and
                 * decrement our port number ...
                 */
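                /*
                 * Note: portvec ^ (portvec & (portvec - 1)) is the classic
                 * lowest-set-bit trick, equivalent to portvec & -portvec.
                 */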
                unsigned int pmask = portvec ^ (portvec & (portvec-1));
                if (portn == 0)
                        return pmask;
                portn--;
                portvec &= ~pmask;
        }
        /*NOTREACHED*/
}
#endif

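/*
 * PCI-E memory window apertures (sizes in bytes) and their base addresses.
 */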
enum {
        MEMWIN0_APERTURE = 65536,
        MEMWIN0_BASE     = 0x30000,
        MEMWIN1_APERTURE = 32768,
        MEMWIN1_BASE     = 0x28000,
        MEMWIN2_APERTURE = 2048,
        MEMWIN2_BASE     = 0x1b800,
};

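/*
 * Bounds on the SGE queue sizes that may be requested, e.g. through
 * ethtool's ring-parameter interface (see set_sge_param() below).
 */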
enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES     = 16384,
        MAX_RX_BUFFERS       = 16384,
        MIN_TXQ_ENTRIES      = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES     = 128,
        MIN_FL_ENTRIES       = 16
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0xa000, 0),  /* PE10K */
        CH_DEVICE(0x4001, -1),
        CH_DEVICE(0x4002, -1),
        CH_DEVICE(0x4003, -1),
        CH_DEVICE(0x4004, -1),
        CH_DEVICE(0x4005, -1),
        CH_DEVICE(0x4006, -1),
        CH_DEVICE(0x4007, -1),
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
        CH_DEVICE(0x4404, 4),
        CH_DEVICE(0x4405, 4),
        CH_DEVICE(0x4406, 4),
        CH_DEVICE(0x4407, 4),
        CH_DEVICE(0x4408, 4),
        CH_DEVICE(0x4409, 4),
        CH_DEVICE(0x440a, 4),
        { 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");

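/*
 * Example: both arrays take comma-separated lists at module load time, e.g.
 *
 *     modprobe cxgb4 intr_holdoff=5,10,20,50,100 intr_cnt=4,8,16
 *
 * (the values shown are the defaults above).
 */
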
static int vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

static unsigned int num_vf[4];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

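/*
 * Report link status: prints the negotiated speed and flow-control (PAUSE)
 * settings when the link is up, or "link down" otherwise.
 */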
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}

/**
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /*
         * We do not set address filters and promiscuity here; the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0)
                ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
        return ret;
}

/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */
        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

                if (p->type == 0)
                        t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
        return 0;
}

/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

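/*
 * Disable MSI-X or MSI, whichever interrupt mode the adapter is using, and
 * clear the corresponding flag.
 */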
static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
        if (v & PFSW) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);
}

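/*
 * Request the MSI-X queue IRQs: vector 1 serves the FW event queue and
 * vectors 2 and up serve the Ethernet, offload, and RDMA Rx queues, in that
 * order.  (Vector 0, the non-data interrupt, is requested separately.)
 * On failure, release everything requested so far.
 */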
static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
                                  adap->msix_info[msi].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi++;
        }
        return 0;

unwind:
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

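/*
 * Release the queue IRQs requested by request_msix_queue_irqs().
 */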
static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
}

/**
 *      write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;

        err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                err = write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}

/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}

/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if the flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
        int ret;
        u32 vers;
        const struct fw_hdr *hdr;
        const struct firmware *fw;
        struct device *dev = adap->pdev_dev;

        ret = request_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "unable to load firmware image " FW_FNAME
                        ", error %d\n", ret);
                return ret;
        }

        hdr = (const struct fw_hdr *)fw->data;
        vers = ntohl(hdr->fw_ver);
        if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
                ret = -EINVAL;              /* wrong major version, won't do */
                goto out;
        }

        /*
         * If the flash FW is unusable or we found something newer, load it.
         */
        if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
            vers > adap->params.fw_vers) {
                ret = -t4_load_fw(adap, fw->data, fw->size);
                if (!ret)
                        dev_info(dev, "firmware upgraded to version %pI4 from "
                                 FW_FNAME "\n", &hdr->fw_ver);
        }
out:    release_firmware(fw);
        return ret;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}

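/*
 * Return whether the adapter is operating as an offload device.
 */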
static inline int is_offload(const struct adapter *adap)
{
        return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
        return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxBroadcastFrames  ",
        "TxMulticastFrames  ",
        "TxUnicastFrames    ",
        "TxErrorFrames      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "TxFramesDropped    ",
        "TxPauseFrames      ",
        "TxPPP0Frames       ",
        "TxPPP1Frames       ",
        "TxPPP2Frames       ",
        "TxPPP3Frames       ",
        "TxPPP4Frames       ",
        "TxPPP5Frames       ",
        "TxPPP6Frames       ",
        "TxPPP7Frames       ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxBroadcastFrames  ",
        "RxMulticastFrames  ",
        "RxUnicastFrames    ",

        "RxFramesTooLong    ",
        "RxJabberErrors     ",
        "RxFCSErrors        ",
        "RxLengthErrors     ",
        "RxSymbolErrors     ",
        "RxRuntFrames       ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "RxPauseFrames      ",
        "RxPPP0Frames       ",
        "RxPPP1Frames       ",
        "RxPPP2Frames       ",
        "RxPPP3Frames       ",
        "RxPPP4Frames       ",
        "RxPPP5Frames       ",
        "RxPPP6Frames       ",
        "RxPPP7Frames       ",

        "RxBG0FramesDropped ",
        "RxBG1FramesDropped ",
        "RxBG2FramesDropped ",
        "RxBG3FramesDropped ",
        "RxBG0FramesTrunc   ",
        "RxBG1FramesTrunc   ",
        "RxBG2FramesTrunc   ",
        "RxBG3FramesTrunc   ",

        "TSO                ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "GROpackets         ",
        "GROmerged          ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T4_REGMAP_SIZE (160 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T4_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = netdev2adap(dev);

        strcpy(info->driver, KBUILD_MODNAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));

        if (!adapter->params.fw_vers)
                strcpy(info->fw_version, "N/A");
        else
                snprintf(info->fw_version, sizeof(info->fw_version),
                        "%u.%u.%u.%u, TP %u.%u.%u.%u",
                        FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
                        FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
                        FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
                        FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * Port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
        u64 tso;
        u64 tx_csum;
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
        u64 gro_pkts;
        u64 gro_merged;
};

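/*
 * Collect the per-queue software statistics (TSO, checksum offloads, VLAN
 * insertions/extractions, GRO) summed over all of a port's queue sets.
 */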
static void collect_sge_port_stats(const struct adapter *adap,
                const struct port_info *p, struct queue_port_stats *s)
{
        int i;
        const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
        const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

        memset(s, 0, sizeof(*s));
        for (i = 0; i < p->nqsets; i++, rx++, tx++) {
                s->tso += tx->tso;
                s->tx_csum += tx->tx_cso;
                s->rx_csum += rx->stats.rx_cso;
                s->vlan_ex += rx->stats.vlan_ex;
                s->vlan_ins += tx->vlan_ins;
                s->gro_pkts += rx->stats.lro_pkts;
                s->gro_merged += rx->stats.lro_merged;
        }
}

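/*
 * ethtool statistics: the hardware port stats are written first, followed by
 * the software queue_port_stats, matching the order of stats_strings above.
 */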
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

        data += sizeof(struct port_stats) / sizeof(u64);
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
        return 4 | (ap->params.rev << 10) | (1 << 16);
}

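/*
 * Read registers in the range [start, end] into the register dump buffer at
 * offset start.
 */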
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
                           unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        static const unsigned int reg_ranges[] = {
                0x1008, 0x1108,
                0x1180, 0x11b4,
                0x11fc, 0x123c,
                0x1300, 0x173c,
                0x1800, 0x18fc,
                0x3000, 0x30d8,
                0x30e0, 0x5924,
                0x5960, 0x59d4,
                0x5a00, 0x5af8,
                0x6000, 0x6098,
                0x6100, 0x6150,
                0x6200, 0x6208,
                0x6240, 0x6248,
                0x6280, 0x6338,
                0x6370, 0x638c,
                0x6400, 0x643c,
                0x6500, 0x6524,
                0x6a00, 0x6a38,
                0x6a60, 0x6a78,
                0x6b00, 0x6b84,
                0x6bf0, 0x6c84,
                0x6cf0, 0x6d84,
                0x6df0, 0x6e84,
                0x6ef0, 0x6f84,
                0x6ff0, 0x7084,
                0x70f0, 0x7184,
                0x71f0, 0x7284,
                0x72f0, 0x7384,
                0x73f0, 0x7450,
                0x7500, 0x7530,
                0x7600, 0x761c,
                0x7680, 0x76cc,
                0x7700, 0x7798,
                0x77c0, 0x77fc,
                0x7900, 0x79fc,
                0x7b00, 0x7c38,
                0x7d00, 0x7efc,
                0x8dc0, 0x8e1c,
                0x8e30, 0x8e78,
                0x8ea0, 0x8f6c,
                0x8fc0, 0x9074,
                0x90fc, 0x90fc,
                0x9400, 0x9458,
                0x9600, 0x96bc,
                0x9800, 0x9808,
                0x9820, 0x983c,
                0x9850, 0x9864,
                0x9c00, 0x9c6c,
                0x9c80, 0x9cec,
                0x9d00, 0x9d6c,
                0x9d80, 0x9dec,
                0x9e00, 0x9e6c,
                0x9e80, 0x9eec,
                0x9f00, 0x9f6c,
                0x9f80, 0x9fec,
                0xd004, 0xd03c,
                0xdfc0, 0xdfe0,
                0xe000, 0xea7c,
                0xf000, 0x11190,
                0x19040, 0x1906c,
                0x19078, 0x19080,
                0x1908c, 0x19124,
                0x19150, 0x191b0,
                0x191d0, 0x191e8,
                0x19238, 0x1924c,
                0x193f8, 0x19474,
                0x19490, 0x194f8,
                0x19800, 0x19f30,
                0x1a000, 0x1a06c,
                0x1a0b0, 0x1a120,
                0x1a128, 0x1a138,
                0x1a190, 0x1a1c4,
                0x1a1fc, 0x1a1fc,
                0x1e040, 0x1e04c,
                0x1e284, 0x1e28c,
                0x1e2c0, 0x1e2c0,
                0x1e2e0, 0x1e2e0,
                0x1e300, 0x1e384,
                0x1e3c0, 0x1e3c8,
                0x1e440, 0x1e44c,
                0x1e684, 0x1e68c,
                0x1e6c0, 0x1e6c0,
                0x1e6e0, 0x1e6e0,
                0x1e700, 0x1e784,
                0x1e7c0, 0x1e7c8,
                0x1e840, 0x1e84c,
                0x1ea84, 0x1ea8c,
                0x1eac0, 0x1eac0,
                0x1eae0, 0x1eae0,
                0x1eb00, 0x1eb84,
                0x1ebc0, 0x1ebc8,
                0x1ec40, 0x1ec4c,
                0x1ee84, 0x1ee8c,
                0x1eec0, 0x1eec0,
                0x1eee0, 0x1eee0,
                0x1ef00, 0x1ef84,
                0x1efc0, 0x1efc8,
                0x1f040, 0x1f04c,
                0x1f284, 0x1f28c,
                0x1f2c0, 0x1f2c0,
                0x1f2e0, 0x1f2e0,
                0x1f300, 0x1f384,
                0x1f3c0, 0x1f3c8,
                0x1f440, 0x1f44c,
                0x1f684, 0x1f68c,
                0x1f6c0, 0x1f6c0,
                0x1f6e0, 0x1f6e0,
                0x1f700, 0x1f784,
                0x1f7c0, 0x1f7c8,
                0x1f840, 0x1f84c,
                0x1fa84, 0x1fa8c,
                0x1fac0, 0x1fac0,
                0x1fae0, 0x1fae0,
                0x1fb00, 0x1fb84,
                0x1fbc0, 0x1fbc8,
                0x1fc40, 0x1fc4c,
                0x1fe84, 0x1fe8c,
                0x1fec0, 0x1fec0,
                0x1fee0, 0x1fee0,
                0x1ff00, 0x1ff84,
                0x1ffc0, 0x1ffc8,
                0x20000, 0x2002c,
                0x20100, 0x2013c,
                0x20190, 0x201c8,
                0x20200, 0x20318,
                0x20400, 0x20528,
                0x20540, 0x20614,
                0x21000, 0x21040,
                0x2104c, 0x21060,
                0x210c0, 0x210ec,
                0x21200, 0x21268,
                0x21270, 0x21284,
                0x212fc, 0x21388,
                0x21400, 0x21404,
                0x21500, 0x21518,
                0x2152c, 0x2153c,
                0x21550, 0x21554,
                0x21600, 0x21600,
                0x21608, 0x21628,
                0x21630, 0x2163c,
                0x21700, 0x2171c,
                0x21780, 0x2178c,
                0x21800, 0x21c38,
                0x21c80, 0x21d7c,
                0x21e00, 0x21e04,
                0x22000, 0x2202c,
                0x22100, 0x2213c,
                0x22190, 0x221c8,
                0x22200, 0x22318,
                0x22400, 0x22528,
                0x22540, 0x22614,
                0x23000, 0x23040,
                0x2304c, 0x23060,
                0x230c0, 0x230ec,
                0x23200, 0x23268,
                0x23270, 0x23284,
                0x232fc, 0x23388,
                0x23400, 0x23404,
                0x23500, 0x23518,
                0x2352c, 0x2353c,
                0x23550, 0x23554,
                0x23600, 0x23600,
                0x23608, 0x23628,
                0x23630, 0x2363c,
                0x23700, 0x2371c,
                0x23780, 0x2378c,
                0x23800, 0x23c38,
                0x23c80, 0x23d7c,
                0x23e00, 0x23e04,
                0x24000, 0x2402c,
                0x24100, 0x2413c,
                0x24190, 0x241c8,
                0x24200, 0x24318,
                0x24400, 0x24528,
                0x24540, 0x24614,
                0x25000, 0x25040,
                0x2504c, 0x25060,
                0x250c0, 0x250ec,
                0x25200, 0x25268,
                0x25270, 0x25284,
                0x252fc, 0x25388,
                0x25400, 0x25404,
                0x25500, 0x25518,
                0x2552c, 0x2553c,
                0x25550, 0x25554,
                0x25600, 0x25600,
                0x25608, 0x25628,
                0x25630, 0x2563c,
                0x25700, 0x2571c,
                0x25780, 0x2578c,
                0x25800, 0x25c38,
                0x25c80, 0x25d7c,
                0x25e00, 0x25e04,
                0x26000, 0x2602c,
                0x26100, 0x2613c,
                0x26190, 0x261c8,
                0x26200, 0x26318,
                0x26400, 0x26528,
                0x26540, 0x26614,
                0x27000, 0x27040,
                0x2704c, 0x27060,
                0x270c0, 0x270ec,
                0x27200, 0x27268,
                0x27270, 0x27284,
                0x272fc, 0x27388,
                0x27400, 0x27404,
                0x27500, 0x27518,
                0x2752c, 0x2753c,
                0x27550, 0x27554,
                0x27600, 0x27600,
                0x27608, 0x27628,
                0x27630, 0x2763c,
                0x27700, 0x2771c,
                0x27780, 0x2778c,
                0x27800, 0x27c38,
                0x27c80, 0x27d7c,
                0x27e00, 0x27e04
        };

        int i;
        struct adapter *ap = netdev2adap(dev);

        regs->version = mk_adap_vers(ap);

        memset(buf, 0, T4_REGMAP_SIZE);
        for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
                reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}

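/*
 * ethtool nway_reset: restart autonegotiation.  Requires the interface to be
 * running and autonegotiation to be enabled.
 */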
static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_cfg.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
        return 0;
}

static int identify_port(struct net_device *dev, u32 data)
{
        struct adapter *adap = netdev2adap(dev);

        if (data == 0)
                data = 2;     /* default to 2 seconds */

        return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid,
                                data * 5);
}

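/*
 * Translate a firmware port type and its capability word into the
 * corresponding ethtool SUPPORTED_* flags.
 */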
static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
        unsigned int v = 0;

        if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
            type == FW_PORT_TYPE_BT_XAUI) {
                v |= SUPPORTED_TP;
                if (caps & FW_PORT_CAP_SPEED_100M)
                        v |= SUPPORTED_100baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseT_Full;
        } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
                v |= SUPPORTED_Backplane;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseKX_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseKX4_Full;
        } else if (type == FW_PORT_TYPE_KR)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
        else if (type == FW_PORT_TYPE_BP_AP)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
        else if (type == FW_PORT_TYPE_BP4_AP)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
                     SUPPORTED_10000baseKX4_Full;
        else if (type == FW_PORT_TYPE_FIBER_XFI ||
                 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
                v |= SUPPORTED_FIBRE;

        if (caps & FW_PORT_CAP_ANEG)
                v |= SUPPORTED_Autoneg;
        return v;
}

static unsigned int to_fw_linkcaps(unsigned int caps)
{
        unsigned int v = 0;

        if (caps & ADVERTISED_100baseT_Full)
                v |= FW_PORT_CAP_SPEED_100M;
        if (caps & ADVERTISED_1000baseT_Full)
                v |= FW_PORT_CAP_SPEED_1G;
        if (caps & ADVERTISED_10000baseT_Full)
                v |= FW_PORT_CAP_SPEED_10G;
        return v;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        const struct port_info *p = netdev_priv(dev);

        if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
            p->port_type == FW_PORT_TYPE_BT_XFI ||
            p->port_type == FW_PORT_TYPE_BT_XAUI)
                cmd->port = PORT_TP;
        else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
                 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
                cmd->port = PORT_FIBRE;
        else if (p->port_type == FW_PORT_TYPE_SFP) {
                if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
                    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
                        cmd->port = PORT_DA;
                else
                        cmd->port = PORT_FIBRE;
        } else
                cmd->port = PORT_OTHER;

        if (p->mdio_addr >= 0) {
                cmd->phy_address = p->mdio_addr;
                cmd->transceiver = XCVR_EXTERNAL;
                cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
                        MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
        } else {
                cmd->phy_address = 0;  /* not really, but no better option */
                cmd->transceiver = XCVR_INTERNAL;
                cmd->mdio_support = 0;
        }

        cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
        cmd->advertising = from_fw_linkcaps(p->port_type,
                                            p->link_cfg.advertising);
        cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
        cmd->duplex = DUPLEX_FULL;
        cmd->autoneg = p->link_cfg.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

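/*
 * Map an ethtool speed value to the matching firmware port capability bit,
 * or 0 if the speed isn't one the firmware knows about.
 */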
static unsigned int speed_to_caps(int speed)
{
        if (speed == SPEED_100)
                return FW_PORT_CAP_SPEED_100M;
        if (speed == SPEED_1000)
                return FW_PORT_CAP_SPEED_1G;
        if (speed == SPEED_10000)
                return FW_PORT_CAP_SPEED_10G;
        return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        unsigned int cap;
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;

        if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
                return -EINVAL;

        if (!(lc->supported & FW_PORT_CAP_ANEG)) {
                /*
                 * PHY offers a single speed.  See if that's what's
                 * being requested.
                 */
                if (cmd->autoneg == AUTONEG_DISABLE &&
                    (lc->supported & speed_to_caps(cmd->speed)))
                        return 0;
                return -EINVAL;
        }

        if (cmd->autoneg == AUTONEG_DISABLE) {
                cap = speed_to_caps(cmd->speed);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
                    cmd->speed == SPEED_10000)
                        return -EINVAL;
                lc->requested_speed = cap;
                lc->advertising = 0;
        } else {
                cap = to_fw_linkcaps(cmd->advertising);
                if (!(lc->supported & cap))
                        return -EINVAL;
                lc->requested_speed = 0;
                lc->advertising = cap | FW_PORT_CAP_ANEG;
        }
        lc->autoneg = cmd->autoneg;

        if (netif_running(dev))
                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
                                     lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & FW_PORT_CAP_ANEG)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (netif_running(dev))
                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
                                     lc);
        return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        return p->rx_offload & RX_CSO;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct port_info *p = netdev_priv(dev);

        if (data)
                p->rx_offload |= RX_CSO;
        else
                p->rx_offload &= ~RX_CSO;
        return 0;
}

1547static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1548{
1549        const struct port_info *pi = netdev_priv(dev);
1550        const struct sge *s = &pi->adapter->sge;
1551
1552        e->rx_max_pending = MAX_RX_BUFFERS;
1553        e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1554        e->rx_jumbo_max_pending = 0;
1555        e->tx_max_pending = MAX_TXQ_ENTRIES;
1556
1557        e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1558        e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1559        e->rx_jumbo_pending = 0;
1560        e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1561}
1562
1563static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1564{
1565        int i;
1566        const struct port_info *pi = netdev_priv(dev);
1567        struct adapter *adapter = pi->adapter;
1568        struct sge *s = &adapter->sge;
1569
1570        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1571            e->tx_pending > MAX_TXQ_ENTRIES ||
1572            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1573            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1574            e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1575                return -EINVAL;
1576
1577        if (adapter->flags & FULL_INIT_DONE)
1578                return -EBUSY;
1579
1580        for (i = 0; i < pi->nqsets; ++i) {
1581                s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1582                s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1583                s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1584        }
1585        return 0;
1586}
1587
1588static int closest_timer(const struct sge *s, int time)
1589{
1590        int i, delta, match = 0, min_delta = INT_MAX;
1591
1592        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1593                delta = time - s->timer_val[i];
1594                if (delta < 0)
1595                        delta = -delta;
1596                if (delta < min_delta) {
1597                        min_delta = delta;
1598                        match = i;
1599                }
1600        }
1601        return match;
1602}
1603
1604static int closest_thres(const struct sge *s, int thres)
1605{
1606        int i, delta, match = 0, min_delta = INT_MAX;
1607
1608        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1609                delta = thres - s->counter_val[i];
1610                if (delta < 0)
1611                        delta = -delta;
1612                if (delta < min_delta) {
1613                        min_delta = delta;
1614                        match = i;
1615                }
1616        }
1617        return match;
1618}
1619
1620/*
1621 * Return a queue's interrupt hold-off time in us.  0 means no timer.
1622 */
1623static unsigned int qtimer_val(const struct adapter *adap,
1624                               const struct sge_rspq *q)
1625{
1626        unsigned int idx = q->intr_params >> 1;
1627
1628        return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1629}
1630
1631/**
1632 *      set_rxq_intr_params - set a queue's interrupt holdoff parameters
1633 *      @adap: the adapter
1634 *      @q: the Rx queue
1635 *      @us: the hold-off time in us, or 0 to disable timer
1636 *      @cnt: the hold-off packet count, or 0 to disable counter
1637 *
1638 *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
1639 *      one of the two needs to be enabled for the queue to generate interrupts.
1640 */
1641static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1642                               unsigned int us, unsigned int cnt)
1643{
1644        if ((us | cnt) == 0)
1645                cnt = 1;
1646
1647        if (cnt) {
1648                int err;
1649                u32 v, new_idx;
1650
1651                new_idx = closest_thres(&adap->sge, cnt);
1652                if (q->desc && q->pktcnt_idx != new_idx) {
1653                        /* the queue has already been created, update it */
1654                        v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1655                            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1656                            FW_PARAMS_PARAM_YZ(q->cntxt_id);
1657                        err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
1658                                            &new_idx);
1659                        if (err)
1660                                return err;
1661                }
1662                q->pktcnt_idx = new_idx;
1663        }
1664
1665        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1666        q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1667        return 0;
1668}
1669
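/*
 * Illustrative note (editorial, not part of the original source): the
 * hold-off state is packed into q->intr_params as
 * (timer index << 1) | counter-enable, which is why qtimer_val() above
 * recovers the timer index with a shift right by one.  A sketch of how an
 * ethtool coalescing request flows through, for a hypothetical Rx queue q:
 *
 *      // ethtool -C ethX rx-usecs 50 rx-frames 32 arrives here as:
 *      set_rxq_intr_params(adap, q, 50, 32);
 *      // -> q->pktcnt_idx  = closest_thres(&adap->sge, 32)
 *      // -> q->intr_params = QINTR_TIMER_IDX(closest_timer(&adap->sge, 50))
 *      //                     | QINTR_CNT_EN
 *
 * Passing us == 0 selects the special timer index 6 rather than a
 * timer_val[] entry, and (us | cnt) == 0 forces cnt = 1 so a queue can
 * never end up with both interrupt sources disabled at once.
 */
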
1670static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1671{
1672        const struct port_info *pi = netdev_priv(dev);
1673        struct adapter *adap = pi->adapter;
1674
1675        return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1676                        c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
1677}
1678
1679static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1680{
1681        const struct port_info *pi = netdev_priv(dev);
1682        const struct adapter *adap = pi->adapter;
1683        const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1684
1685        c->rx_coalesce_usecs = qtimer_val(adap, rq);
1686        c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
1687                adap->sge.counter_val[rq->pktcnt_idx] : 0;
1688        return 0;
1689}
1690
1691/**
1692 *      eeprom_ptov - translate a physical EEPROM address to virtual
1693 *      @phys_addr: the physical EEPROM address
1694 *      @fn: the PCI function number
1695 *      @sz: size of function-specific area
1696 *
1697 *      Translate a physical EEPROM address to virtual.  The first 1K is
1698 *      accessed through virtual addresses starting at 31K, the rest is
1699 *      accessed through virtual addresses starting at 0.
1700 *
1701 *      The mapping is as follows:
1702 *      [0..1K) -> [31K..32K)
1703 *      [1K..1K+A) -> [31K-A..31K)
1704 *      [1K+A..ES) -> [0..ES-A-1K)
1705 *
1706 *      where A = @fn * @sz, and ES = EEPROM size.
1707 */
1708static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
1709{
1710        fn *= sz;
1711        if (phys_addr < 1024)
1712                return phys_addr + (31 << 10);
1713        if (phys_addr < 1024 + fn)
1714                return 31744 - fn + phys_addr - 1024;
1715        if (phys_addr < EEPROMSIZE)
1716                return phys_addr - 1024 - fn;
1717        return -EINVAL;
1718}
1719
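/*
 * Worked example of eeprom_ptov() (editorial; takes @sz = 1K and @fn = 2
 * purely for illustration, so A = 2K):
 *
 *      eeprom_ptov(0x000, 2, 1024) -> 0x000 + 31K        = 0x7c00
 *      eeprom_ptov(0x400, 2, 1024) -> 31K - 2K           = 0x7400
 *      eeprom_ptov(0xc00, 2, 1024) -> 0xc00 - 1K - 2K    = 0x0000
 *
 * i.e. the first 1K of physical EEPROM appears at the top of the virtual
 * space, each PF's private area sits just below it, and the remainder of
 * the EEPROM starts at virtual address 0.
 */
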
1720/*
1721 * The next two routines implement EEPROM read/write from physical addresses.
1722 */
1723static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1724{
1725        int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1726
1727        if (vaddr >= 0)
1728                vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1729        return vaddr < 0 ? vaddr : 0;
1730}
1731
1732static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1733{
1734        int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1735
1736        if (vaddr >= 0)
1737                vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1738        return vaddr < 0 ? vaddr : 0;
1739}
1740
1741#define EEPROM_MAGIC 0x38E2F10C
1742
1743static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1744                      u8 *data)
1745{
1746        int i, err = 0;
1747        struct adapter *adapter = netdev2adap(dev);
1748
1749        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1750        if (!buf)
1751                return -ENOMEM;
1752
1753        e->magic = EEPROM_MAGIC;
1754        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1755                err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1756
1757        if (!err)
1758                memcpy(data, buf + e->offset, e->len);
1759        kfree(buf);
1760        return err;
1761}
1762
1763static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1764                      u8 *data)
1765{
1766        u8 *buf;
1767        int err = 0;
1768        u32 aligned_offset, aligned_len, *p;
1769        struct adapter *adapter = netdev2adap(dev);
1770
1771        if (eeprom->magic != EEPROM_MAGIC)
1772                return -EINVAL;
1773
1774        aligned_offset = eeprom->offset & ~3;
1775        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1776
1777        if (adapter->fn > 0) {
1778                u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
1779
1780                if (aligned_offset < start ||
1781                    aligned_offset + aligned_len > start + EEPROMPFSIZE)
1782                        return -EPERM;
1783        }
1784
1785        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1786                /*
1787                 * RMW possibly needed for first or last words.
1788                 */
1789                buf = kmalloc(aligned_len, GFP_KERNEL);
1790                if (!buf)
1791                        return -ENOMEM;
1792                err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1793                if (!err && aligned_len > 4)
1794                        err = eeprom_rd_phys(adapter,
1795                                             aligned_offset + aligned_len - 4,
1796                                             (u32 *)&buf[aligned_len - 4]);
1797                if (err)
1798                        goto out;
1799                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1800        } else
1801                buf = data;
1802
1803        err = t4_seeprom_wp(adapter, false);
1804        if (err)
1805                goto out;
1806
1807        for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1808                err = eeprom_wr_phys(adapter, aligned_offset, *p);
1809                aligned_offset += 4;
1810        }
1811
1812        if (!err)
1813                err = t4_seeprom_wp(adapter, true);
1814out:
1815        if (buf != data)
1816                kfree(buf);
1817        return err;
1818}
1819
1820static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1821{
1822        int ret;
1823        const struct firmware *fw;
1824        struct adapter *adap = netdev2adap(netdev);
1825
1826        ef->data[sizeof(ef->data) - 1] = '\0';
1827        ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1828        if (ret < 0)
1829                return ret;
1830
1831        ret = t4_load_fw(adap, fw->data, fw->size);
1832        release_firmware(fw);
1833        if (!ret)
1834                dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
1835        return ret;
1836}
1837
1838#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1839#define BCAST_CRC 0xa0ccc1a6
1840
1841static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1842{
1843        wol->supported = WOL_SUPPORTED;
1844        wol->wolopts = netdev2adap(dev)->wol;
1845        memset(&wol->sopass, 0, sizeof(wol->sopass));
1846}
1847
1848static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1849{
1850        int err = 0;
1851        struct port_info *pi = netdev_priv(dev);
1852
1853        if (wol->wolopts & ~WOL_SUPPORTED)
1854                return -EINVAL;
1855        t4_wol_magic_enable(pi->adapter, pi->tx_chan,
1856                            (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
1857        if (wol->wolopts & WAKE_BCAST) {
1858                err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
1859                                        ~0ULL, 0, false);
1860                if (!err)
1861                        err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
1862                                                ~6ULL, ~0ULL, BCAST_CRC, true);
1863        } else
1864                t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
1865        return err;
1866}
1867
1868#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1869
1870static int set_tso(struct net_device *dev, u32 value)
1871{
1872        if (value)
1873                dev->features |= TSO_FLAGS;
1874        else
1875                dev->features &= ~TSO_FLAGS;
1876        return 0;
1877}
1878
1879static int set_flags(struct net_device *dev, u32 flags)
1880{
1881        int err;
1882        unsigned long old_feat = dev->features;
1883
1884        err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH |
1885                                   ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
1886        if (err)
1887                return err;
1888
1889        if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
1890                const struct port_info *pi = netdev_priv(dev);
1891
1892                err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
1893                                    -1, -1, -1, !!(flags & ETH_FLAG_RXVLAN),
1894                                    true);
1895                if (err)
1896                        dev->features = old_feat;
1897        }
1898        return err;
1899}
1900
1901static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
1902{
1903        const struct port_info *pi = netdev_priv(dev);
1904        unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
1905
1906        p->size = pi->rss_size;
1907        while (n--)
1908                p->ring_index[n] = pi->rss[n];
1909        return 0;
1910}
1911
1912static int set_rss_table(struct net_device *dev,
1913                         const struct ethtool_rxfh_indir *p)
1914{
1915        unsigned int i;
1916        struct port_info *pi = netdev_priv(dev);
1917
1918        if (p->size != pi->rss_size)
1919                return -EINVAL;
1920        for (i = 0; i < p->size; i++)
1921                if (p->ring_index[i] >= pi->nqsets)
1922                        return -EINVAL;
1923        for (i = 0; i < p->size; i++)
1924                pi->rss[i] = p->ring_index[i];
1925        if (pi->adapter->flags & FULL_INIT_DONE)
1926                return write_rss(pi, pi->rss);
1927        return 0;
1928}
1929
1930static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1931                     void *rules)
1932{
1933        const struct port_info *pi = netdev_priv(dev);
1934
1935        switch (info->cmd) {
1936        case ETHTOOL_GRXFH: {
1937                unsigned int v = pi->rss_mode;
1938
1939                info->data = 0;
1940                switch (info->flow_type) {
1941                case TCP_V4_FLOW:
1942                        if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
1943                                info->data = RXH_IP_SRC | RXH_IP_DST |
1944                                             RXH_L4_B_0_1 | RXH_L4_B_2_3;
1945                        else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1946                                info->data = RXH_IP_SRC | RXH_IP_DST;
1947                        break;
1948                case UDP_V4_FLOW:
1949                        if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
1950                            (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1951                                info->data = RXH_IP_SRC | RXH_IP_DST |
1952                                             RXH_L4_B_0_1 | RXH_L4_B_2_3;
1953                        else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1954                                info->data = RXH_IP_SRC | RXH_IP_DST;
1955                        break;
1956                case SCTP_V4_FLOW:
1957                case AH_ESP_V4_FLOW:
1958                case IPV4_FLOW:
1959                        if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1960                                info->data = RXH_IP_SRC | RXH_IP_DST;
1961                        break;
1962                case TCP_V6_FLOW:
1963                        if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
1964                                info->data = RXH_IP_SRC | RXH_IP_DST |
1965                                             RXH_L4_B_0_1 | RXH_L4_B_2_3;
1966                        else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1967                                info->data = RXH_IP_SRC | RXH_IP_DST;
1968                        break;
1969                case UDP_V6_FLOW:
1970                        if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
1971                            (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1972                                info->data = RXH_IP_SRC | RXH_IP_DST |
1973                                             RXH_L4_B_0_1 | RXH_L4_B_2_3;
1974                        else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1975                                info->data = RXH_IP_SRC | RXH_IP_DST;
1976                        break;
1977                case SCTP_V6_FLOW:
1978                case AH_ESP_V6_FLOW:
1979                case IPV6_FLOW:
1980                        if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1981                                info->data = RXH_IP_SRC | RXH_IP_DST;
1982                        break;
1983                }
1984                return 0;
1985        }
1986        case ETHTOOL_GRXRINGS:
1987                info->data = pi->nqsets;
1988                return 0;
1989        }
1990        return -EOPNOTSUPP;
1991}
1992
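/*
 * Example (editorial): with IPv4 TCP 4-tuple hashing enabled, the
 * ETHTOOL_GRXFH data built above is what makes a query like
 * "ethtool -n ethX rx-flow-hash tcp4" report roughly: IP SA, IP DA,
 * L4 bytes 0 & 1 [TCP src port], L4 bytes 2 & 3 [TCP dst port].
 * ETHTOOL_GRXRINGS reports pi->nqsets so user space can size the
 * indirection tables it passes to set_rss_table() above.
 */
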
1993static const struct ethtool_ops cxgb_ethtool_ops = {
1994        .get_settings      = get_settings,
1995        .set_settings      = set_settings,
1996        .get_drvinfo       = get_drvinfo,
1997        .get_msglevel      = get_msglevel,
1998        .set_msglevel      = set_msglevel,
1999        .get_ringparam     = get_sge_param,
2000        .set_ringparam     = set_sge_param,
2001        .get_coalesce      = get_coalesce,
2002        .set_coalesce      = set_coalesce,
2003        .get_eeprom_len    = get_eeprom_len,
2004        .get_eeprom        = get_eeprom,
2005        .set_eeprom        = set_eeprom,
2006        .get_pauseparam    = get_pauseparam,
2007        .set_pauseparam    = set_pauseparam,
2008        .get_rx_csum       = get_rx_csum,
2009        .set_rx_csum       = set_rx_csum,
2010        .set_tx_csum       = ethtool_op_set_tx_ipv6_csum,
2011        .set_sg            = ethtool_op_set_sg,
2012        .get_link          = ethtool_op_get_link,
2013        .get_strings       = get_strings,
2014        .phys_id           = identify_port,
2015        .nway_reset        = restart_autoneg,
2016        .get_sset_count    = get_sset_count,
2017        .get_ethtool_stats = get_stats,
2018        .get_regs_len      = get_regs_len,
2019        .get_regs          = get_regs,
2020        .get_wol           = get_wol,
2021        .set_wol           = set_wol,
2022        .set_tso           = set_tso,
2023        .set_flags         = set_flags,
2024        .get_rxnfc         = get_rxnfc,
2025        .get_rxfh_indir    = get_rss_table,
2026        .set_rxfh_indir    = set_rss_table,
2027        .flash_device      = set_flash,
2028};
2029
2030/*
2031 * debugfs support
2032 */
2033
2034static int mem_open(struct inode *inode, struct file *file)
2035{
2036        file->private_data = inode->i_private;
2037        return 0;
2038}
2039
2040static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2041                        loff_t *ppos)
2042{
2043        loff_t pos = *ppos;
2044        loff_t avail = file->f_path.dentry->d_inode->i_size;
2045        unsigned int mem = (uintptr_t)file->private_data & 3;
2046        struct adapter *adap = file->private_data - mem;
2047
2048        if (pos < 0)
2049                return -EINVAL;
2050        if (pos >= avail)
2051                return 0;
2052        if (count > avail - pos)
2053                count = avail - pos;
2054
2055        while (count) {
2056                size_t len;
2057                int ret, ofst;
2058                __be32 data[16];
2059
2060                if (mem == MEM_MC)
2061                        ret = t4_mc_read(adap, pos, data, NULL);
2062                else
2063                        ret = t4_edc_read(adap, mem, pos, data, NULL);
2064                if (ret)
2065                        return ret;
2066
2067                ofst = pos % sizeof(data);
2068                len = min(count, sizeof(data) - ofst);
2069                if (copy_to_user(buf, (u8 *)data + ofst, len))
2070                        return -EFAULT;
2071
2072                buf += len;
2073                pos += len;
2074                count -= len;
2075        }
2076        count = pos - *ppos;
2077        *ppos = pos;
2078        return count;
2079}
2080
2081static const struct file_operations mem_debugfs_fops = {
2082        .owner   = THIS_MODULE,
2083        .open    = mem_open,
2084        .read    = mem_read,
2085        .llseek  = default_llseek,
2086};
2087
2088static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
2089                                      unsigned int idx, unsigned int size_mb)
2090{
2091        struct dentry *de;
2092
2093        de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2094                                 (void *)adap + idx, &mem_debugfs_fops);
2095        if (de && de->d_inode)
2096                de->d_inode->i_size = size_mb << 20;
2097}
2098
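/*
 * Editorial note: add_debugfs_mem() tags the adapter pointer with the
 * memory index by storing (void *)adap + idx in i_private, and mem_read()
 * above separates the two again:
 *
 *      mem  = (uintptr_t)file->private_data & 3;   // the MEM_* index
 *      adap = file->private_data - mem;            // the adapter
 *
 * This works because the MEM_* indices are below 4 and a kmalloc'ed
 * struct adapter is aligned well beyond 4 bytes.  The i_size set here is
 * what bounds reads in mem_read().
 */
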
2099static int __devinit setup_debugfs(struct adapter *adap)
2100{
2101        int i;
2102
2103        if (IS_ERR_OR_NULL(adap->debugfs_root))
2104                return -1;
2105
2106        i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2107        if (i & EDRAM0_ENABLE)
2108                add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2109        if (i & EDRAM1_ENABLE)
2110                add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2111        if (i & EXT_MEM_ENABLE)
2112                add_debugfs_mem(adap, "mc", MEM_MC,
2113                        EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2114        if (adap->l2t)
2115                debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2116                                    &t4_l2t_fops);
2117        return 0;
2118}
2119
2120/*
2121 * upper-layer driver support
2122 */
2123
2124/*
2125 * Allocate an active-open TID and set it to the supplied value.
2126 */
2127int cxgb4_alloc_atid(struct tid_info *t, void *data)
2128{
2129        int atid = -1;
2130
2131        spin_lock_bh(&t->atid_lock);
2132        if (t->afree) {
2133                union aopen_entry *p = t->afree;
2134
2135                atid = p - t->atid_tab;
2136                t->afree = p->next;
2137                p->data = data;
2138                t->atids_in_use++;
2139        }
2140        spin_unlock_bh(&t->atid_lock);
2141        return atid;
2142}
2143EXPORT_SYMBOL(cxgb4_alloc_atid);
2144
2145/*
2146 * Release an active-open TID.
2147 */
2148void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2149{
2150        union aopen_entry *p = &t->atid_tab[atid];
2151
2152        spin_lock_bh(&t->atid_lock);
2153        p->next = t->afree;
2154        t->afree = p;
2155        t->atids_in_use--;
2156        spin_unlock_bh(&t->atid_lock);
2157}
2158EXPORT_SYMBOL(cxgb4_free_atid);
2159
2160/*
2161 * Allocate a server TID and set it to the supplied value.
2162 */
2163int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2164{
2165        int stid;
2166
2167        spin_lock_bh(&t->stid_lock);
2168        if (family == PF_INET) {
2169                stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2170                if (stid < t->nstids)
2171                        __set_bit(stid, t->stid_bmap);
2172                else
2173                        stid = -1;
2174        } else {
2175                stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2176                if (stid < 0)
2177                        stid = -1;
2178        }
2179        if (stid >= 0) {
2180                t->stid_tab[stid].data = data;
2181                stid += t->stid_base;
2182                t->stids_in_use++;
2183        }
2184        spin_unlock_bh(&t->stid_lock);
2185        return stid;
2186}
2187EXPORT_SYMBOL(cxgb4_alloc_stid);
2188
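/*
 * Editorial note: an IPv4 server consumes a single stid bit, while the
 * non-IPv4 (IPv6) path reserves an order-2 region (four consecutive
 * stids) via bitmap_find_free_region().  cxgb4_free_stid() below mirrors
 * the split with __clear_bit() vs bitmap_release_region().
 */
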
2189/*
2190 * Release a server TID.
2191 */
2192void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2193{
2194        stid -= t->stid_base;
2195        spin_lock_bh(&t->stid_lock);
2196        if (family == PF_INET)
2197                __clear_bit(stid, t->stid_bmap);
2198        else
2199                bitmap_release_region(t->stid_bmap, stid, 2);
2200        t->stid_tab[stid].data = NULL;
2201        t->stids_in_use--;
2202        spin_unlock_bh(&t->stid_lock);
2203}
2204EXPORT_SYMBOL(cxgb4_free_stid);
2205
2206/*
2207 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
2208 */
2209static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2210                           unsigned int tid)
2211{
2212        struct cpl_tid_release *req;
2213
2214        set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
2215        req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
2216        INIT_TP_WR(req, tid);
2217        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
2218}
2219
2220/*
2221 * Queue a TID release request and if necessary schedule a work queue to
2222 * process it.
2223 */
2224static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2225                                    unsigned int tid)
2226{
2227        void **p = &t->tid_tab[tid];
2228        struct adapter *adap = container_of(t, struct adapter, tids);
2229
2230        spin_lock_bh(&adap->tid_release_lock);
2231        *p = adap->tid_release_head;
2232        /* Low 2 bits encode the Tx channel number */
2233        adap->tid_release_head = (void **)((uintptr_t)p | chan);
2234        if (!adap->tid_release_task_busy) {
2235                adap->tid_release_task_busy = true;
2236                schedule_work(&adap->tid_release_task);
2237        }
2238        spin_unlock_bh(&adap->tid_release_lock);
2239}
2240
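/*
 * Editorial note: the deferred-release list is threaded through the
 * tid_tab[] slots themselves, and because those slots hold pointers the
 * low two bits of each link are free to carry the Tx channel:
 *
 *      head = (void **)((uintptr_t)p | chan);      // encode, chan < 4
 *      chan = (uintptr_t)head & 3;                 // decode, as done by
 *      p    = (void *)head - chan;                 //   the list walker below
 *
 * so the path that runs precisely because an skb allocation failed never
 * needs to allocate bookkeeping memory of its own.
 */
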
2241/*
2242 * Process the list of pending TID release requests.
2243 */
2244static void process_tid_release_list(struct work_struct *work)
2245{
2246        struct sk_buff *skb;
2247        struct adapter *adap;
2248
2249        adap = container_of(work, struct adapter, tid_release_task);
2250
2251        spin_lock_bh(&adap->tid_release_lock);
2252        while (adap->tid_release_head) {
2253                void **p = adap->tid_release_head;
2254                unsigned int chan = (uintptr_t)p & 3;
2255                p = (void *)p - chan;
2256
2257                adap->tid_release_head = *p;
2258                *p = NULL;
2259                spin_unlock_bh(&adap->tid_release_lock);
2260
2261                while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2262                                         GFP_KERNEL)))
2263                        schedule_timeout_uninterruptible(1);
2264
2265                mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2266                t4_ofld_send(adap, skb);
2267                spin_lock_bh(&adap->tid_release_lock);
2268        }
2269        adap->tid_release_task_busy = false;
2270        spin_unlock_bh(&adap->tid_release_lock);
2271}
2272
2273/*
2274 * Release a TID and inform HW.  If we are unable to allocate the release
2275 * message we defer to a work queue.
2276 */
2277void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2278{
2279        void *old;
2280        struct sk_buff *skb;
2281        struct adapter *adap = container_of(t, struct adapter, tids);
2282
2283        old = t->tid_tab[tid];
2284        skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2285        if (likely(skb)) {
2286                t->tid_tab[tid] = NULL;
2287                mk_tid_release(skb, chan, tid);
2288                t4_ofld_send(adap, skb);
2289        } else
2290                cxgb4_queue_tid_release(t, chan, tid);
2291        if (old)
2292                atomic_dec(&t->tids_in_use);
2293}
2294EXPORT_SYMBOL(cxgb4_remove_tid);
2295
2296/*
2297 * Allocate and initialize the TID tables.  Returns 0 on success.
2298 */
2299static int tid_init(struct tid_info *t)
2300{
2301        size_t size;
2302        unsigned int natids = t->natids;
2303
2304        size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
2305               t->nstids * sizeof(*t->stid_tab) +
2306               BITS_TO_LONGS(t->nstids) * sizeof(long);
2307        t->tid_tab = t4_alloc_mem(size);
2308        if (!t->tid_tab)
2309                return -ENOMEM;
2310
2311        t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2312        t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2313        t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
2314        spin_lock_init(&t->stid_lock);
2315        spin_lock_init(&t->atid_lock);
2316
2317        t->stids_in_use = 0;
2318        t->afree = NULL;
2319        t->atids_in_use = 0;
2320        atomic_set(&t->tids_in_use, 0);
2321
2322        /* Setup the free list for atid_tab and clear the stid bitmap. */
2323        if (natids) {
2324                while (--natids)
2325                        t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2326                t->afree = t->atid_tab;
2327        }
2328        bitmap_zero(t->stid_bmap, t->nstids);
2329        return 0;
2330}
2331
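/*
 * Editorial sketch of the single t4_alloc_mem() block carved up in
 * tid_init() above:
 *
 *      | tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids] | stid_bmap |
 *      ^ t->tid_tab     ^ t->atid_tab      ^ t->stid_tab      ^ t->stid_bmap
 *
 * One allocation, one eventual free.  The loop over natids threads
 * atid_tab[] into a singly linked free list so that cxgb4_alloc_atid()
 * and cxgb4_free_atid() above are O(1).
 */
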
2332/**
2333 *      cxgb4_create_server - create an IP server
2334 *      @dev: the device
2335 *      @stid: the server TID
2336 *      @sip: local IP address to bind server to
2337 *      @sport: the server's TCP port
2338 *      @queue: queue to direct messages from this server to
2339 *
2340 *      Create an IP server for the given port and address.
2341 *      Returns <0 on error and one of the %NET_XMIT_* values on success.
2342 */
2343int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2344                        __be32 sip, __be16 sport, unsigned int queue)
2345{
2346        unsigned int chan;
2347        struct sk_buff *skb;
2348        struct adapter *adap;
2349        struct cpl_pass_open_req *req;
2350
2351        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2352        if (!skb)
2353                return -ENOMEM;
2354
2355        adap = netdev2adap(dev);
2356        req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2357        INIT_TP_WR(req, 0);
2358        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2359        req->local_port = sport;
2360        req->peer_port = htons(0);
2361        req->local_ip = sip;
2362        req->peer_ip = htonl(0);
2363        chan = rxq_to_chan(&adap->sge, queue);
2364        req->opt0 = cpu_to_be64(TX_CHAN(chan));
2365        req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2366                                SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2367        return t4_mgmt_tx(adap, skb);
2368}
2369EXPORT_SYMBOL(cxgb4_create_server);
2370
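/*
 * Usage sketch (editorial, hypothetical values): an upper-layer driver
 * typically pairs this with a server TID from cxgb4_alloc_stid(), e.g.
 * to listen on TCP port 80 on any local address:
 *
 *      stid = cxgb4_alloc_stid(&adap->tids, PF_INET, data);
 *      if (stid >= 0)
 *              ret = cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
 *                                        htons(80), queue);
 *
 * Per the comment above, ret >= 0 (a NET_XMIT_* value) means the request
 * was handed to hardware, so callers must not test for ret == 0 alone.
 */
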
2371/**
2372 *      cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2373 *      @mtus: the HW MTU table
2374 *      @mtu: the target MTU
2375 *      @idx: index of selected entry in the MTU table
2376 *
2377 *      Returns the index and the value in the HW MTU table that is closest to
2378 *      but does not exceed @mtu, unless @mtu is smaller than any value in the
2379 *      table, in which case that smallest available value is selected.
2380 */
2381unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2382                            unsigned int *idx)
2383{
2384        unsigned int i = 0;
2385
2386        while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2387                ++i;
2388        if (idx)
2389                *idx = i;
2390        return mtus[i];
2391}
2392EXPORT_SYMBOL(cxgb4_best_mtu);
2393
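/*
 * Worked example (editorial, hypothetical table values; the real table
 * is adap->params.mtus):
 *
 *      unsigned short mtus[NMTUS] = { 88, 576, 1500, 9000, ... };
 *      unsigned int idx;
 *
 *      cxgb4_best_mtu(mtus, 1400, &idx);  // returns 576, idx == 1
 *      cxgb4_best_mtu(mtus,   64, &idx);  // returns 88,  idx == 0:
 *                                         // below the table minimum,
 *                                         // the smallest entry wins
 */
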
2394/**
2395 *      cxgb4_port_chan - get the HW channel of a port
2396 *      @dev: the net device for the port
2397 *
2398 *      Return the HW Tx channel of the given port.
2399 */
2400unsigned int cxgb4_port_chan(const struct net_device *dev)
2401{
2402        return netdev2pinfo(dev)->tx_chan;
2403}
2404EXPORT_SYMBOL(cxgb4_port_chan);
2405
2406/**
2407 *      cxgb4_port_viid - get the VI id of a port
2408 *      @dev: the net device for the port
2409 *
2410 *      Return the VI id of the given port.
2411 */
2412unsigned int cxgb4_port_viid(const struct net_device *dev)
2413{
2414        return netdev2pinfo(dev)->viid;
2415}
2416EXPORT_SYMBOL(cxgb4_port_viid);
2417
2418/**
2419 *      cxgb4_port_idx - get the index of a port
2420 *      @dev: the net device for the port
2421 *
2422 *      Return the index of the given port.
2423 */
2424unsigned int cxgb4_port_idx(const struct net_device *dev)
2425{
2426        return netdev2pinfo(dev)->port_id;
2427}
2428EXPORT_SYMBOL(cxgb4_port_idx);
2429
2430void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2431                         struct tp_tcp_stats *v6)
2432{
2433        struct adapter *adap = pci_get_drvdata(pdev);
2434
2435        spin_lock(&adap->stats_lock);
2436        t4_tp_get_tcp_stats(adap, v4, v6);
2437        spin_unlock(&adap->stats_lock);
2438}
2439EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2440
2441void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2442                      const unsigned int *pgsz_order)
2443{
2444        struct adapter *adap = netdev2adap(dev);
2445
2446        t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2447        t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2448                     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2449                     HPZ3(pgsz_order[3]));
2450}
2451EXPORT_SYMBOL(cxgb4_iscsi_init);
2452
2453static struct pci_driver cxgb4_driver;
2454
2455static void check_neigh_update(struct neighbour *neigh)
2456{
2457        const struct device *parent;
2458        const struct net_device *netdev = neigh->dev;
2459
2460        if (netdev->priv_flags & IFF_802_1Q_VLAN)
2461                netdev = vlan_dev_real_dev(netdev);
2462        parent = netdev->dev.parent;
2463        if (parent && parent->driver == &cxgb4_driver.driver)
2464                t4_l2t_update(dev_get_drvdata(parent), neigh);
2465}
2466
2467static int netevent_cb(struct notifier_block *nb, unsigned long event,
2468                       void *data)
2469{
2470        switch (event) {
2471        case NETEVENT_NEIGH_UPDATE:
2472                check_neigh_update(data);
2473                break;
2474        case NETEVENT_PMTU_UPDATE:
2475        case NETEVENT_REDIRECT:
2476        default:
2477                break;
2478        }
2479        return 0;
2480}
2481
2482static bool netevent_registered;
2483static struct notifier_block cxgb4_netevent_nb = {
2484        .notifier_call = netevent_cb
2485};
2486
2487static void uld_attach(struct adapter *adap, unsigned int uld)
2488{
2489        void *handle;
2490        struct cxgb4_lld_info lli;
2491
2492        lli.pdev = adap->pdev;
2493        lli.l2t = adap->l2t;
2494        lli.tids = &adap->tids;
2495        lli.ports = adap->port;
2496        lli.vr = &adap->vres;
2497        lli.mtus = adap->params.mtus;
2498        if (uld == CXGB4_ULD_RDMA) {
2499                lli.rxq_ids = adap->sge.rdma_rxq;
2500                lli.nrxq = adap->sge.rdmaqs;
2501        } else if (uld == CXGB4_ULD_ISCSI) {
2502                lli.rxq_ids = adap->sge.ofld_rxq;
2503                lli.nrxq = adap->sge.ofldqsets;
2504        }
2505        lli.ntxq = adap->sge.ofldqsets;
2506        lli.nchan = adap->params.nports;
2507        lli.nports = adap->params.nports;
2508        lli.wr_cred = adap->params.ofldq_wr_cred;
2509        lli.adapter_type = adap->params.rev;
2510        lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
2511        lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
2512                        t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
2513                        (adap->fn * 4));
2514        lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
2515                        t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
2516                        (adap->fn * 4));
2517        lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2518        lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2519        lli.fw_vers = adap->params.fw_vers;
2520
2521        handle = ulds[uld].add(&lli);
2522        if (IS_ERR(handle)) {
2523                dev_warn(adap->pdev_dev,
2524                         "could not attach to the %s driver, error %ld\n",
2525                         uld_str[uld], PTR_ERR(handle));
2526                return;
2527        }
2528
2529        adap->uld_handle[uld] = handle;
2530
2531        if (!netevent_registered) {
2532                register_netevent_notifier(&cxgb4_netevent_nb);
2533                netevent_registered = true;
2534        }
2535
2536        if (adap->flags & FULL_INIT_DONE)
2537                ulds[uld].state_change(handle, CXGB4_STATE_UP);
2538}
2539
2540static void attach_ulds(struct adapter *adap)
2541{
2542        unsigned int i;
2543
2544        mutex_lock(&uld_mutex);
2545        list_add_tail(&adap->list_node, &adapter_list);
2546        for (i = 0; i < CXGB4_ULD_MAX; i++)
2547                if (ulds[i].add)
2548                        uld_attach(adap, i);
2549        mutex_unlock(&uld_mutex);
2550}
2551
2552static void detach_ulds(struct adapter *adap)
2553{
2554        unsigned int i;
2555
2556        mutex_lock(&uld_mutex);
2557        list_del(&adap->list_node);
2558        for (i = 0; i < CXGB4_ULD_MAX; i++)
2559                if (adap->uld_handle[i]) {
2560                        ulds[i].state_change(adap->uld_handle[i],
2561                                             CXGB4_STATE_DETACH);
2562                        adap->uld_handle[i] = NULL;
2563                }
2564        if (netevent_registered && list_empty(&adapter_list)) {
2565                unregister_netevent_notifier(&cxgb4_netevent_nb);
2566                netevent_registered = false;
2567        }
2568        mutex_unlock(&uld_mutex);
2569}
2570
2571static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2572{
2573        unsigned int i;
2574
2575        mutex_lock(&uld_mutex);
2576        for (i = 0; i < CXGB4_ULD_MAX; i++)
2577                if (adap->uld_handle[i])
2578                        ulds[i].state_change(adap->uld_handle[i], new_state);
2579        mutex_unlock(&uld_mutex);
2580}
2581
2582/**
2583 *      cxgb4_register_uld - register an upper-layer driver
2584 *      @type: the ULD type
2585 *      @p: the ULD methods
2586 *
2587 *      Registers an upper-layer driver with this driver and notifies the ULD
2588 *      about any presently available devices that support its type.  Returns
2589 *      %-EBUSY if a ULD of the same type is already registered.
2590 */
2591int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2592{
2593        int ret = 0;
2594        struct adapter *adap;
2595
2596        if (type >= CXGB4_ULD_MAX)
2597                return -EINVAL;
2598        mutex_lock(&uld_mutex);
2599        if (ulds[type].add) {
2600                ret = -EBUSY;
2601                goto out;
2602        }
2603        ulds[type] = *p;
2604        list_for_each_entry(adap, &adapter_list, list_node)
2605                uld_attach(adap, type);
2606out:    mutex_unlock(&uld_mutex);
2607        return ret;
2608}
2609EXPORT_SYMBOL(cxgb4_register_uld);
2610
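/*
 * Registration sketch (editorial; the "demo" names are hypothetical and
 * the callback signatures follow struct cxgb4_uld_info in cxgb4.h, which
 * also carries fields not shown here, such as an Rx handler):
 *
 *      static void *demo_add(const struct cxgb4_lld_info *lli)
 *      {
 *              void *handle = create_demo_state(lli);  // hypothetical
 *
 *              return handle ? handle : ERR_PTR(-ENOMEM);
 *      }
 *
 *      static int demo_state_change(void *handle, enum cxgb4_state state)
 *      {
 *              return 0;       // react to CXGB4_STATE_UP/DETACH etc.
 *      }
 *
 *      static const struct cxgb4_uld_info demo_uld_info = {
 *              .add          = demo_add,
 *              .state_change = demo_state_change,
 *      };
 *
 *      err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &demo_uld_info);
 *
 * Note that .add must return an ERR_PTR() value on failure, since
 * uld_attach() above tests the handle with IS_ERR(), not against NULL.
 */
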
2611/**
2612 *      cxgb4_unregister_uld - unregister an upper-layer driver
2613 *      @type: the ULD type
2614 *
2615 *      Unregisters an existing upper-layer driver.
2616 */
2617int cxgb4_unregister_uld(enum cxgb4_uld type)
2618{
2619        struct adapter *adap;
2620
2621        if (type >= CXGB4_ULD_MAX)
2622                return -EINVAL;
2623        mutex_lock(&uld_mutex);
2624        list_for_each_entry(adap, &adapter_list, list_node)
2625                adap->uld_handle[type] = NULL;
2626        ulds[type].add = NULL;
2627        mutex_unlock(&uld_mutex);
2628        return 0;
2629}
2630EXPORT_SYMBOL(cxgb4_unregister_uld);
2631
2632/**
2633 *      cxgb_up - enable the adapter
2634 *      @adap: adapter being enabled
2635 *
2636 *      Called when the first port is enabled, this function performs the
2637 *      actions necessary to make an adapter operational, such as completing
2638 *      the initialization of HW modules, and enabling interrupts.
2639 *
2640 *      Must be called with the rtnl lock held.
2641 */
2642static int cxgb_up(struct adapter *adap)
2643{
2644        int err;
2645
2646        err = setup_sge_queues(adap);
2647        if (err)
2648                goto out;
2649        err = setup_rss(adap);
2650        if (err)
2651                goto freeq;
2652
2653        if (adap->flags & USING_MSIX) {
2654                name_msix_vecs(adap);
2655                err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2656                                  adap->msix_info[0].desc, adap);
2657                if (err)
2658                        goto irq_err;
2659
2660                err = request_msix_queue_irqs(adap);
2661                if (err) {
2662                        free_irq(adap->msix_info[0].vec, adap);
2663                        goto irq_err;
2664                }
2665        } else {
2666                err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2667                                  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2668                                  adap->port[0]->name, adap);
2669                if (err)
2670                        goto irq_err;
2671        }
2672        enable_rx(adap);
2673        t4_sge_start(adap);
2674        t4_intr_enable(adap);
2675        adap->flags |= FULL_INIT_DONE;
2676        notify_ulds(adap, CXGB4_STATE_UP);
2677 out:
2678        return err;
2679 irq_err:
2680        dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2681 freeq:
2682        t4_free_sge_resources(adap);
2683        goto out;
2684}
2685
2686static void cxgb_down(struct adapter *adapter)
2687{
2688        t4_intr_disable(adapter);
2689        cancel_work_sync(&adapter->tid_release_task);
2690        adapter->tid_release_task_busy = false;
2691        adapter->tid_release_head = NULL;
2692
2693        if (adapter->flags & USING_MSIX) {
2694                free_msix_queue_irqs(adapter);
2695                free_irq(adapter->msix_info[0].vec, adapter);
2696        } else
2697                free_irq(adapter->pdev->irq, adapter);
2698        quiesce_rx(adapter);
2699        t4_sge_stop(adapter);
2700        t4_free_sge_resources(adapter);
2701        adapter->flags &= ~FULL_INIT_DONE;
2702}
2703
2704/*
2705 * net_device operations
2706 */
2707static int cxgb_open(struct net_device *dev)
2708{
2709        int err;
2710        struct port_info *pi = netdev_priv(dev);
2711        struct adapter *adapter = pi->adapter;
2712
2713        netif_carrier_off(dev);
2714
2715        if (!(adapter->flags & FULL_INIT_DONE)) {
2716                err = cxgb_up(adapter);
2717                if (err < 0)
2718                        return err;
2719        }
2720
2721        err = link_start(dev);
2722        if (!err)
2723                netif_tx_start_all_queues(dev);
2724        return err;
2725}
2726
2727static int cxgb_close(struct net_device *dev)
2728{
2729        struct port_info *pi = netdev_priv(dev);
2730        struct adapter *adapter = pi->adapter;
2731
2732        netif_tx_stop_all_queues(dev);
2733        netif_carrier_off(dev);
2734        return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
2735}
2736
2737static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2738                                                struct rtnl_link_stats64 *ns)
2739{
2740        struct port_stats stats;
2741        struct port_info *p = netdev_priv(dev);
2742        struct adapter *adapter = p->adapter;
2743
2744        spin_lock(&adapter->stats_lock);
2745        t4_get_port_stats(adapter, p->tx_chan, &stats);
2746        spin_unlock(&adapter->stats_lock);
2747
2748        ns->tx_bytes   = stats.tx_octets;
2749        ns->tx_packets = stats.tx_frames;
2750        ns->rx_bytes   = stats.rx_octets;
2751        ns->rx_packets = stats.rx_frames;
2752        ns->multicast  = stats.rx_mcast_frames;
2753
2754        /* detailed rx_errors */
2755        ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2756                               stats.rx_runt;
2757        ns->rx_over_errors   = 0;
2758        ns->rx_crc_errors    = stats.rx_fcs_err;
2759        ns->rx_frame_errors  = stats.rx_symbol_err;
2760        ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
2761                               stats.rx_ovflow2 + stats.rx_ovflow3 +
2762                               stats.rx_trunc0 + stats.rx_trunc1 +
2763                               stats.rx_trunc2 + stats.rx_trunc3;
2764        ns->rx_missed_errors = 0;
2765
2766        /* detailed tx_errors */
2767        ns->tx_aborted_errors   = 0;
2768        ns->tx_carrier_errors   = 0;
2769        ns->tx_fifo_errors      = 0;
2770        ns->tx_heartbeat_errors = 0;
2771        ns->tx_window_errors    = 0;
2772
2773        ns->tx_errors = stats.tx_error_frames;
2774        ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2775                ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2776        return ns;
2777}
2778
2779static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2780{
2781        unsigned int mbox;
2782        int ret = 0, prtad, devad;
2783        struct port_info *pi = netdev_priv(dev);
2784        struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2785
2786        switch (cmd) {
2787        case SIOCGMIIPHY:
2788                if (pi->mdio_addr < 0)
2789                        return -EOPNOTSUPP;
2790                data->phy_id = pi->mdio_addr;
2791                break;
2792        case SIOCGMIIREG:
2793        case SIOCSMIIREG:
2794                if (mdio_phy_id_is_c45(data->phy_id)) {
2795                        prtad = mdio_phy_id_prtad(data->phy_id);
2796                        devad = mdio_phy_id_devad(data->phy_id);
2797                } else if (data->phy_id < 32) {
2798                        prtad = data->phy_id;
2799                        devad = 0;
2800                        data->reg_num &= 0x1f;
2801                } else
2802                        return -EINVAL;
2803
2804                mbox = pi->adapter->fn;
2805                if (cmd == SIOCGMIIREG)
2806                        ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
2807                                         data->reg_num, &data->val_out);
2808                else
2809                        ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
2810                                         data->reg_num, data->val_in);
2811                break;
2812        default:
2813                return -EOPNOTSUPP;
2814        }
2815        return ret;
2816}
2817
2818static void cxgb_set_rxmode(struct net_device *dev)
2819{
2820        /* unfortunately we can't return errors to the stack */
2821        set_rxmode(dev, -1, false);
2822}
2823
2824static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2825{
2826        int ret;
2827        struct port_info *pi = netdev_priv(dev);
2828
2829        if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
2830                return -EINVAL;
2831        ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
2832                            -1, -1, -1, true);
2833        if (!ret)
2834                dev->mtu = new_mtu;
2835        return ret;
2836}
2837
2838static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2839{
2840        int ret;
2841        struct sockaddr *addr = p;
2842        struct port_info *pi = netdev_priv(dev);
2843
2844        if (!is_valid_ether_addr(addr->sa_data))
2845                return -EINVAL;
2846
2847        ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
2848                            pi->xact_addr_filt, addr->sa_data, true, true);
2849        if (ret < 0)
2850                return ret;
2851
2852        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2853        pi->xact_addr_filt = ret;
2854        return 0;
2855}
2856
2857#ifdef CONFIG_NET_POLL_CONTROLLER
2858static void cxgb_netpoll(struct net_device *dev)
2859{
2860        struct port_info *pi = netdev_priv(dev);
2861        struct adapter *adap = pi->adapter;
2862
2863        if (adap->flags & USING_MSIX) {
2864                int i;
2865                struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2866
2867                for (i = pi->nqsets; i; i--, rx++)
2868                        t4_sge_intr_msix(0, &rx->rspq);
2869        } else
2870                t4_intr_handler(adap)(0, adap);
2871}
2872#endif
2873
2874static const struct net_device_ops cxgb4_netdev_ops = {
2875        .ndo_open             = cxgb_open,
2876        .ndo_stop             = cxgb_close,
2877        .ndo_start_xmit       = t4_eth_xmit,
2878        .ndo_get_stats64      = cxgb_get_stats,
2879        .ndo_set_rx_mode      = cxgb_set_rxmode,
2880        .ndo_set_mac_address  = cxgb_set_mac_addr,
2881        .ndo_validate_addr    = eth_validate_addr,
2882        .ndo_do_ioctl         = cxgb_ioctl,
2883        .ndo_change_mtu       = cxgb_change_mtu,
2884#ifdef CONFIG_NET_POLL_CONTROLLER
2885        .ndo_poll_controller  = cxgb_netpoll,
2886#endif
2887};
2888
2889void t4_fatal_err(struct adapter *adap)
2890{
2891        t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
2892        t4_intr_disable(adap);
2893        dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
2894}
2895
2896static void setup_memwin(struct adapter *adap)
2897{
2898        u32 bar0;
2899
2900        bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
2901        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
2902                     (bar0 + MEMWIN0_BASE) | BIR(0) |
2903                     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
2904        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
2905                     (bar0 + MEMWIN1_BASE) | BIR(0) |
2906                     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
2907        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
2908                     (bar0 + MEMWIN2_BASE) | BIR(0) |
2909                     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
2910        if (adap->vres.ocq.size) {
2911                unsigned int start, sz_kb;
2912
2913                start = pci_resource_start(adap->pdev, 2) +
2914                        OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
2915                sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
2916                t4_write_reg(adap,
2917                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
2918                             start | BIR(1) | WINDOW(ilog2(sz_kb)));
2919                t4_write_reg(adap,
2920                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
2921                             adap->vres.ocq.start);
2922                t4_read_reg(adap,
2923                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
2924        }
2925}
2926
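/*
 * Worked example of the WINDOW() encoding above (editorial): the field
 * holds ilog2(aperture) - 10, i.e. log2 of the window size in KB.  If,
 * say, an aperture were 64KB:
 *
 *      WINDOW(ilog2(65536) - 10) == WINDOW(6)      // 2^6 KB = 64KB
 *
 * The trailing t4_read_reg() flushes the posted write so the OCQ window
 * is in place before anyone goes through it.
 */
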
2927static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
2928{
2929        u32 v;
2930        int ret;
2931
2932        /* get device capabilities */
2933        memset(c, 0, sizeof(*c));
2934        c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2935                               FW_CMD_REQUEST | FW_CMD_READ);
2936        c->retval_len16 = htonl(FW_LEN16(*c));
2937        ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
2938        if (ret < 0)
2939                return ret;
2940
2941        /* select capabilities we'll be using */
2942        if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
2943                if (!vf_acls)
2944                        c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
2945                else
2946                        c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
2947        } else if (vf_acls) {
2948                dev_err(adap->pdev_dev, "virtualization ACLs not supported\n");
2949                return -EINVAL;
2950        }
2951        c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2952                               FW_CMD_REQUEST | FW_CMD_WRITE);
2953        ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
2954        if (ret < 0)
2955                return ret;
2956
2957        ret = t4_config_glbl_rss(adap, adap->fn,
2958                                 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
2959                                 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
2960                                 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
2961        if (ret < 0)
2962                return ret;
2963
2964        ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
2965                          0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
2966        if (ret < 0)
2967                return ret;
2968
2969        t4_sge_init(adap);
2970
2971        /* tweak some settings */
2972        t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
2973        t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
2974        t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
2975        v = t4_read_reg(adap, TP_PIO_DATA);
2976        t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
2977
2978        /* get basic stuff going */
2979        return t4_early_init(adap, adap->fn);
2980}
2981
2982/*
2983 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
2984 */
2985#define MAX_ATIDS 8192U
2986
2987/*
2988 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
2989 */
2990static int adap_init0(struct adapter *adap)
2991{
2992        int ret;
2993        u32 v, port_vec;
2994        enum dev_state state;
2995        u32 params[7], val[7];
2996        struct fw_caps_config_cmd c;
2997
2998        ret = t4_check_fw_version(adap);
2999        if (ret == -EINVAL || ret > 0) {
3000                if (upgrade_fw(adap) >= 0)             /* recache FW version */
3001                        ret = t4_check_fw_version(adap);
3002        }
3003        if (ret < 0)
3004                return ret;
3005
3006        /* contact FW, request master */
3007        ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
3008        if (ret < 0) {
3009                dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3010                        ret);
3011                return ret;
3012        }
3013
3014        /* reset device */
3015        ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
3016        if (ret < 0)
3017                goto bye;
3018
3019        for (v = 0; v < SGE_NTIMERS - 1; v++)
3020                adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
3021        adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
3022        adap->sge.counter_val[0] = 1;
3023        for (v = 1; v < SGE_NCOUNTERS; v++)
3024                adap->sge.counter_val[v] = min(intr_cnt[v - 1],
3025                                               THRESHOLD_3_MASK);
3026#define FW_PARAM_DEV(param) \
3027        (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
3028         FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
3029
3030        params[0] = FW_PARAM_DEV(CCLK);
3031        ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
3032        if (ret < 0)
3033                goto bye;
3034        adap->params.vpd.cclk = val[0];
3035
3036        ret = adap_init1(adap, &c);
3037        if (ret < 0)
3038                goto bye;
3039
3040#define FW_PARAM_PFVF(param) \
3041        (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3042         FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
3043         FW_PARAMS_PARAM_Y(adap->fn))
3044
3045        params[0] = FW_PARAM_DEV(PORTVEC);
3046        params[1] = FW_PARAM_PFVF(L2T_START);
3047        params[2] = FW_PARAM_PFVF(L2T_END);
3048        params[3] = FW_PARAM_PFVF(FILTER_START);
3049        params[4] = FW_PARAM_PFVF(FILTER_END);
3050        params[5] = FW_PARAM_PFVF(IQFLINT_START);
3051        params[6] = FW_PARAM_PFVF(EQ_START);
3052        ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
3053        if (ret < 0)
3054                goto bye;
3055        port_vec = val[0];
3056        adap->tids.ftid_base = val[3];
3057        adap->tids.nftids = val[4] - val[3] + 1;
3058        adap->sge.ingr_start = val[5];
3059        adap->sge.egr_start = val[6];
3060
3061        if (c.ofldcaps) {
3062                /* query offload-related parameters */
3063                params[0] = FW_PARAM_DEV(NTID);
3064                params[1] = FW_PARAM_PFVF(SERVER_START);
3065                params[2] = FW_PARAM_PFVF(SERVER_END);
3066                params[3] = FW_PARAM_PFVF(TDDP_START);
3067                params[4] = FW_PARAM_PFVF(TDDP_END);
3068                params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3069                ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3070                                      val);
3071                if (ret < 0)
3072                        goto bye;
3073                adap->tids.ntids = val[0];
3074                adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3075                adap->tids.stid_base = val[1];
3076                adap->tids.nstids = val[2] - val[1] + 1;
3077                adap->vres.ddp.start = val[3];
3078                adap->vres.ddp.size = val[4] - val[3] + 1;
3079                adap->params.ofldq_wr_cred = val[5];
3080                adap->params.offload = 1;
3081        }
3082        if (c.rdmacaps) {
3083                params[0] = FW_PARAM_PFVF(STAG_START);
3084                params[1] = FW_PARAM_PFVF(STAG_END);
3085                params[2] = FW_PARAM_PFVF(RQ_START);
3086                params[3] = FW_PARAM_PFVF(RQ_END);
3087                params[4] = FW_PARAM_PFVF(PBL_START);
3088                params[5] = FW_PARAM_PFVF(PBL_END);
3089                ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3090                                      val);
3091                if (ret < 0)
3092                        goto bye;
3093                adap->vres.stag.start = val[0];
3094                adap->vres.stag.size = val[1] - val[0] + 1;
3095                adap->vres.rq.start = val[2];
3096                adap->vres.rq.size = val[3] - val[2] + 1;
3097                adap->vres.pbl.start = val[4];
3098                adap->vres.pbl.size = val[5] - val[4] + 1;
3099
3100                params[0] = FW_PARAM_PFVF(SQRQ_START);
3101                params[1] = FW_PARAM_PFVF(SQRQ_END);
3102                params[2] = FW_PARAM_PFVF(CQ_START);
3103                params[3] = FW_PARAM_PFVF(CQ_END);
3104                params[4] = FW_PARAM_PFVF(OCQ_START);
3105                params[5] = FW_PARAM_PFVF(OCQ_END);
3106                ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3107                                      val);
3108                if (ret < 0)
3109                        goto bye;
3110                adap->vres.qp.start = val[0];
3111                adap->vres.qp.size = val[1] - val[0] + 1;
3112                adap->vres.cq.start = val[2];
3113                adap->vres.cq.size = val[3] - val[2] + 1;
3114                adap->vres.ocq.start = val[4];
3115                adap->vres.ocq.size = val[5] - val[4] + 1;
3116        }
3117        if (c.iscsicaps) {
3118                params[0] = FW_PARAM_PFVF(ISCSI_START);
3119                params[1] = FW_PARAM_PFVF(ISCSI_END);
3120                ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
3121                                      val);
3122                if (ret < 0)
3123                        goto bye;
3124                adap->vres.iscsi.start = val[0];
3125                adap->vres.iscsi.size = val[1] - val[0] + 1;
3126        }
3127#undef FW_PARAM_PFVF
3128#undef FW_PARAM_DEV
3129
3130        adap->params.nports = hweight32(port_vec);
3131        adap->params.portvec = port_vec;
3132        adap->flags |= FW_OK;
3133
3134        /* These are finalized by FW initialization, load their values now */
3135        v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3136        adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3137        t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
3138        t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3139                     adap->params.b_wnd);
3140
3141#ifdef CONFIG_PCI_IOV
3142        /*
3143         * Provision resource limits for Virtual Functions.  We currently
3144         * grant them all the same static resource limits except for the Port
3145         * Access Rights Mask, which we derive from the PF.  Ultimately,
3146         * the static provisioning for both the PF and its VFs should be
3147         * managed in a persistent manner, per device, by the controlling
3148         * firmware.
3149         */
3150        {
3151                int pf, vf;
3152
3153                for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3154                        if (num_vf[pf] <= 0)
3155                                continue;
3156
3157                        /* VF numbering starts at 1! */
3158                        for (vf = 1; vf <= num_vf[pf]; vf++) {
3159                                ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
3160                                                  VFRES_NEQ, VFRES_NETHCTRL,
3161                                                  VFRES_NIQFLINT, VFRES_NIQ,
3162                                                  VFRES_TC, VFRES_NVI,
3163                                                  FW_PFVF_CMD_CMASK_MASK,
3164                                                  pfvfres_pmask(adap, pf, vf),
3165                                                  VFRES_NEXACTF,
3166                                                  VFRES_R_CAPS, VFRES_WX_CAPS);
3167                                if (ret < 0)
3168                                        dev_warn(adap->pdev_dev, "failed to "
3169                                                 "provision pf/vf=%d/%d; "
3170                                                 "err=%d\n", pf, vf, ret);
3171                        }
3172                }
3173        }
3174#endif
3175
3176        setup_memwin(adap);
3177        return 0;
3178
3179        /*
3180         * If a command timed out or failed with EIO, the firmware is either
3181         * not operating within its spec or something catastrophic happened
3182         * to the HW/FW; in either case stop issuing commands.
3183         */
3184bye:    if (ret != -ETIMEDOUT && ret != -EIO)
3185                t4_fw_bye(adap, adap->fn);
3186        return ret;
3187}
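/*
 * For reference, the FW_PARAM_DEV/FW_PARAM_PFVF handles queried in
 * adap_init0() are plain 32-bit words with mnemonic and index fields packed
 * in.  A minimal sketch of a single-parameter query built from the raw
 * macros (the helper name is illustrative, not part of the driver):
 */
static inline int example_query_cclk(struct adapter *adap, u32 *cclk)
{
	u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK);

	/* mbox/pf/vf choices mirror the calls in adap_init0() */
	return t4_query_params(adap, adap->fn, adap->fn, 0, 1, &param, cclk);
}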
3188
3189/* EEH callbacks */
3190
3191static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
3192                                         pci_channel_state_t state)
3193{
3194        int i;
3195        struct adapter *adap = pci_get_drvdata(pdev);
3196
3197        if (!adap)
3198                goto out;
3199
3200        rtnl_lock();
3201        adap->flags &= ~FW_OK;
3202        notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
3203        for_each_port(adap, i) {
3204                struct net_device *dev = adap->port[i];
3205
3206                netif_device_detach(dev);
3207                netif_carrier_off(dev);
3208        }
3209        if (adap->flags & FULL_INIT_DONE)
3210                cxgb_down(adap);
3211        rtnl_unlock();
3212        pci_disable_device(pdev);
3213out:    return state == pci_channel_io_perm_failure ?
3214                PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
3215}
3216
3217static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
3218{
3219        int i, ret;
3220        struct fw_caps_config_cmd c;
3221        struct adapter *adap = pci_get_drvdata(pdev);
3222
3223        if (!adap) {
3224                pci_restore_state(pdev);
3225                pci_save_state(pdev);
3226                return PCI_ERS_RESULT_RECOVERED;
3227        }
3228
3229        if (pci_enable_device(pdev)) {
3230                dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
3231                return PCI_ERS_RESULT_DISCONNECT;
3232        }
3233
3234        pci_set_master(pdev);
3235        pci_restore_state(pdev);
3236        pci_save_state(pdev);
3237        pci_cleanup_aer_uncorrect_error_status(pdev);
3238
3239        if (t4_wait_dev_ready(adap) < 0)
3240                return PCI_ERS_RESULT_DISCONNECT;
3241        if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
3242                return PCI_ERS_RESULT_DISCONNECT;
3243        adap->flags |= FW_OK;
3244        if (adap_init1(adap, &c))
3245                return PCI_ERS_RESULT_DISCONNECT;
3246
3247        for_each_port(adap, i) {
3248                struct port_info *p = adap2pinfo(adap, i);
3249
3250                ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
3251                                  NULL, NULL);
3252                if (ret < 0)
3253                        return PCI_ERS_RESULT_DISCONNECT;
3254                p->viid = ret;
3255                p->xact_addr_filt = -1;
3256        }
3257
3258        t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3259                     adap->params.b_wnd);
3260        setup_memwin(adap);
3261        if (cxgb_up(adap))
3262                return PCI_ERS_RESULT_DISCONNECT;
3263        return PCI_ERS_RESULT_RECOVERED;
3264}
3265
3266static void eeh_resume(struct pci_dev *pdev)
3267{
3268        int i;
3269        struct adapter *adap = pci_get_drvdata(pdev);
3270
3271        if (!adap)
3272                return;
3273
3274        rtnl_lock();
3275        for_each_port(adap, i) {
3276                struct net_device *dev = adap->port[i];
3277
3278                if (netif_running(dev)) {
3279                        link_start(dev);
3280                        cxgb_set_rxmode(dev);
3281                }
3282                netif_device_attach(dev);
3283        }
3284        rtnl_unlock();
3285}
3286
3287static struct pci_error_handlers cxgb4_eeh = {
3288        .error_detected = eeh_err_detected,
3289        .slot_reset     = eeh_slot_reset,
3290        .resume         = eeh_resume,
3291};
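/*
 * On an EEH/AER event the PCI core drives the callbacks above in order:
 * eeh_err_detected() quiesces the adapter and says whether a reset might
 * help, eeh_slot_reset() reinitializes the function after the slot reset,
 * and eeh_resume() restarts any interfaces that were running.
 */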
3292
3293static inline bool is_10g_port(const struct link_config *lc)
3294{
3295        return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
3296}
3297
3298static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
3299                             unsigned int size, unsigned int iqe_size)
3300{
3301        q->intr_params = QINTR_TIMER_IDX(timer_idx) |
3302                         (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
3303        q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
3304        q->iqe_len = iqe_size;
3305        q->size = size;
3306}
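/*
 * A minimal usage sketch for init_rspq() (not called by the driver):
 * configure a response queue of 1024 descriptors, 64 bytes each, with
 * holdoff timer index 5 and packet-count threshold index 2.  Because
 * 2 < SGE_NCOUNTERS, QINTR_CNT_EN is set and the queue moderates
 * interrupts using both the timer and the packet-count threshold.
 */
static inline void example_cfg_rspq(struct sge_rspq *q)
{
	init_rspq(q, 5, 2, 1024, 64);
}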
3307
3308/*
3309 * Perform default configuration of DMA queues depending on the number and type
3310 * of ports we found and the number of available CPUs.  Most settings can be
3311 * modified by the admin prior to actual use.
3312 */
3313static void __devinit cfg_queues(struct adapter *adap)
3314{
3315        struct sge *s = &adap->sge;
3316        int i, q10g = 0, n10g = 0, qidx = 0;
3317
3318        for_each_port(adap, i)
3319                n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
3320
3321        /*
3322         * We default to 1 queue per non-10G port and up to one queue per
3323         * online CPU for each 10G port.
3324         */
3325        if (n10g)
3326                q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
3327        if (q10g > num_online_cpus())
3328                q10g = num_online_cpus();
3329
3330        for_each_port(adap, i) {
3331                struct port_info *pi = adap2pinfo(adap, i);
3332
3333                pi->first_qset = qidx;
3334                pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
3335                qidx += pi->nqsets;
3336        }
3337
3338        s->ethqsets = qidx;
3339        s->max_ethqsets = qidx;   /* MSI-X may lower it later */
3340
3341        if (is_offload(adap)) {
3342                /*
3343                 * For offload we use 1 queue/channel if all ports are 1G or
3344                 * slower; otherwise we divide the available queues among the
3345                 * channels, capped by the number of online CPUs.
3346                 */
3347                if (n10g) {
3348                        i = min_t(int, ARRAY_SIZE(s->ofldrxq),
3349                                  num_online_cpus());
3350                        s->ofldqsets = roundup(i, adap->params.nports);
3351                } else
3352                        s->ofldqsets = adap->params.nports;
3353                /* For RDMA one Rx queue per channel suffices */
3354                s->rdmaqs = adap->params.nports;
3355        }
3356
3357        for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
3358                struct sge_eth_rxq *r = &s->ethrxq[i];
3359
3360                init_rspq(&r->rspq, 0, 0, 1024, 64);
3361                r->fl.size = 72;
3362        }
3363
3364        for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
3365                s->ethtxq[i].q.size = 1024;
3366
3367        for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
3368                s->ctrlq[i].q.size = 512;
3369
3370        for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
3371                s->ofldtxq[i].q.size = 1024;
3372
3373        for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
3374                struct sge_ofld_rxq *r = &s->ofldrxq[i];
3375
3376                init_rspq(&r->rspq, 0, 0, 1024, 64);
3377                r->rspq.uld = CXGB4_ULD_ISCSI;
3378                r->fl.size = 72;
3379        }
3380
3381        for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
3382                struct sge_ofld_rxq *r = &s->rdmarxq[i];
3383
3384                init_rspq(&r->rspq, 0, 0, 511, 64);
3385                r->rspq.uld = CXGB4_ULD_RDMA;
3386                r->fl.size = 72;
3387        }
3388
3389        init_rspq(&s->fw_evtq, 6, 0, 512, 64);
3390        init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
3391}
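/*
 * Worked example: an adapter with two 10G ports (and no others) on a
 * machine with 8 online CPUs, assuming MAX_ETH_QSETS is 32 as defined in
 * cxgb4.h: q10g = (32 - 0) / 2 = 16, capped to 8 by num_online_cpus(), so
 * each port gets 8 queue sets and ethqsets = max_ethqsets = 16.
 */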
3392
3393/*
3394 * Reduce the number of Ethernet queues across all ports to at most n.
3395 * The caller guarantees n is large enough to leave each port at least one.
3396 */
3397static void __devinit reduce_ethqs(struct adapter *adap, int n)
3398{
3399        int i;
3400        struct port_info *pi;
3401
3402        while (n < adap->sge.ethqsets)
3403                for_each_port(adap, i) {
3404                        pi = adap2pinfo(adap, i);
3405                        if (pi->nqsets > 1) {
3406                                pi->nqsets--;
3407                                adap->sge.ethqsets--;
3408                                if (adap->sge.ethqsets <= n)
3409                                        break;
3410                        }
3411                }
3412
3413        n = 0;
3414        for_each_port(adap, i) {
3415                pi = adap2pinfo(adap, i);
3416                pi->first_qset = n;
3417                n += pi->nqsets;
3418        }
3419}
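/*
 * Worked example: four ports with nqsets = { 8, 8, 1, 1 } (ethqsets = 18)
 * reduced to n = 10.  Each round-robin pass trims one queue set from every
 * port that still has more than one, ending at { 4, 4, 1, 1 }; first_qset
 * is then recomputed as 0, 4, 8, 9.
 */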
3420
3421/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
3422#define EXTRA_VECS 2
3423
3424static int __devinit enable_msix(struct adapter *adap)
3425{
3426        int ofld_need = 0;
3427        int i, err, want, need;
3428        struct sge *s = &adap->sge;
3429        unsigned int nchan = adap->params.nports;
3430        struct msix_entry entries[MAX_INGQ + 1];
3431
3432        for (i = 0; i < ARRAY_SIZE(entries); ++i)
3433                entries[i].entry = i;
3434
3435        want = s->max_ethqsets + EXTRA_VECS;
3436        if (is_offload(adap)) {
3437                want += s->rdmaqs + s->ofldqsets;
3438                /* need nchan vectors for each of the two possible ULDs */
3439                ofld_need = 2 * nchan;
3440        }
3441        need = adap->params.nports + EXTRA_VECS + ofld_need;
3442
3443        while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
3444                want = err;
3445
3446        if (!err) {
3447                /*
3448                 * Distribute available vectors to the various queue groups.
3449                 * Every group gets its minimum requirement and NIC gets top
3450                 * priority for leftovers.
3451                 */
3452                i = want - EXTRA_VECS - ofld_need;
3453                if (i < s->max_ethqsets) {
3454                        s->max_ethqsets = i;
3455                        if (i < s->ethqsets)
3456                                reduce_ethqs(adap, i);
3457                }
3458                if (is_offload(adap)) {
3459                        i = want - EXTRA_VECS - s->max_ethqsets;
3460                        i -= ofld_need - nchan;
3461                        s->ofldqsets = (i / nchan) * nchan;  /* round down */
3462                }
3463                for (i = 0; i < want; ++i)
3464                        adap->msix_info[i].vec = entries[i].vector;
3465        } else if (err > 0)
3466                dev_info(adap->pdev_dev,
3467                         "only %d MSI-X vectors left, not using MSI-X\n", err);
3468        return err;
3469}
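/*
 * pci_enable_msix() returns 0 on success, a negative errno on failure, or,
 * if the request was too large, the positive number of vectors that could
 * have been allocated.  The retry loop above therefore shrinks "want" to
 * whatever is on offer until the request succeeds or the offer drops below
 * the hard minimum "need".  Hypothetical example for a two-port offload
 * adapter: want = 16 Ethernet qsets + 2 extra + 8 offload + 2 RDMA = 28,
 * need = 2 + 2 + 4 = 8; if only 20 vectors are available, the first call
 * returns 20 and the retry asks for exactly 20.
 */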
3470
3471#undef EXTRA_VECS
3472
3473static int __devinit init_rss(struct adapter *adap)
3474{
3475        unsigned int i, j;
3476
3477        for_each_port(adap, i) {
3478                struct port_info *pi = adap2pinfo(adap, i);
3479
3480                pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
3481                if (!pi->rss)
3482                        return -ENOMEM;
3483                for (j = 0; j < pi->rss_size; j++)
3484                        pi->rss[j] = j % pi->nqsets;
3485        }
3486        return 0;
3487}
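/*
 * Worked example: a port with rss_size = 64 and nqsets = 4 gets the RSS
 * indirection table 0, 1, 2, 3, 0, 1, 2, 3, ..., spreading incoming flows
 * round-robin across its four queue sets.
 */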
3488
3489static void __devinit print_port_info(const struct net_device *dev)
3490{
3491        static const char *base[] = {
3492                "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
3493                "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
3494        };
3495
3496        char buf[80];
3497        char *bufp = buf;
3498        const char *spd = "";
3499        const struct port_info *pi = netdev_priv(dev);
3500        const struct adapter *adap = pi->adapter;
3501
3502        if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
3503                spd = " 2.5 GT/s";
3504        else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
3505                spd = " 5 GT/s";
3506
3507        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
3508                bufp += sprintf(bufp, "100/");
3509        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
3510                bufp += sprintf(bufp, "1000/");
3511        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
3512                bufp += sprintf(bufp, "10G/");
3513        if (bufp != buf)
3514                --bufp;
3515        sprintf(bufp, "BASE-%s", base[pi->port_type]);
3516
3517        netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
3518                    adap->params.vpd.id, adap->params.rev, buf,
3519                    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
3520                    (adap->flags & USING_MSIX) ? " MSI-X" :
3521                    (adap->flags & USING_MSI) ? " MSI" : "");
3522        netdev_info(dev, "S/N: %s, E/C: %s\n",
3523                    adap->params.vpd.sn, adap->params.vpd.ec);
3524}
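/*
 * The result is a log line along these lines (values hypothetical):
 *
 *   eth0: Chelsio T420-CR rev 1 10GBASE-R SFP+ RNIC PCIe x8 5 GT/s MSI-X
 */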
3525
3526static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
3527{
3528        u16 v;
3529        int pos;
3530
3531        pos = pci_pcie_cap(dev);
3532        if (pos > 0) {
3533                pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
3534                v |= PCI_EXP_DEVCTL_RELAX_EN;
3535                pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
3536        }
3537}
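/*
 * PCI_EXP_DEVCTL_RELAX_EN permits the adapter to set the Relaxed Ordering
 * attribute in the TLPs it issues, letting the fabric reorder them around
 * strongly-ordered traffic, which can improve DMA throughput on some
 * platforms.
 */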
3538
3539/*
3540 * Free the following resources:
3541 * - memory used for tables
3542 * - MSI/MSI-X
3543 * - net devices
3544 * - resources FW is holding for us
3545 */
3546static void free_some_resources(struct adapter *adapter)
3547{
3548        unsigned int i;
3549
3550        t4_free_mem(adapter->l2t);
3551        t4_free_mem(adapter->tids.tid_tab);
3552        disable_msi(adapter);
3553
3554        for_each_port(adapter, i)
3555                if (adapter->port[i]) {
3556                        kfree(adap2pinfo(adapter, i)->rss);
3557                        free_netdev(adapter->port[i]);
3558                }
3559        if (adapter->flags & FW_OK)
3560                t4_fw_bye(adapter, adapter->fn);
3561}
3562
3563#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3564                   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3565
3566static int __devinit init_one(struct pci_dev *pdev,
3567                              const struct pci_device_id *ent)
3568{
3569        int func, i, err;
3570        struct port_info *pi;
3571        unsigned int highdma = 0;
3572        struct adapter *adapter = NULL;
3573
3574        printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3575
3576        err = pci_request_regions(pdev, KBUILD_MODNAME);
3577        if (err) {
3578                /* Just info, some other driver may have claimed the device. */
3579                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3580                return err;
3581        }
3582
3583        /* We control everything through one PF */
3584        func = PCI_FUNC(pdev->devfn);
3585        if (func != ent->driver_data) {
3586                pci_save_state(pdev);        /* to restore SR-IOV later */
3587                goto sriov;
3588        }
3589
3590        err = pci_enable_device(pdev);
3591        if (err) {
3592                dev_err(&pdev->dev, "cannot enable PCI device\n");
3593                goto out_release_regions;
3594        }
3595
3596        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3597                highdma = NETIF_F_HIGHDMA;
3598                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3599                if (err) {
3600                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3601                                "coherent allocations\n");
3602                        goto out_disable_device;
3603                }
3604        } else {
3605                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3606                if (err) {
3607                        dev_err(&pdev->dev, "no usable DMA configuration\n");
3608                        goto out_disable_device;
3609                }
3610        }
3611
3612        pci_enable_pcie_error_reporting(pdev);
3613        enable_pcie_relaxed_ordering(pdev);
3614        pci_set_master(pdev);
3615        pci_save_state(pdev);
3616
3617        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3618        if (!adapter) {
3619                err = -ENOMEM;
3620                goto out_disable_device;
3621        }
3622
3623        adapter->regs = pci_ioremap_bar(pdev, 0);
3624        if (!adapter->regs) {
3625                dev_err(&pdev->dev, "cannot map device registers\n");
3626                err = -ENOMEM;
3627                goto out_free_adapter;
3628        }
3629
3630        adapter->pdev = pdev;
3631        adapter->pdev_dev = &pdev->dev;
3632        adapter->fn = func;
3633        adapter->msg_enable = dflt_msg_enable;
3634        memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
3635
3636        spin_lock_init(&adapter->stats_lock);
3637        spin_lock_init(&adapter->tid_release_lock);
3638
3639        INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
3640
3641        err = t4_prep_adapter(adapter);
3642        if (err)
3643                goto out_unmap_bar;
3644        err = adap_init0(adapter);
3645        if (err)
3646                goto out_unmap_bar;
3647
3648        for_each_port(adapter, i) {
3649                struct net_device *netdev;
3650
3651                netdev = alloc_etherdev_mq(sizeof(struct port_info),
3652                                           MAX_ETH_QSETS);
3653                if (!netdev) {
3654                        err = -ENOMEM;
3655                        goto out_free_dev;
3656                }
3657
3658                SET_NETDEV_DEV(netdev, &pdev->dev);
3659
3660                adapter->port[i] = netdev;
3661                pi = netdev_priv(netdev);
3662                pi->adapter = adapter;
3663                pi->xact_addr_filt = -1;
3664                pi->rx_offload = RX_CSO;
3665                pi->port_id = i;
3666                netdev->irq = pdev->irq;
3667
3668                netdev->features |= NETIF_F_SG | TSO_FLAGS;
3669                netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3670                netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
3671                netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3672                netdev->vlan_features = netdev->features & VLAN_FEAT;
3673
3674                netdev->netdev_ops = &cxgb4_netdev_ops;
3675                SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3676        }
3677
3678        pci_set_drvdata(pdev, adapter);
3679
3680        if (adapter->flags & FW_OK) {
3681                err = t4_port_init(adapter, func, func, 0);
3682                if (err)
3683                        goto out_free_dev;
3684        }
3685
3686        /*
3687         * Configure queues and allocate tables now, they can be needed as
3688         * soon as the first register_netdev completes.
3689         */
3690        cfg_queues(adapter);
3691
3692        adapter->l2t = t4_init_l2t();
3693        if (!adapter->l2t) {
3694                /* We tolerate a lack of L2T, giving up some functionality */
3695                dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
3696                adapter->params.offload = 0;
3697        }
3698
3699        if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
3700                dev_warn(&pdev->dev, "could not allocate TID table, "
3701                         "continuing\n");
3702                adapter->params.offload = 0;
3703        }
3704
3705        /* See what interrupts we'll be using */
3706        if (msi > 1 && enable_msix(adapter) == 0)
3707                adapter->flags |= USING_MSIX;
3708        else if (msi > 0 && pci_enable_msi(pdev) == 0)
3709                adapter->flags |= USING_MSI;
3710
3711        err = init_rss(adapter);
3712        if (err)
3713                goto out_free_dev;
3714
3715        /*
3716         * The card is now ready to go.  If any errors occur during device
3717         * registration we do not fail the whole card but rather proceed only
3718         * with the ports we manage to register successfully.  However we must
3719         * register at least one net device.
3720         */
3721        for_each_port(adapter, i) {
3722                pi = adap2pinfo(adapter, i);
3723                netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
3724                netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
3725
3726                err = register_netdev(adapter->port[i]);
3727                if (err)
3728                        break;
3729                adapter->chan_map[pi->tx_chan] = i;
3730                print_port_info(adapter->port[i]);
3731        }
3732        if (i == 0) {
3733                dev_err(&pdev->dev, "could not register any net devices\n");
3734                goto out_free_dev;
3735        }
3736        if (err) {
3737                dev_warn(&pdev->dev, "only %d net devices registered\n", i);
3738                err = 0;
3739        }
3740
3741        if (cxgb4_debugfs_root) {
3742                adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
3743                                                           cxgb4_debugfs_root);
3744                setup_debugfs(adapter);
3745        }
3746
3747        if (is_offload(adapter))
3748                attach_ulds(adapter);
3749
3750sriov:
3751#ifdef CONFIG_PCI_IOV
3752        if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
3753                if (pci_enable_sriov(pdev, num_vf[func]) == 0)
3754                        dev_info(&pdev->dev,
3755                                 "instantiated %u virtual functions\n",
3756                                 num_vf[func]);
3757#endif
3758        return 0;
3759
3760 out_free_dev:
3761        free_some_resources(adapter);
3762 out_unmap_bar:
3763        iounmap(adapter->regs);
3764 out_free_adapter:
3765        kfree(adapter);
3766 out_disable_device:
3767        pci_disable_pcie_error_reporting(pdev);
3768        pci_disable_device(pdev);
3769 out_release_regions:
3770        pci_release_regions(pdev);
3771        pci_set_drvdata(pdev, NULL);
3772        return err;
3773}
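/*
 * Note the unwind ladder above: each failure path jumps to the label that
 * releases everything acquired up to that point, in reverse order of
 * acquisition, so a failed probe never leaks partially initialized state.
 */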
3774
3775static void __devexit remove_one(struct pci_dev *pdev)
3776{
3777        struct adapter *adapter = pci_get_drvdata(pdev);
3778
3779        pci_disable_sriov(pdev);
3780
3781        if (adapter) {
3782                int i;
3783
3784                if (is_offload(adapter))
3785                        detach_ulds(adapter);
3786
3787                for_each_port(adapter, i)
3788                        if (adapter->port[i]->reg_state == NETREG_REGISTERED)
3789                                unregister_netdev(adapter->port[i]);
3790
3791                if (adapter->debugfs_root)
3792                        debugfs_remove_recursive(adapter->debugfs_root);
3793
3794                if (adapter->flags & FULL_INIT_DONE)
3795                        cxgb_down(adapter);
3796
3797                free_some_resources(adapter);
3798                iounmap(adapter->regs);
3799                kfree(adapter);
3800                pci_disable_pcie_error_reporting(pdev);
3801                pci_disable_device(pdev);
3802                pci_release_regions(pdev);
3803                pci_set_drvdata(pdev, NULL);
3804        } else
3805                pci_release_regions(pdev);
3806}
3807
3808static struct pci_driver cxgb4_driver = {
3809        .name     = KBUILD_MODNAME,
3810        .id_table = cxgb4_pci_tbl,
3811        .probe    = init_one,
3812        .remove   = __devexit_p(remove_one),
3813        .err_handler = &cxgb4_eeh,
3814};
3815
3816static int __init cxgb4_init_module(void)
3817{
3818        int ret;
3819
3820        /* Debugfs support is optional, just warn if this fails */
3821        cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3822        if (!cxgb4_debugfs_root)
3823                pr_warning("could not create debugfs entry, continuing\n");
3824
3825        ret = pci_register_driver(&cxgb4_driver);
3826        if (ret < 0)
3827                debugfs_remove(cxgb4_debugfs_root);
3828        return ret;
3829}
3830
3831static void __exit cxgb4_cleanup_module(void)
3832{
3833        pci_unregister_driver(&cxgb4_driver);
3834        debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
3835}
3836
3837module_init(cxgb4_init_module);
3838module_exit(cxgb4_cleanup_module);
3839