linux/drivers/net/cxgb3/cxgb3_main.c
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
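
/*
 * Illustrative usage only: to keep the driver off MSI-X one could load it
 * with "modprobe cxgb3 msi=1", or force legacy pin interrupts with
 * "modprobe cxgb3 msi=0".  The 0644 permission above also exposes the knob
 * at /sys/module/cxgb3/parameters/msi; the value is consulted when an
 * adapter is probed.
 */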

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;
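
/*
 * Minimal sketch of the life cycle of this private queue, assuming the
 * usual module init/exit pairing (the actual call sites live outside this
 * excerpt):
 *
 *      cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 *      if (!cxgb3_wq)
 *              return -ENOMEM;
 *      ...
 *      destroy_workqueue(cxgb3_wq);
 */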

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the net device whose link status is to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}
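
/*
 * Example of the console output produced above (device name and speed are
 * situation dependent):
 *
 *      eth0: link up, 10Gbps, full-duplex
 *      eth0: link down
 */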

static void enable_tx_fifo_drain(struct adapter *adapter,
                                 struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
                         F_ENDROPPKT);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
        t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
                                  struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
                         F_ENDROPPKT, 0);
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                disable_tx_fifo_drain(adap, pi);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else {
                netif_carrier_off(dev);

                /* Flush TX FIFO */
                enable_tx_fifo_drain(adap, pi);
        }
        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        disable_tx_fifo_drain(adapter, pi);

                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);

                        /* Flush TX FIFO */
                        enable_tx_fifo_drain(adapter, pi);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter on which a PHY module changed
 *      @port_id: the port index whose PHY module was removed or inserted
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
        else
                printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
                       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        t3_mac_reset(mac);
        t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
        t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
        t3_mac_set_rx_mode(mac, dev);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 10;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

/*
 * Write benign values into the TP's SMT, L2T, and routing tables so that
 * their parity bits start from a known state before parity-error reporting
 * is enabled (see the TP_PARITY_INIT handling in cxgb_up()).  The counts
 * passed to await_mgmt_replies() are cumulative over the 16 SMT, 2048 L2T,
 * and 2048 RTE writes plus the final TCB write.
 */
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->mtu_idx = NMTUS - 1;
                req->iff = i;
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                goto alloc_skb_fail;

        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        if (skb == adap->nofail_skb) {
                i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        }

        t3_tp_set_offload_mode(adap, 0);
        return i;

alloc_skb_fail:
        t3_tp_set_offload_mode(adap, 0);
        return -ENOMEM;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
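
/*
 * Worked example of the mapping built above (illustrative): with nq0 = 2
 * and nq1 = 2, the first half of rspq_map cycles 0, 1, 0, 1, ... for
 * port 0 while the second half cycles 2, 3, 2, 3, ..., so each port's
 * hash buckets are spread round-robin over that port's own response
 * queues.
 */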

static void ring_dbs(struct adapter *adap)
{
        int i, j;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        for (j = 0; j < SGE_TXQ_PER_SET; j++)
                                t3_write_reg(adap, A_SG_KDOORBELL,
                                             F_SELEGRCNTX |
                                             V_EGRCNTX(qs->txq[j].cntxt_id));
        }
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}
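
/*
 * The NAPI_INIT flag is the "note" the comment above refers to; callers
 * guard re-initialization with it, as cxgb_up() does further down in this
 * file:
 *
 *      if (!(adap->flags & NAPI_INIT))
 *              init_napi(adap);
 */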

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;
        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      set_qset_lro - Turn a queue set's LRO capability on and off
 *      @dev: the device the qset is attached to
 *      @qset_idx: the queue set index
 *      @val: the LRO switch
 *
 *      Sets LRO on or off for a particular queue set.
 *      The device's features flag is updated to reflect the LRO
 *      capability when all queues belonging to the device are
 *      in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->params.sge.qset[qset_idx].lro = !!val;
        adapter->sge.qs[qset_idx].lro_enabled = !!val;
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
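
/*
 * Illustrative shell usage of the attributes above; the exact sysfs path
 * depends on where the group is registered and on the interface name
 * (eth0 here is just an example):
 *
 *      # cat /sys/class/net/eth0/cam_size
 *      # echo 8192 > /sys/class/net/eth0/nfilters
 *
 * Writes are rejected with -EBUSY once the adapter is fully initialized
 * (see the FULL_INIT_DONE checks in the setters above).
 */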

static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}
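
/*
 * Worked example of the rate computation above, with illustrative numbers
 * and assuming cclk is expressed in kHz (the "* 1000" scaling suggests as
 * much): for cclk = 200000 (200 MHz), cpt = 100 and bpt = 64, the
 * scheduler sends 64 bytes every 100 core ticks, i.e.
 * v = 200000 * 1000 / 100 = 2000000 windows/sec, and
 * 2000000 * 64 / 125 = 1024000 Kbps (~1 Gbps).
 */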

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct port_info *pi = netdev_priv(adapter->port[idx]);
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                return -ENOMEM;

        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                             GFP_KERNEL);
                if (!adap->nofail_skb)
                        ret = -ENOMEM;
        }

        return ret;
}

static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."                    \
        __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."                \
        __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
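
/*
 * For example, with illustrative version numbers FW_VERSION_MAJOR = 7,
 * FW_VERSION_MINOR = 12 and FW_VERSION_MICRO = 0 (the real values come
 * from the version headers), FW_FNAME expands to "cxgb3/t3fw-7.12.0.bin".
 * TPSRAM_NAME is a template: update_tpsram() below fills in the '%c' with
 * 'b' or 'c' according to the chip revision.
 */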

static inline const char *get_edc_fw_name(int edc_idx)
{
        const char *fw_name = NULL;

        switch (edc_idx) {
        case EDC_OPT_AEL2005:
                fw_name = AEL2005_OPT_EDC_NAME;
                break;
        case EDC_TWX_AEL2005:
                fw_name = AEL2005_TWX_EDC_NAME;
                break;
        case EDC_TWX_AEL2020:
                fw_name = AEL2020_TWX_EDC_NAME;
                break;
        }
        return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
        struct adapter *adapter = phy->adapter;
        const struct firmware *fw;
        char buf[64];
        u32 csum;
        const __be32 *p;
        u16 *cache = phy->phy_cache;
        int i, ret;

        snprintf(buf, sizeof(buf), "%s", get_edc_fw_name(edc_idx));

        ret = request_firmware(&fw, buf, &adapter->pdev->dev);
        if (ret < 0) {
                dev_err(&adapter->pdev->dev,
                        "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }

        /* check size, taking the trailing checksum into account */
        if (fw->size > size + 4) {
                CH_ERR(adapter, "firmware image too large %u, expected %d\n",
                       (unsigned int)fw->size, size + 4);
                ret = -EINVAL;
        }

        /* compute checksum */
        p = (const __be32 *)fw->data;
        for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                ret = -EINVAL;
        }

        for (i = 0; i < size / 4; i++) {
                *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
                *cache++ = be32_to_cpu(p[i]) & 0xffff;
        }

        release_firmware(fw);

        return ret;
}
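
/*
 * The checksum test above assumes the image carries a trailing 32-bit word
 * chosen so that the modulo-2^32 sum of all big-endian words equals
 * 0xffffffff.  Toy example (illustrative): for a two-word payload
 * 0x00000001 0x00000002 the trailing word would be 0xfffffffc, since
 * 0x1 + 0x2 + 0xfffffffc == 0xffffffff.
 */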

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        ret = request_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        FW_FNAME);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch (adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}
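
/*
 * The character returned above plugs into the TPSRAM_NAME template, so a
 * rev C part, for instance, requests "cxgb3/t3c_psram-<version>.bin"; a
 * zero return makes update_tpsram() below skip the update entirely.
 */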

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                int ret = bind_qsets(adap);

                if (ret < 0) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        err = ret;
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        t3_sge_stop(adapter);
        if (!on_wq)
                flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
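
/*
 * Example of the timeout arithmetic above (illustrative, assuming
 * linkpoll_period is expressed in tenths of a second as the "/ 10"
 * suggests): with HZ = 250 and linkpoll_period = 5, the check task is
 * requeued every 250 * 5 / 10 = 125 jiffies, i.e. every half second; with
 * linkpoll_period = 0 the stats_update_period (whole seconds) is used
 * instead.
 */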

static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);
        struct t3c_data *td = T3C_DATA(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_work_sync(&td->tid_release_task);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter, 0);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        netif_set_real_num_tx_queues(dev, pi->nqsets);
        err = netif_set_real_num_rx_queues(dev, pi->nqsets);
        if (err)
                return err;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
        return 0;
}

static int __cxgb_close(struct net_device *dev, int on_wq)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

1425        if (!adapter->open_device_map)
1426                return 0;
1427
1428        /* Stop link fault interrupts */
1429        t3_xgm_intr_disable(adapter, pi->port_id);
1430        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1431
1432        t3_port_intr_disable(adapter, pi->port_id);
1433        netif_tx_stop_all_queues(dev);
1434        pi->phy.ops->power_down(&pi->phy, 1);
1435        netif_carrier_off(dev);
1436        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1437
1438        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
1439        clear_bit(pi->port_id, &adapter->open_device_map);
1440        spin_unlock_irq(&adapter->work_lock);
1441
1442        if (!(adapter->open_device_map & PORT_MASK))
1443                cancel_delayed_work_sync(&adapter->adap_check_task);
1444
1445        if (!adapter->open_device_map)
1446                cxgb_down(adapter, on_wq);
1447
1448        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1449        return 0;
1450}
1451
1452static int cxgb_close(struct net_device *dev)
1453{
1454        return __cxgb_close(dev, 0);
1455}
1456
1457static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1458{
1459        struct port_info *pi = netdev_priv(dev);
1460        struct adapter *adapter = pi->adapter;
1461        struct net_device_stats *ns = &pi->netstats;
1462        const struct mac_stats *pstats;
1463
1464        spin_lock(&adapter->stats_lock);
1465        pstats = t3_mac_update_stats(&pi->mac);
1466        spin_unlock(&adapter->stats_lock);
1467
1468        ns->tx_bytes = pstats->tx_octets;
1469        ns->tx_packets = pstats->tx_frames;
1470        ns->rx_bytes = pstats->rx_octets;
1471        ns->rx_packets = pstats->rx_frames;
1472        ns->multicast = pstats->rx_mcast_frames;
1473
1474        ns->tx_errors = pstats->tx_underrun;
1475        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1476            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1477            pstats->rx_fifo_ovfl;
1478
1479        /* detailed rx_errors */
1480        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1481        ns->rx_over_errors = 0;
1482        ns->rx_crc_errors = pstats->rx_fcs_errs;
1483        ns->rx_frame_errors = pstats->rx_symbol_errs;
1484        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1485        ns->rx_missed_errors = pstats->rx_cong_drops;
1486
1487        /* detailed tx_errors */
1488        ns->tx_aborted_errors = 0;
1489        ns->tx_carrier_errors = 0;
1490        ns->tx_fifo_errors = pstats->tx_underrun;
1491        ns->tx_heartbeat_errors = 0;
1492        ns->tx_window_errors = 0;
1493        return ns;
1494}
1495
1496static u32 get_msglevel(struct net_device *dev)
1497{
1498        struct port_info *pi = netdev_priv(dev);
1499        struct adapter *adapter = pi->adapter;
1500
1501        return adapter->msg_enable;
1502}
1503
1504static void set_msglevel(struct net_device *dev, u32 val)
1505{
1506        struct port_info *pi = netdev_priv(dev);
1507        struct adapter *adapter = pi->adapter;
1508
1509        adapter->msg_enable = val;
1510}
1511
1512static char stats_strings[][ETH_GSTRING_LEN] = {
1513        "TxOctetsOK         ",
1514        "TxFramesOK         ",
1515        "TxMulticastFramesOK",
1516        "TxBroadcastFramesOK",
1517        "TxPauseFrames      ",
1518        "TxUnderrun         ",
1519        "TxExtUnderrun      ",
1520
1521        "TxFrames64         ",
1522        "TxFrames65To127    ",
1523        "TxFrames128To255   ",
1524        "TxFrames256To511   ",
1525        "TxFrames512To1023  ",
1526        "TxFrames1024To1518 ",
1527        "TxFrames1519ToMax  ",
1528
1529        "RxOctetsOK         ",
1530        "RxFramesOK         ",
1531        "RxMulticastFramesOK",
1532        "RxBroadcastFramesOK",
1533        "RxPauseFrames      ",
1534        "RxFCSErrors        ",
1535        "RxSymbolErrors     ",
1536        "RxShortErrors      ",
1537        "RxJabberErrors     ",
1538        "RxLengthErrors     ",
1539        "RxFIFOoverflow     ",
1540
1541        "RxFrames64         ",
1542        "RxFrames65To127    ",
1543        "RxFrames128To255   ",
1544        "RxFrames256To511   ",
1545        "RxFrames512To1023  ",
1546        "RxFrames1024To1518 ",
1547        "RxFrames1519ToMax  ",
1548
1549        "PhyFIFOErrors      ",
1550        "TSO                ",
1551        "VLANextractions    ",
1552        "VLANinsertions     ",
1553        "TxCsumOffload      ",
1554        "RxCsumGood         ",
1555        "LroAggregated      ",
1556        "LroFlushed         ",
1557        "LroNoDesc          ",
1558        "RxDrops            ",
1559
1560        "CheckTXEnToggled   ",
1561        "CheckResets        ",
1562
1563        "LinkFaults         ",
1564};
1565
1566static int get_sset_count(struct net_device *dev, int sset)
1567{
1568        switch (sset) {
1569        case ETH_SS_STATS:
1570                return ARRAY_SIZE(stats_strings);
1571        default:
1572                return -EOPNOTSUPP;
1573        }
1574}
1575
1576#define T3_REGMAP_SIZE (3 * 1024)
1577
1578static int get_regs_len(struct net_device *dev)
1579{
1580        return T3_REGMAP_SIZE;
1581}
1582
1583static int get_eeprom_len(struct net_device *dev)
1584{
1585        return EEPROMSIZE;
1586}
1587
1588static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1589{
1590        struct port_info *pi = netdev_priv(dev);
1591        struct adapter *adapter = pi->adapter;
1592        u32 fw_vers = 0;
1593        u32 tp_vers = 0;
1594
1595        spin_lock(&adapter->stats_lock);
1596        t3_get_fw_version(adapter, &fw_vers);
1597        t3_get_tp_version(adapter, &tp_vers);
1598        spin_unlock(&adapter->stats_lock);
1599
1600        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1601        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1602        strlcpy(info->bus_info, pci_name(adapter->pdev), sizeof(info->bus_info));
1603        if (!fw_vers)
1604                strcpy(info->fw_version, "N/A");
1605        else {
1606                snprintf(info->fw_version, sizeof(info->fw_version),
1607                         "%s %u.%u.%u TP %u.%u.%u",
1608                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1609                         G_FW_VERSION_MAJOR(fw_vers),
1610                         G_FW_VERSION_MINOR(fw_vers),
1611                         G_FW_VERSION_MICRO(fw_vers),
1612                         G_TP_VERSION_MAJOR(tp_vers),
1613                         G_TP_VERSION_MINOR(tp_vers),
1614                         G_TP_VERSION_MICRO(tp_vers));
1615        }
1616}
1617
1618static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1619{
1620        if (stringset == ETH_SS_STATS)
1621                memcpy(data, stats_strings, sizeof(stats_strings));
1622}
1623
1624static unsigned long collect_sge_port_stats(struct adapter *adapter,
1625                                            struct port_info *p, int idx)
1626{
1627        int i;
1628        unsigned long tot = 0;
1629
1630        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1631                tot += adapter->sge.qs[i].port_stats[idx];
1632        return tot;
1633}
1634
1635static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1636                      u64 *data)
1637{
1638        struct port_info *pi = netdev_priv(dev);
1639        struct adapter *adapter = pi->adapter;
1640        const struct mac_stats *s;
1641
1642        spin_lock(&adapter->stats_lock);
1643        s = t3_mac_update_stats(&pi->mac);
1644        spin_unlock(&adapter->stats_lock);
1645
1646        *data++ = s->tx_octets;
1647        *data++ = s->tx_frames;
1648        *data++ = s->tx_mcast_frames;
1649        *data++ = s->tx_bcast_frames;
1650        *data++ = s->tx_pause;
1651        *data++ = s->tx_underrun;
1652        *data++ = s->tx_fifo_urun;
1653
1654        *data++ = s->tx_frames_64;
1655        *data++ = s->tx_frames_65_127;
1656        *data++ = s->tx_frames_128_255;
1657        *data++ = s->tx_frames_256_511;
1658        *data++ = s->tx_frames_512_1023;
1659        *data++ = s->tx_frames_1024_1518;
1660        *data++ = s->tx_frames_1519_max;
1661
1662        *data++ = s->rx_octets;
1663        *data++ = s->rx_frames;
1664        *data++ = s->rx_mcast_frames;
1665        *data++ = s->rx_bcast_frames;
1666        *data++ = s->rx_pause;
1667        *data++ = s->rx_fcs_errs;
1668        *data++ = s->rx_symbol_errs;
1669        *data++ = s->rx_short;
1670        *data++ = s->rx_jabber;
1671        *data++ = s->rx_too_long;
1672        *data++ = s->rx_fifo_ovfl;
1673
1674        *data++ = s->rx_frames_64;
1675        *data++ = s->rx_frames_65_127;
1676        *data++ = s->rx_frames_128_255;
1677        *data++ = s->rx_frames_256_511;
1678        *data++ = s->rx_frames_512_1023;
1679        *data++ = s->rx_frames_1024_1518;
1680        *data++ = s->rx_frames_1519_max;
1681
1682        *data++ = pi->phy.fifo_errors;
1683
1684        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1685        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1686        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1687        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1688        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1689        *data++ = 0;            /* LroAggregated: always zero */
1690        *data++ = 0;            /* LroFlushed: always zero */
1691        *data++ = 0;            /* LroNoDesc: always zero */
1692        *data++ = s->rx_cong_drops;
1693
1694        *data++ = s->num_toggled;
1695        *data++ = s->num_resets;
1696
1697        *data++ = s->link_faults;
1698}
1699
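/*
 * Copy the chip registers in the range [start, end] into the snapshot
 * buffer at the same byte offsets, so the dump mirrors the chip's
 * register address map.
 */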
1700static inline void reg_block_dump(struct adapter *ap, void *buf,
1701                                  unsigned int start, unsigned int end)
1702{
1703        u32 *p = buf + start;
1704
1705        for (; start <= end; start += sizeof(u32))
1706                *p++ = t3_read_reg(ap, start);
1707}
1708
1709static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1710                     void *buf)
1711{
1712        struct port_info *pi = netdev_priv(dev);
1713        struct adapter *ap = pi->adapter;
1714
1715        /*
1716         * Version scheme:
1717         * bits 0..9: chip version
1718         * bits 10..15: chip revision
1719         * bit 31: set for PCIe cards
1720         */
1721        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1722
1723        /*
1724         * We skip the MAC statistics registers because they are clear-on-read.
1725         * Also, reading multi-register stats would need to synchronize with the
1726         * periodic MAC stats accumulation.  Hard to justify the complexity.
1727         */
1728        memset(buf, 0, T3_REGMAP_SIZE);
1729        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1730        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1731        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1732        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1733        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1734        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1735                       XGM_REG(A_XGM_SERDES_STAT3, 1));
1736        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1737                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1738}
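
/*
 * Illustrative sketch only, not part of the driver: how a consumer of the
 * dump could unpack the version word composed in get_regs() above.  The
 * helper name is hypothetical; the field layout is taken from the comment
 * in get_regs().
 */
static inline void __maybe_unused t3_unpack_regs_version(u32 version,
                                                         unsigned int *chip,
                                                         unsigned int *rev,
                                                         unsigned int *pcie)
{
        *chip = version & 0x3ff;        /* bits 0..9: chip version (3) */
        *rev = (version >> 10) & 0x3f;  /* bits 10..15: chip revision */
        *pcie = (version >> 31) & 1;    /* bit 31: set for PCIe cards */
}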
1739
1740static int restart_autoneg(struct net_device *dev)
1741{
1742        struct port_info *p = netdev_priv(dev);
1743
1744        if (!netif_running(dev))
1745                return -EAGAIN;
1746        if (p->link_config.autoneg != AUTONEG_ENABLE)
1747                return -EINVAL;
1748        p->phy.ops->autoneg_restart(&p->phy);
1749        return 0;
1750}
1751
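/*
 * Identify the adapter for "ethtool -p ethX <seconds>": blink the LED on
 * GPIO0 for @data seconds (2 seconds if @data is 0), toggling every 500ms,
 * then drive GPIO0 back high.
 */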
1752static int cxgb3_phys_id(struct net_device *dev, u32 data)
1753{
1754        struct port_info *pi = netdev_priv(dev);
1755        struct adapter *adapter = pi->adapter;
1756        int i;
1757
1758        if (data == 0)
1759                data = 2;
1760
1761        for (i = 0; i < data * 2; i++) {
1762                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1763                                 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1764                if (msleep_interruptible(500))
1765                        break;
1766        }
1767        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1768                         F_GPIO0_OUT_VAL);
1769        return 0;
1770}
1771
1772static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1773{
1774        struct port_info *p = netdev_priv(dev);
1775
1776        cmd->supported = p->link_config.supported;
1777        cmd->advertising = p->link_config.advertising;
1778
1779        if (netif_carrier_ok(dev)) {
1780                cmd->speed = p->link_config.speed;
1781                cmd->duplex = p->link_config.duplex;
1782        } else {
1783                cmd->speed = -1;        /* unknown: no link */
1784                cmd->duplex = -1;       /* unknown: no link */
1785        }
1786
1787        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1788        cmd->phy_address = p->phy.mdio.prtad;
1789        cmd->transceiver = XCVR_EXTERNAL;
1790        cmd->autoneg = p->link_config.autoneg;
1791        cmd->maxtxpkt = 0;
1792        cmd->maxrxpkt = 0;
1793        return 0;
1794}
1795
1796static int speed_duplex_to_caps(int speed, int duplex)
1797{
1798        int cap = 0;
1799
1800        switch (speed) {
1801        case SPEED_10:
1802                if (duplex == DUPLEX_FULL)
1803                        cap = SUPPORTED_10baseT_Full;
1804                else
1805                        cap = SUPPORTED_10baseT_Half;
1806                break;
1807        case SPEED_100:
1808                if (duplex == DUPLEX_FULL)
1809                        cap = SUPPORTED_100baseT_Full;
1810                else
1811                        cap = SUPPORTED_100baseT_Half;
1812                break;
1813        case SPEED_1000:
1814                if (duplex == DUPLEX_FULL)
1815                        cap = SUPPORTED_1000baseT_Full;
1816                else
1817                        cap = SUPPORTED_1000baseT_Half;
1818                break;
1819        case SPEED_10000:
1820                if (duplex == DUPLEX_FULL)
1821                        cap = SUPPORTED_10000baseT_Full;
1822        }
1823        return cap;
1824}
1825
1826#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1827                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1828                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1829                      ADVERTISED_10000baseT_Full)
1830
1831static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1832{
1833        struct port_info *p = netdev_priv(dev);
1834        struct link_config *lc = &p->link_config;
1835
1836        if (!(lc->supported & SUPPORTED_Autoneg)) {
1837                /*
1838                 * PHY offers a single speed/duplex.  See if that's what's
1839                 * being requested.
1840                 */
1841                if (cmd->autoneg == AUTONEG_DISABLE) {
1842                        int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1843                        if (lc->supported & cap)
1844                                return 0;
1845                }
1846                return -EINVAL;
1847        }
1848
1849        if (cmd->autoneg == AUTONEG_DISABLE) {
1850                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1851
1852                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1853                        return -EINVAL; /* forcing 1G needs autoneg */
1854                lc->requested_speed = cmd->speed;
1855                lc->requested_duplex = cmd->duplex;
1856                lc->advertising = 0;
1857        } else {
1858                cmd->advertising &= ADVERTISED_MASK;
1859                cmd->advertising &= lc->supported;
1860                if (!cmd->advertising)
1861                        return -EINVAL;
1862                lc->requested_speed = SPEED_INVALID;
1863                lc->requested_duplex = DUPLEX_INVALID;
1864                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1865        }
1866        lc->autoneg = cmd->autoneg;
1867        if (netif_running(dev))
1868                t3_link_start(&p->phy, &p->mac, lc);
1869        return 0;
1870}
1871
1872static void get_pauseparam(struct net_device *dev,
1873                           struct ethtool_pauseparam *epause)
1874{
1875        struct port_info *p = netdev_priv(dev);
1876
1877        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1878        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1879        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1880}
1881
1882static int set_pauseparam(struct net_device *dev,
1883                          struct ethtool_pauseparam *epause)
1884{
1885        struct port_info *p = netdev_priv(dev);
1886        struct link_config *lc = &p->link_config;
1887
1888        if (epause->autoneg == AUTONEG_DISABLE)
1889                lc->requested_fc = 0;
1890        else if (lc->supported & SUPPORTED_Autoneg)
1891                lc->requested_fc = PAUSE_AUTONEG;
1892        else
1893                return -EINVAL;
1894
1895        if (epause->rx_pause)
1896                lc->requested_fc |= PAUSE_RX;
1897        if (epause->tx_pause)
1898                lc->requested_fc |= PAUSE_TX;
1899        if (lc->autoneg == AUTONEG_ENABLE) {
1900                if (netif_running(dev))
1901                        t3_link_start(&p->phy, &p->mac, lc);
1902        } else {
1903                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1904                if (netif_running(dev))
1905                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1906        }
1907        return 0;
1908}
1909
1910static u32 get_rx_csum(struct net_device *dev)
1911{
1912        struct port_info *p = netdev_priv(dev);
1913
1914        return p->rx_offload & T3_RX_CSUM;
1915}
1916
1917static int set_rx_csum(struct net_device *dev, u32 data)
1918{
1919        struct port_info *p = netdev_priv(dev);
1920
1921        if (data) {
1922                p->rx_offload |= T3_RX_CSUM;
1923        } else {
1924                int i;
1925
1926                p->rx_offload &= ~(T3_RX_CSUM | T3_LRO); /* LRO needs Rx csum */
1927                for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1928                        set_qset_lro(dev, i, 0);
1929        }
1930        return 0;
1931}
1932
1933static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1934{
1935        struct port_info *pi = netdev_priv(dev);
1936        struct adapter *adapter = pi->adapter;
1937        const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1938
1939        e->rx_max_pending = MAX_RX_BUFFERS;
1940        e->rx_mini_max_pending = 0;
1941        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1942        e->tx_max_pending = MAX_TXQ_ENTRIES;
1943
1944        e->rx_pending = q->fl_size;
1945        e->rx_mini_pending = q->rspq_size;
1946        e->rx_jumbo_pending = q->jumbo_size;
1947        e->tx_pending = q->txq_size[0];
1948}
1949
1950static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1951{
1952        struct port_info *pi = netdev_priv(dev);
1953        struct adapter *adapter = pi->adapter;
1954        struct qset_params *q;
1955        int i;
1956
1957        if (e->rx_pending > MAX_RX_BUFFERS ||
1958            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1959            e->tx_pending > MAX_TXQ_ENTRIES ||
1960            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1961            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1962            e->rx_pending < MIN_FL_ENTRIES ||
1963            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1964            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1965                return -EINVAL;
1966
1967        if (adapter->flags & FULL_INIT_DONE)
1968                return -EBUSY;
1969
1970        q = &adapter->params.sge.qset[pi->first_qset];
1971        for (i = 0; i < pi->nqsets; ++i, ++q) {
1972                q->rspq_size = e->rx_mini_pending;
1973                q->fl_size = e->rx_pending;
1974                q->jumbo_size = e->rx_jumbo_pending;
1975                q->txq_size[0] = e->tx_pending;
1976                q->txq_size[1] = e->tx_pending;
1977                q->txq_size[2] = e->tx_pending;
1978        }
1979        return 0;
1980}
1981
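/*
 * ethtool exposes a single rx interrupt coalescing setting per device, so
 * the two handlers below read and write the timer of queue set 0 only.
 */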
1982static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1983{
1984        struct port_info *pi = netdev_priv(dev);
1985        struct adapter *adapter = pi->adapter;
1986        struct qset_params *qsp = &adapter->params.sge.qset[0];
1987        struct sge_qset *qs = &adapter->sge.qs[0];
1988
1989        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1990                return -EINVAL;
1991
1992        qsp->coalesce_usecs = c->rx_coalesce_usecs;
1993        t3_update_qset_coalesce(qs, qsp);
1994        return 0;
1995}
1996
1997static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1998{
1999        struct port_info *pi = netdev_priv(dev);
2000        struct adapter *adapter = pi->adapter;
2001        struct qset_params *q = adapter->params.sge.qset;
2002
2003        c->rx_coalesce_usecs = q->coalesce_usecs;
2004        return 0;
2005}
2006
2007static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2008                      u8 *data)
2009{
2010        struct port_info *pi = netdev_priv(dev);
2011        struct adapter *adapter = pi->adapter;
2012        int i, err = 0;
2013
2014        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2015        if (!buf)
2016                return -ENOMEM;
2017
2018        e->magic = EEPROM_MAGIC;
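        /*
         * EEPROM reads are 32-bit, so fetch the aligned words covering the
         * requested [offset, offset + len) range and copy out below.
         */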
2019        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2020                err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
2021
2022        if (!err)
2023                memcpy(data, buf + e->offset, e->len);
2024        kfree(buf);
2025        return err;
2026}
2027
2028static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2029                      u8 *data)
2030{
2031        struct port_info *pi = netdev_priv(dev);
2032        struct adapter *adapter = pi->adapter;
2033        u32 aligned_offset, aligned_len;
2034        __le32 *p;
2035        u8 *buf;
2036        int err;
2037
2038        if (eeprom->magic != EEPROM_MAGIC)
2039                return -EINVAL;
2040
2041        aligned_offset = eeprom->offset & ~3;
2042        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2043
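        /*
         * EEPROM writes are 32-bit aligned.  For a partial first or last
         * word, read the existing word(s), merge in the caller's bytes and
         * write the whole aligned window back (read-modify-write).
         */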
2044        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2045                buf = kmalloc(aligned_len, GFP_KERNEL);
2046                if (!buf)
2047                        return -ENOMEM;
2048                err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2049                if (!err && aligned_len > 4)
2050                        err = t3_seeprom_read(adapter,
2051                                              aligned_offset + aligned_len - 4,
2052                                              (__le32 *)&buf[aligned_len - 4]);
2053                if (err)
2054                        goto out;
2055                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2056        } else
2057                buf = data;
2058
2059        err = t3_seeprom_wp(adapter, 0);
2060        if (err)
2061                goto out;
2062
2063        for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2064                err = t3_seeprom_write(adapter, aligned_offset, *p);
2065                aligned_offset += 4;
2066        }
2067
2068        if (!err)
2069                err = t3_seeprom_wp(adapter, 1);
2070out:
2071        if (buf != data)
2072                kfree(buf);
2073        return err;
2074}
2075
2076static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2077{
2078        wol->supported = 0;
2079        wol->wolopts = 0;
2080        memset(&wol->sopass, 0, sizeof(wol->sopass));
2081}
2082
2083static const struct ethtool_ops cxgb_ethtool_ops = {
2084        .get_settings = get_settings,
2085        .set_settings = set_settings,
2086        .get_drvinfo = get_drvinfo,
2087        .get_msglevel = get_msglevel,
2088        .set_msglevel = set_msglevel,
2089        .get_ringparam = get_sge_param,
2090        .set_ringparam = set_sge_param,
2091        .get_coalesce = get_coalesce,
2092        .set_coalesce = set_coalesce,
2093        .get_eeprom_len = get_eeprom_len,
2094        .get_eeprom = get_eeprom,
2095        .set_eeprom = set_eeprom,
2096        .get_pauseparam = get_pauseparam,
2097        .set_pauseparam = set_pauseparam,
2098        .get_rx_csum = get_rx_csum,
2099        .set_rx_csum = set_rx_csum,
2100        .set_tx_csum = ethtool_op_set_tx_csum,
2101        .set_sg = ethtool_op_set_sg,
2102        .get_link = ethtool_op_get_link,
2103        .get_strings = get_strings,
2104        .phys_id = cxgb3_phys_id,
2105        .nway_reset = restart_autoneg,
2106        .get_sset_count = get_sset_count,
2107        .get_ethtool_stats = get_stats,
2108        .get_regs_len = get_regs_len,
2109        .get_regs = get_regs,
2110        .get_wol = get_wol,
2111        .set_tso = ethtool_op_set_tso,
2112};
2113
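/*
 * Range check used by the extension ioctl below: a negative value means
 * "parameter not supplied" and therefore always passes the check, e.g. a
 * caller sets t.rspq_size = -1 to leave the response queue size unchanged.
 */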
2114static int in_range(int val, int lo, int hi)
2115{
2116        return val < 0 || (val <= hi && val >= lo);
2117}
2118
2119static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2120{
2121        struct port_info *pi = netdev_priv(dev);
2122        struct adapter *adapter = pi->adapter;
2123        u32 cmd;
2124        int ret;
2125
2126        if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2127                return -EFAULT;
2128
2129        switch (cmd) {
2130        case CHELSIO_SET_QSET_PARAMS:{
2131                int i;
2132                struct qset_params *q;
2133                struct ch_qset_params t;
2134                int q1 = pi->first_qset;
2135                int nqsets = pi->nqsets;
2136
2137                if (!capable(CAP_NET_ADMIN))
2138                        return -EPERM;
2139                if (copy_from_user(&t, useraddr, sizeof(t)))
2140                        return -EFAULT;
2141                if (t.qset_idx >= SGE_QSETS)
2142                        return -EINVAL;
2143                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2144                    !in_range(t.cong_thres, 0, 255) ||
2145                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2146                              MAX_TXQ_ENTRIES) ||
2147                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2148                              MAX_TXQ_ENTRIES) ||
2149                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2150                              MAX_CTRL_TXQ_ENTRIES) ||
2151                    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2152                              MAX_RX_BUFFERS) ||
2153                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2154                              MAX_RX_JUMBO_BUFFERS) ||
2155                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2156                              MAX_RSPQ_ENTRIES))
2157                        return -EINVAL;
2158
2159                if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
2160                        for_each_port(adapter, i) {
2161                                pi = adap2pinfo(adapter, i);
2162                                if (t.qset_idx >= pi->first_qset &&
2163                                    t.qset_idx < pi->first_qset + pi->nqsets &&
2164                                    !(pi->rx_offload & T3_RX_CSUM))
2165                                        return -EINVAL;
2166                        }
2167
2168                if ((adapter->flags & FULL_INIT_DONE) &&
2169                        (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2170                        t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2171                        t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2172                        t.polling >= 0 || t.cong_thres >= 0))
2173                        return -EBUSY;
2174
2175                /* Allow setting of any available qset when offload enabled */
2176                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2177                        q1 = 0;
2178                        for_each_port(adapter, i) {
2179                                pi = adap2pinfo(adapter, i);
2180                                nqsets += pi->first_qset + pi->nqsets;
2181                        }
2182                }
2183
2184                if (t.qset_idx < q1)
2185                        return -EINVAL;
2186                if (t.qset_idx > q1 + nqsets - 1)
2187                        return -EINVAL;
2188
2189                q = &adapter->params.sge.qset[t.qset_idx];
2190
2191                if (t.rspq_size >= 0)
2192                        q->rspq_size = t.rspq_size;
2193                if (t.fl_size[0] >= 0)
2194                        q->fl_size = t.fl_size[0];
2195                if (t.fl_size[1] >= 0)
2196                        q->jumbo_size = t.fl_size[1];
2197                if (t.txq_size[0] >= 0)
2198                        q->txq_size[0] = t.txq_size[0];
2199                if (t.txq_size[1] >= 0)
2200                        q->txq_size[1] = t.txq_size[1];
2201                if (t.txq_size[2] >= 0)
2202                        q->txq_size[2] = t.txq_size[2];
2203                if (t.cong_thres >= 0)
2204                        q->cong_thres = t.cong_thres;
2205                if (t.intr_lat >= 0) {
2206                        struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
2208
2209                        q->coalesce_usecs = t.intr_lat;
2210                        t3_update_qset_coalesce(qs, q);
2211                }
2212                if (t.polling >= 0) {
2213                        if (adapter->flags & USING_MSIX)
2214                                q->polling = t.polling;
2215                        else {
2216                                /* No polling with INTx for T3A */
2217                                if (adapter->params.rev == 0 &&
2218                                        !(adapter->flags & USING_MSI))
2219                                        t.polling = 0;
2220
2221                                for (i = 0; i < SGE_QSETS; i++) {
2222                                        q = &adapter->params.sge.qset[i];
2224                                        q->polling = t.polling;
2225                                }
2226                        }
2227                }
2228                if (t.lro >= 0)
2229                        set_qset_lro(dev, t.qset_idx, t.lro);
2230
2231                break;
2232        }
2233        case CHELSIO_GET_QSET_PARAMS:{
2234                struct qset_params *q;
2235                struct ch_qset_params t;
2236                int q1 = pi->first_qset;
2237                int nqsets = pi->nqsets;
2238                int i;
2239
2240                if (copy_from_user(&t, useraddr, sizeof(t)))
2241                        return -EFAULT;
2242
2243                /* Display qsets for all ports when offload enabled */
2244                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2245                        q1 = 0;
2246                        for_each_port(adapter, i) {
2247                                pi = adap2pinfo(adapter, i);
2248                                nqsets = pi->first_qset + pi->nqsets;
2249                        }
2250                }
2251
2252                if (t.qset_idx >= nqsets)
2253                        return -EINVAL;
2254
2255                q = &adapter->params.sge.qset[q1 + t.qset_idx];
2256                t.rspq_size = q->rspq_size;
2257                t.txq_size[0] = q->txq_size[0];
2258                t.txq_size[1] = q->txq_size[1];
2259                t.txq_size[2] = q->txq_size[2];
2260                t.fl_size[0] = q->fl_size;
2261                t.fl_size[1] = q->jumbo_size;
2262                t.polling = q->polling;
2263                t.lro = q->lro;
2264                t.intr_lat = q->coalesce_usecs;
2265                t.cong_thres = q->cong_thres;
2266                t.qnum = q1;
2267
2268                if (adapter->flags & USING_MSIX)
2269                        t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2270                else
2271                        t.vector = adapter->pdev->irq;
2272
2273                if (copy_to_user(useraddr, &t, sizeof(t)))
2274                        return -EFAULT;
2275                break;
2276        }
2277        case CHELSIO_SET_QSET_NUM:{
2278                struct ch_reg edata;
2279                unsigned int i, first_qset = 0, other_qsets = 0;
2280
2281                if (!capable(CAP_NET_ADMIN))
2282                        return -EPERM;
2283                if (adapter->flags & FULL_INIT_DONE)
2284                        return -EBUSY;
2285                if (copy_from_user(&edata, useraddr, sizeof(edata)))
2286                        return -EFAULT;
2287                if (edata.val < 1 ||
2288                        (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2289                        return -EINVAL;
2290
2291                for_each_port(adapter, i)
2292                        if (adapter->port[i] && adapter->port[i] != dev)
2293                                other_qsets += adap2pinfo(adapter, i)->nqsets;
2294
2295                if (edata.val + other_qsets > SGE_QSETS)
2296                        return -EINVAL;
2297
2298                pi->nqsets = edata.val;
2299
2300                for_each_port(adapter, i)
2301                        if (adapter->port[i]) {
2302                                pi = adap2pinfo(adapter, i);
2303                                pi->first_qset = first_qset;
2304                                first_qset += pi->nqsets;
2305                        }
2306                break;
2307        }
2308        case CHELSIO_GET_QSET_NUM:{
2309                struct ch_reg edata;
2310
2311                memset(&edata, 0, sizeof(struct ch_reg));
2312
2313                edata.cmd = CHELSIO_GET_QSET_NUM;
2314                edata.val = pi->nqsets;
2315                if (copy_to_user(useraddr, &edata, sizeof(edata)))
2316                        return -EFAULT;
2317                break;
2318        }
2319        case CHELSIO_LOAD_FW:{
2320                u8 *fw_data;
2321                struct ch_mem_range t;
2322
2323                if (!capable(CAP_SYS_RAWIO))
2324                        return -EPERM;
2325                if (copy_from_user(&t, useraddr, sizeof(t)))
2326                        return -EFAULT;
2327                /* Should t.len be sanity-checked here? */
2328                fw_data = memdup_user(useraddr + sizeof(t), t.len);
2329                if (IS_ERR(fw_data))
2330                        return PTR_ERR(fw_data);
2331
2332                ret = t3_load_fw(adapter, fw_data, t.len);
2333                kfree(fw_data);
2334                if (ret)
2335                        return ret;
2336                break;
2337        }
2338        case CHELSIO_SETMTUTAB:{
2339                struct ch_mtus m;
2340                int i;
2341
2342                if (!is_offload(adapter))
2343                        return -EOPNOTSUPP;
2344                if (!capable(CAP_NET_ADMIN))
2345                        return -EPERM;
2346                if (offload_running(adapter))
2347                        return -EBUSY;
2348                if (copy_from_user(&m, useraddr, sizeof(m)))
2349                        return -EFAULT;
2350                if (m.nmtus != NMTUS)
2351                        return -EINVAL;
2352                if (m.mtus[0] < 81)     /* accommodate SACK */
2353                        return -EINVAL;
2354
2355                /* MTUs must be in ascending order */
2356                for (i = 1; i < NMTUS; ++i)
2357                        if (m.mtus[i] < m.mtus[i - 1])
2358                                return -EINVAL;
2359
2360                memcpy(adapter->params.mtus, m.mtus,
2361                        sizeof(adapter->params.mtus));
2362                break;
2363        }
2364        case CHELSIO_GET_PM:{
2365                struct tp_params *p = &adapter->params.tp;
2366                struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2367
2368                if (!is_offload(adapter))
2369                        return -EOPNOTSUPP;
2370                m.tx_pg_sz = p->tx_pg_size;
2371                m.tx_num_pg = p->tx_num_pgs;
2372                m.rx_pg_sz = p->rx_pg_size;
2373                m.rx_num_pg = p->rx_num_pgs;
2374                m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2375                if (copy_to_user(useraddr, &m, sizeof(m)))
2376                        return -EFAULT;
2377                break;
2378        }
2379        case CHELSIO_SET_PM:{
2380                struct ch_pm m;
2381                struct tp_params *p = &adapter->params.tp;
2382
2383                if (!is_offload(adapter))
2384                        return -EOPNOTSUPP;
2385                if (!capable(CAP_NET_ADMIN))
2386                        return -EPERM;
2387                if (adapter->flags & FULL_INIT_DONE)
2388                        return -EBUSY;
2389                if (copy_from_user(&m, useraddr, sizeof(m)))
2390                        return -EFAULT;
2391                if (!is_power_of_2(m.rx_pg_sz) ||
2392                        !is_power_of_2(m.tx_pg_sz))
2393                        return -EINVAL; /* not power of 2 */
2394                if (!(m.rx_pg_sz & 0x14000))
2395                        return -EINVAL; /* not 16KB or 64KB */
2396                if (!(m.tx_pg_sz & 0x1554000))
2397                        return -EINVAL; /* not 16KB..16MB in powers of 4 */
2398                if (m.tx_num_pg == -1)
2399                        m.tx_num_pg = p->tx_num_pgs;
2400                if (m.rx_num_pg == -1)
2401                        m.rx_num_pg = p->rx_num_pgs;
2402                if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2403                        return -EINVAL;
2404                if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2405                        m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2406                        return -EINVAL;
2407                p->rx_pg_size = m.rx_pg_sz;
2408                p->tx_pg_size = m.tx_pg_sz;
2409                p->rx_num_pgs = m.rx_num_pg;
2410                p->tx_num_pgs = m.tx_num_pg;
2411                break;
2412        }
2413        case CHELSIO_GET_MEM:{
2414                struct ch_mem_range t;
2415                struct mc7 *mem;
2416                u64 buf[32];
2417
2418                if (!is_offload(adapter))
2419                        return -EOPNOTSUPP;
2420                if (!(adapter->flags & FULL_INIT_DONE))
2421                        return -EIO;    /* need the memory controllers */
2422                if (copy_from_user(&t, useraddr, sizeof(t)))
2423                        return -EFAULT;
2424                if ((t.addr & 7) || (t.len & 7))
2425                        return -EINVAL;
2426                if (t.mem_id == MEM_CM)
2427                        mem = &adapter->cm;
2428                else if (t.mem_id == MEM_PMRX)
2429                        mem = &adapter->pmrx;
2430                else if (t.mem_id == MEM_PMTX)
2431                        mem = &adapter->pmtx;
2432                else
2433                        return -EINVAL;
2434
2435                /*
2436                 * Version scheme:
2437                 * bits 0..9: chip version
2438                 * bits 10..15: chip revision
2439                 */
2440                t.version = 3 | (adapter->params.rev << 10);
2441                if (copy_to_user(useraddr, &t, sizeof(t)))
2442                        return -EFAULT;
2443
2444                /*
2445                 * Read 256 bytes at a time as len can be large and we don't
2446                 * want to use huge intermediate buffers.
2447                 */
2448                useraddr += sizeof(t);  /* advance to start of buffer */
2449                while (t.len) {
2450                        unsigned int chunk = min_t(unsigned int, t.len,
2451                                                   sizeof(buf));
2452
2453                        ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
2456                        if (ret)
2457                                return ret;
2458                        if (copy_to_user(useraddr, buf, chunk))
2459                                return -EFAULT;
2460                        useraddr += chunk;
2461                        t.addr += chunk;
2462                        t.len -= chunk;
2463                }
2464                break;
2465        }
2466        case CHELSIO_SET_TRACE_FILTER:{
2467                struct ch_trace t;
2468                const struct trace_params *tp;
2469
2470                if (!capable(CAP_NET_ADMIN))
2471                        return -EPERM;
2472                if (!offload_running(adapter))
2473                        return -EAGAIN;
2474                if (copy_from_user(&t, useraddr, sizeof(t)))
2475                        return -EFAULT;
2476
2477                tp = (const struct trace_params *)&t.sip;
2478                if (t.config_tx)
2479                        t3_config_trace_filter(adapter, tp, 0,
2480                                                t.invert_match,
2481                                                t.trace_tx);
2482                if (t.config_rx)
2483                        t3_config_trace_filter(adapter, tp, 1,
2484                                                t.invert_match,
2485                                                t.trace_rx);
2486                break;
2487        }
2488        default:
2489                return -EOPNOTSUPP;
2490        }
2491        return 0;
2492}
2493
2494static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2495{
2496        struct mii_ioctl_data *data = if_mii(req);
2497        struct port_info *pi = netdev_priv(dev);
2498        struct adapter *adapter = pi->adapter;
2499
2500        switch (cmd) {
2501        case SIOCGMIIREG:
2502        case SIOCSMIIREG:
2503                /* Convert phy_id from older PRTAD/DEVAD format */
2504                if (is_10G(adapter) &&
2505                    !mdio_phy_id_is_c45(data->phy_id) &&
2506                    (data->phy_id & 0x1f00) &&
2507                    !(data->phy_id & 0xe0e0))
2508                        data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2509                                                       data->phy_id & 0x1f);
2510                /* FALLTHRU */
2511        case SIOCGMIIPHY:
2512                return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2513        case SIOCCHIOCTL:
2514                return cxgb_extension_ioctl(dev, req->ifr_data);
2515        default:
2516                return -EOPNOTSUPP;
2517        }
2518}
2519
2520static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2521{
2522        struct port_info *pi = netdev_priv(dev);
2523        struct adapter *adapter = pi->adapter;
2524        int ret;
2525
2526        if (new_mtu < 81)       /* accommodate SACK */
2527                return -EINVAL;
2528        if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2529                return ret;
2530        dev->mtu = new_mtu;
2531        init_port_mtus(adapter);
2532        if (adapter->params.rev == 0 && offload_running(adapter))
2533                t3_load_mtus(adapter, adapter->params.mtus,
2534                             adapter->params.a_wnd, adapter->params.b_wnd,
2535                             adapter->port[0]->mtu);
2536        return 0;
2537}
2538
2539static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2540{
2541        struct port_info *pi = netdev_priv(dev);
2542        struct adapter *adapter = pi->adapter;
2543        struct sockaddr *addr = p;
2544
2545        if (!is_valid_ether_addr(addr->sa_data))
2546                return -EINVAL;
2547
2548        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2549        t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2550        if (offload_running(adapter))
2551                write_smt_entry(adapter, pi->port_id);
2552        return 0;
2553}
2554
2555/**
2556 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2557 * @adap: the adapter
2558 * @p: the port
2559 *
2560 * Ensures that current Rx processing on any of the queues associated with
2561 * the given port completes before returning.  Acquiring and then releasing
2562 * each response queue's lock guarantees that any handler holding it is done.
2563 */
2564static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2565{
2566        int i;
2567
2568        for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2569                struct sge_rspq *q = &adap->sge.qs[i].rspq;
2570
2571                spin_lock_irq(&q->lock);
2572                spin_unlock_irq(&q->lock);
2573        }
2574}
2575
2576static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2577{
2578        struct port_info *pi = netdev_priv(dev);
2579        struct adapter *adapter = pi->adapter;
2580
2581        pi->vlan_grp = grp;
2582        if (adapter->params.rev > 0)
2583                t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2584        else {
2585                /* single control for all ports */
2586                unsigned int i, have_vlans = 0;
2587                for_each_port(adapter, i)
2588                    have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2589
2590                t3_set_vlan_accel(adapter, 1, have_vlans);
2591        }
2592        t3_synchronize_rx(adapter, pi);
2593}
2594
2595#ifdef CONFIG_NET_POLL_CONTROLLER
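/*
 * Service the port's queue sets by invoking their interrupt handler
 * directly, passing the same cookie (queue set or whole adapter) that the
 * real interrupt would, depending on the interrupt mode in use.
 */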
2596static void cxgb_netpoll(struct net_device *dev)
2597{
2598        struct port_info *pi = netdev_priv(dev);
2599        struct adapter *adapter = pi->adapter;
2600        int qidx;
2601
2602        for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2603                struct sge_qset *qs = &adapter->sge.qs[qidx];
2604                void *source;
2605
2606                if (adapter->flags & USING_MSIX)
2607                        source = qs;
2608                else
2609                        source = adapter;
2610
2611                t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2612        }
2613}
2614#endif
2615
2616/*
2617 * Periodic accumulation of MAC statistics.
2618 */
2619static void mac_stats_update(struct adapter *adapter)
2620{
2621        int i;
2622
2623        for_each_port(adapter, i) {
2624                struct net_device *dev = adapter->port[i];
2625                struct port_info *p = netdev_priv(dev);
2626
2627                if (netif_running(dev)) {
2628                        spin_lock(&adapter->stats_lock);
2629                        t3_mac_update_stats(&p->mac);
2630                        spin_unlock(&adapter->stats_lock);
2631                }
2632        }
2633}
2634
2635static void check_link_status(struct adapter *adapter)
2636{
2637        int i;
2638
2639        for_each_port(adapter, i) {
2640                struct net_device *dev = adapter->port[i];
2641                struct port_info *p = netdev_priv(dev);
2642                int link_fault;
2643
2644                spin_lock_irq(&adapter->work_lock);
2645                link_fault = p->link_fault;
2646                spin_unlock_irq(&adapter->work_lock);
2647
2648                if (link_fault) {
2649                        t3_link_fault(adapter, i);
2650                        continue;
2651                }
2652
2653                if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2654                        t3_xgm_intr_disable(adapter, i);
2655                        t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2656
2657                        t3_link_changed(adapter, i);
2658                        t3_xgm_intr_enable(adapter, i);
2659                }
2660        }
2661}
2662
2663static void check_t3b2_mac(struct adapter *adapter)
2664{
2665        int i;
2666
2667        if (!rtnl_trylock())    /* synchronize with ifdown */
2668                return;
2669
2670        for_each_port(adapter, i) {
2671                struct net_device *dev = adapter->port[i];
2672                struct port_info *p = netdev_priv(dev);
2673                int status;
2674
2675                if (!netif_running(dev))
2676                        continue;
2677
2678                status = 0;
2679                if (netif_carrier_ok(dev))      /* running checked above */
2680                        status = t3b2_mac_watchdog_task(&p->mac);
2681                if (status == 1)
2682                        p->mac.stats.num_toggled++;
2683                else if (status == 2) {
2684                        struct cmac *mac = &p->mac;
2685
2686                        t3_mac_set_mtu(mac, dev->mtu);
2687                        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2688                        cxgb_set_rxmode(dev);
2689                        t3_link_start(&p->phy, mac, &p->link_config);
2690                        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2691                        t3_port_intr_enable(adapter, p->port_id);
2692                        p->mac.stats.num_resets++;
2693                }
2694        }
2695        rtnl_unlock();
2696}
2697
2698
2699static void t3_adap_check_task(struct work_struct *work)
2700{
2701        struct adapter *adapter = container_of(work, struct adapter,
2702                                               adap_check_task.work);
2703        const struct adapter_params *p = &adapter->params;
2704        int port;
2705        unsigned int v, status, reset;
2706
2707        adapter->check_task_cnt++;
2708
2709        check_link_status(adapter);
2710
2711        /* Accumulate MAC stats if needed; linkpoll_period is in 0.1s units */
2712        if (!p->linkpoll_period ||
2713            (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2714            p->stats_update_period) {
2715                mac_stats_update(adapter);
2716                adapter->check_task_cnt = 0;
2717        }
2718
2719        if (p->rev == T3_REV_B2)
2720                check_t3b2_mac(adapter);
2721
2722        /*
2723         * Scan the XGMACs for conditions we want to monitor by periodic
2724         * polling rather than via an interrupt.  This is used for conditions
2725         * which would otherwise flood the system with interrupts and where
2726         * we only really need to know that they are occurring.  For each
2727         * condition we count its detections and then clear it for the next
2728         * polling loop.
2729         */
2730        for_each_port(adapter, port) {
2731                struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2732                u32 cause;
2733
2734                cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2735                reset = 0;
2736                if (cause & F_RXFIFO_OVERFLOW) {
2737                        mac->stats.rx_fifo_ovfl++;
2738                        reset |= F_RXFIFO_OVERFLOW;
2739                }
2740
2741                t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2742        }
2743
2744        /*
2745         * We do the same as above for FL_EMPTY interrupts.
2746         */
2747        status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2748        reset = 0;
2749
2750        if (status & F_FLEMPTY) {
2751                struct sge_qset *qs = &adapter->sge.qs[0];
2752                int i = 0;
2753
2754                reset |= F_FLEMPTY;
2755
2756                v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2757                    0xffff;
2758
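                /*
                 * Each consecutive pair of status bits maps to free lists
                 * 0 and 1 of the next queue set.
                 */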
2759                while (v) {
2760                        qs->fl[i].empty += (v & 1);
2761                        if (i)
2762                                qs++;
2763                        i ^= 1;
2764                        v >>= 1;
2765                }
2766        }
2767
2768        t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2769
2770        /* Schedule the next check update if any port is active. */
2771        spin_lock_irq(&adapter->work_lock);
2772        if (adapter->open_device_map & PORT_MASK)
2773                schedule_chk_task(adapter);
2774        spin_unlock_irq(&adapter->work_lock);
2775}
2776
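/*
 * Doorbell FIFO notifications: relay SGE doorbell full/empty/drop events
 * to the offload driver (e.g. iw_cxgb3) through the tdev event chain; on a
 * drop, db_drop_task() also re-rings the driver's queue set doorbells.
 */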
2777static void db_full_task(struct work_struct *work)
2778{
2779        struct adapter *adapter = container_of(work, struct adapter,
2780                                               db_full_task);
2781
2782        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2783}
2784
2785static void db_empty_task(struct work_struct *work)
2786{
2787        struct adapter *adapter = container_of(work, struct adapter,
2788                                               db_empty_task);
2789
2790        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2791}
2792
2793static void db_drop_task(struct work_struct *work)
2794{
2795        struct adapter *adapter = container_of(work, struct adapter,
2796                                               db_drop_task);
2797        unsigned long delay = 1000;
2798        unsigned short r;
2799
2800        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2801
2802        /*
2803         * Sleep a while before ringing the driver qset doorbells.
2804         * The delay is 1000 + (r & 1023), i.e. between 1000 and 2023 usecs.
2805         */
2806        get_random_bytes(&r, 2);
2807        delay += r & 1023;
2808        set_current_state(TASK_UNINTERRUPTIBLE);
2809        schedule_timeout(usecs_to_jiffies(delay));
2810        ring_dbs(adapter);
2811}
2812
2813/*
2814 * Processes external (PHY) interrupts in process context.
2815 */
2816static void ext_intr_task(struct work_struct *work)
2817{
2818        struct adapter *adapter = container_of(work, struct adapter,
2819                                               ext_intr_handler_task);
2820        int i;
2821
2822        /* Disable link fault interrupts */
2823        for_each_port(adapter, i) {
2824                struct net_device *dev = adapter->port[i];
2825                struct port_info *p = netdev_priv(dev);
2826
2827                t3_xgm_intr_disable(adapter, i);
2828                t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2829        }
2830
2831        /* Handle PHY interrupts, then re-enable link fault interrupts */
2832        t3_phy_intr_handler(adapter);
2833
2834        for_each_port(adapter, i)
2835                t3_xgm_intr_enable(adapter, i);
2836
2837        /* Now reenable external interrupts */
2838        spin_lock_irq(&adapter->work_lock);
2839        if (adapter->slow_intr_mask) {
2840                adapter->slow_intr_mask |= F_T3DBG;
2841                t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2842                t3_write_reg(adapter, A_PL_INT_ENABLE0,
2843                             adapter->slow_intr_mask);
2844        }
2845        spin_unlock_irq(&adapter->work_lock);
2846}
2847
2848/*
2849 * Interrupt-context handler for external (PHY) interrupts.
2850 */
2851void t3_os_ext_intr_handler(struct adapter *adapter)
2852{
2853        /*
2854         * Schedule a task to handle external interrupts as they may be slow
2855         * and we use a mutex to protect MDIO registers.  We disable PHY
2856         * interrupts in the meantime and let the task reenable them when
2857         * it's done.
2858         */
2859        spin_lock(&adapter->work_lock);
2860        if (adapter->slow_intr_mask) {
2861                adapter->slow_intr_mask &= ~F_T3DBG;
2862                t3_write_reg(adapter, A_PL_INT_ENABLE0,
2863                             adapter->slow_intr_mask);
2864                queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2865        }
2866        spin_unlock(&adapter->work_lock);
2867}
2868
2869void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2870{
2871        struct net_device *netdev = adapter->port[port_id];
2872        struct port_info *pi = netdev_priv(netdev);
2873
2874        spin_lock(&adapter->work_lock);
2875        pi->link_fault = 1;
2876        spin_unlock(&adapter->work_lock);
2877}
2878
2879static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2880{
2881        int i, ret = 0;
2882
2883        if (is_offload(adapter) &&
2884            test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2885                cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2886                offload_close(&adapter->tdev);
2887        }
2888
2889        /* Stop all ports */
2890        for_each_port(adapter, i) {
2891                struct net_device *netdev = adapter->port[i];
2892
2893                if (netif_running(netdev))
2894                        __cxgb_close(netdev, on_wq);
2895        }
2896
2897        /* Stop SGE timers */
2898        t3_stop_sge_timers(adapter);
2899
2900        adapter->flags &= ~FULL_INIT_DONE;
2901
2902        if (reset)
2903                ret = t3_reset_adapter(adapter);
2904
2905        pci_disable_device(adapter->pdev);
2906
2907        return ret;
2908}
2909
2910static int t3_reenable_adapter(struct adapter *adapter)
2911{
2912        if (pci_enable_device(adapter->pdev)) {
2913                dev_err(&adapter->pdev->dev,
2914                        "Cannot re-enable PCI device after reset.\n");
2915                goto err;
2916        }
2917        pci_set_master(adapter->pdev);
2918        pci_restore_state(adapter->pdev);
2919        pci_save_state(adapter->pdev);
2920
2921        /* Free sge resources */
2922        t3_free_sge_resources(adapter);
2923
2924        if (t3_replay_prep_adapter(adapter))
2925                goto err;
2926
2927        return 0;
2928err:
2929        return -1;
2930}
2931
2932static void t3_resume_ports(struct adapter *adapter)
2933{
2934        int i;
2935
2936        /* Restart the ports */
2937        for_each_port(adapter, i) {
2938                struct net_device *netdev = adapter->port[i];
2939
2940                if (netif_running(netdev)) {
2941                        if (cxgb_open(netdev)) {
2942                                dev_err(&adapter->pdev->dev,
2943                                        "can't bring device back up"
2944                                        " after reset\n");
2945                                continue;
2946                        }
2947                }
2948        }
2949
2950        if (is_offload(adapter) && !ofld_disable)
2951                cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2952}
2953
2954/*
2955 * Processes a fatal error:
2956 * bring the ports down, reset the chip, bring the ports back up.
2957 */
2958static void fatal_error_task(struct work_struct *work)
2959{
2960        struct adapter *adapter = container_of(work, struct adapter,
2961                                               fatal_error_handler_task);
2962        int err = 0;
2963
2964        rtnl_lock();
2965        err = t3_adapter_error(adapter, 1, 1);
2966        if (!err)
2967                err = t3_reenable_adapter(adapter);
2968        if (!err)
2969                t3_resume_ports(adapter);
2970
2971        CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2972        rtnl_unlock();
2973}
2974
2975void t3_fatal_err(struct adapter *adapter)
2976{
2977        unsigned int fw_status[4];
2978
2979        if (adapter->flags & FULL_INIT_DONE) {
2980                t3_sge_stop(adapter);
2981                t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2982                t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2983                t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2984                t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2985
2986                spin_lock(&adapter->work_lock);
2987                t3_intr_disable(adapter);
2988                queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2989                spin_unlock(&adapter->work_lock);
2990        }
2991        CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2992        if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2993                CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2994                         fw_status[0], fw_status[1],
2995                         fw_status[2], fw_status[3]);
2996}
2997
2998/**
2999 * t3_io_error_detected - called when a PCI error is detected
3000 * @pdev: Pointer to PCI device
3001 * @state: The current PCI connection state
3002 *
3003 * This function is called after a PCI bus error affecting
3004 * this device has been detected.
3005 */
3006static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3007                                             pci_channel_state_t state)
3008{
3009        struct adapter *adapter = pci_get_drvdata(pdev);
3010
3011        if (state == pci_channel_io_perm_failure)
3012                return PCI_ERS_RESULT_DISCONNECT;
3013
3014        t3_adapter_error(adapter, 0, 0);
3015
3016        /* Request a slot reset. */
3017        return PCI_ERS_RESULT_NEED_RESET;
3018}
3019
3020/**
3021 * t3_io_slot_reset - called after the PCI bus has been reset.
3022 * @pdev: Pointer to PCI device
3023 *
3024 * Restart the card from scratch, as if from a cold boot.
3025 */
3026static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3027{
3028        struct adapter *adapter = pci_get_drvdata(pdev);
3029
3030        if (!t3_reenable_adapter(adapter))
3031                return PCI_ERS_RESULT_RECOVERED;
3032
3033        return PCI_ERS_RESULT_DISCONNECT;
3034}
3035
3036/**
3037 * t3_io_resume - called when traffic can start flowing again.
3038 * @pdev: Pointer to PCI device
3039 *
3040 * This callback is called when the error recovery driver tells us that
3041 * it's OK to resume normal operation.
3042 */
3043static void t3_io_resume(struct pci_dev *pdev)
3044{
3045        struct adapter *adapter = pci_get_drvdata(pdev);
3046
3047        CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3048                 t3_read_reg(adapter, A_PCIE_PEX_ERR));
3049
3050        t3_resume_ports(adapter);
3051}
3052
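/*
 * PCI error recovery callbacks: the AER core calls error_detected when
 * a bus error is seen, slot_reset after the link has been reset, and
 * resume once traffic may flow again.
 */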
3053static struct pci_error_handlers t3_err_handler = {
3054        .error_detected = t3_io_error_detected,
3055        .slot_reset = t3_io_slot_reset,
3056        .resume = t3_io_resume,
3057};
3058
3059/*
3060 * Set the number of qsets based on the number of CPUs and the number of ports,
3061 * not to exceed the number of available qsets, assuming there are enough qsets
3062 * per port in HW.
3063 */
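/*
 * Worked example (assuming SGE_QSETS is 8 on this hardware): a 2-port
 * rev B adapter that obtained 9 MSI-X vectors starts with nqsets = 8;
 * since 2 * 8 > 8 the count is halved to 4 per port, and is then
 * clamped to the number of online CPUs if that is smaller.
 */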
3064static void set_nqsets(struct adapter *adap)
3065{
3066        int i, j = 0;
3067        int num_cpus = num_online_cpus();
3068        int hwports = adap->params.nports;
3069        int nqsets = adap->msix_nvectors - 1;
3070
3071        if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3072                if (hwports == 2 &&
3073                    (hwports * nqsets > SGE_QSETS ||
3074                     num_cpus >= nqsets / hwports))
3075                        nqsets /= hwports;
3076                if (nqsets > num_cpus)
3077                        nqsets = num_cpus;
3078                if (nqsets < 1 || hwports == 4)
3079                        nqsets = 1;
3080        } else
3081                nqsets = 1;
3082
3083        for_each_port(adap, i) {
3084                struct port_info *pi = adap2pinfo(adap, i);
3085
3086                pi->first_qset = j;
3087                pi->nqsets = nqsets;
3088                j = pi->first_qset + nqsets;
3089
3090                dev_info(&adap->pdev->dev,
3091                         "Port %d using %d queue sets.\n", i, nqsets);
3092        }
3093}
3094
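/*
 * Try to allocate one MSI-X vector per qset plus one for async events,
 * retrying with however many vectors the PCI layer reports available.
 * Fails unless we get at least one vector per port plus one.
 */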
3095static int __devinit cxgb_enable_msix(struct adapter *adap)
3096{
3097        struct msix_entry entries[SGE_QSETS + 1];
3098        int vectors;
3099        int i, err;
3100
3101        vectors = ARRAY_SIZE(entries);
3102        for (i = 0; i < vectors; ++i)
3103                entries[i].entry = i;
3104
3105        while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
3106                vectors = err;
3107
3108        if (err < 0)
3109                pci_disable_msix(adap->pdev);
3110
3111        if (!err && vectors < (adap->params.nports + 1)) {
3112                pci_disable_msix(adap->pdev);
3113                err = -1;
3114        }
3115
3116        if (!err) {
3117                for (i = 0; i < vectors; ++i)
3118                        adap->msix_info[i].vec = entries[i].vector;
3119                adap->msix_nvectors = vectors;
3120        }
3121
3122        return err;
3123}
3124
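/*
 * Log a one-line summary for each registered port.  The pointer
 * comparison "adap->name == dev->name" is intentional: adap->name is
 * pointed at the name of the first successfully registered netdev, so
 * the memory/serial-number line is printed only once, for that port.
 */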
3125static void __devinit print_port_info(struct adapter *adap,
3126                                      const struct adapter_info *ai)
3127{
3128        static const char *pci_variant[] = {
3129                "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3130        };
3131
3132        int i;
3133        char buf[80];
3134
3135        if (is_pcie(adap))
3136                snprintf(buf, sizeof(buf), "%s x%d",
3137                         pci_variant[adap->params.pci.variant],
3138                         adap->params.pci.width);
3139        else
3140                snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3141                         pci_variant[adap->params.pci.variant],
3142                         adap->params.pci.speed, adap->params.pci.width);
3143
3144        for_each_port(adap, i) {
3145                struct net_device *dev = adap->port[i];
3146                const struct port_info *pi = netdev_priv(dev);
3147
3148                if (!test_bit(i, &adap->registered_device_map))
3149                        continue;
3150                printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
3151                       dev->name, ai->desc, pi->phy.desc,
3152                       is_offload(adap) ? "R" : "", adap->params.rev, buf,
3153                       (adap->flags & USING_MSIX) ? " MSI-X" :
3154                       (adap->flags & USING_MSI) ? " MSI" : "");
3155                if (adap->name == dev->name && adap->params.vpd.mclk)
3156                        printk(KERN_INFO
3157                               "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3158                               adap->name, t3_mc7_size(&adap->cm) >> 20,
3159                               t3_mc7_size(&adap->pmtx) >> 20,
3160                               t3_mc7_size(&adap->pmrx) >> 20,
3161                               adap->params.vpd.sn);
3162        }
3163}
3164
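/* net_device callbacks wired up for every port's netdev in init_one() */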
3165static const struct net_device_ops cxgb_netdev_ops = {
3166        .ndo_open               = cxgb_open,
3167        .ndo_stop               = cxgb_close,
3168        .ndo_start_xmit         = t3_eth_xmit,
3169        .ndo_get_stats          = cxgb_get_stats,
3170        .ndo_validate_addr      = eth_validate_addr,
3171        .ndo_set_multicast_list = cxgb_set_rxmode,
3172        .ndo_do_ioctl           = cxgb_ioctl,
3173        .ndo_change_mtu         = cxgb_change_mtu,
3174        .ndo_set_mac_address    = cxgb_set_mac_addr,
3175        .ndo_vlan_rx_register   = vlan_rx_register,
3176#ifdef CONFIG_NET_POLL_CONTROLLER
3177        .ndo_poll_controller    = cxgb_netpoll,
3178#endif
3179};
3180
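/*
 * Derive the port's iSCSI MAC from its Ethernet MAC by setting a high
 * bit in byte 3, keeping the OUI intact while guaranteeing the two
 * addresses differ.
 */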
3181static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
3182{
3183        struct port_info *pi = netdev_priv(dev);
3184
3185        memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3186        pi->iscsic.mac_addr[3] |= 0x80;
3187}
3188
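/*
 * One-time PCI probe: set up DMA masks, map BAR0, allocate the adapter
 * and one netdev per port, prepare the HW, register the netdevs, and
 * pick an interrupt mode (MSI-X, MSI, or legacy INTx).
 */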
3189static int __devinit init_one(struct pci_dev *pdev,
3190                              const struct pci_device_id *ent)
3191{
3192        static int version_printed;
3193
3194        int i, err, pci_using_dac = 0;
3195        resource_size_t mmio_start, mmio_len;
3196        const struct adapter_info *ai;
3197        struct adapter *adapter = NULL;
3198        struct port_info *pi;
3199
3200        if (!version_printed) {
3201                printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3202                ++version_printed;
3203        }
3204
3205        if (!cxgb3_wq) {
3206                cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3207                if (!cxgb3_wq) {
3208                        printk(KERN_ERR DRV_NAME
3209                               ": cannot initialize work queue\n");
3210                        return -ENOMEM;
3211                }
3212        }
3213
3214        err = pci_enable_device(pdev);
3215        if (err) {
3216                dev_err(&pdev->dev, "cannot enable PCI device\n");
3217                goto out;
3218        }
3219
3220        err = pci_request_regions(pdev, DRV_NAME);
3221        if (err) {
3222                /* Just info, some other driver may have claimed the device. */
3223                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3224                goto out_disable_device;
3225        }
3226
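        /*
         * Prefer a 64-bit DMA mask (the netdevs are flagged HIGHDMA
         * below); fall back to a 32-bit mask if the platform cannot
         * do 64-bit DMA.
         */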
3227        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3228                pci_using_dac = 1;
3229                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3230                if (err) {
3231                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3232                               "coherent allocations\n");
3233                        goto out_release_regions;
3234                }
3235        } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3236                dev_err(&pdev->dev, "no usable DMA configuration\n");
3237                goto out_release_regions;
3238        }
3239
3240        pci_set_master(pdev);
3241        pci_save_state(pdev);
3242
3243        mmio_start = pci_resource_start(pdev, 0);
3244        mmio_len = pci_resource_len(pdev, 0);
3245        ai = t3_get_adapter_info(ent->driver_data);
3246
3247        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3248        if (!adapter) {
3249                err = -ENOMEM;
3250                goto out_release_regions;
3251        }
3252
3253        adapter->nofail_skb =
3254                alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3255        if (!adapter->nofail_skb) {
3256                dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3257                err = -ENOMEM;
3258                goto out_free_adapter;
3259        }
3260
3261        adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3262        if (!adapter->regs) {
3263                dev_err(&pdev->dev, "cannot map device registers\n");
3264                err = -ENOMEM;
3265                goto out_free_adapter;
3266        }
3267
3268        adapter->pdev = pdev;
3269        adapter->name = pci_name(pdev);
3270        adapter->msg_enable = dflt_msg_enable;
3271        adapter->mmio_len = mmio_len;
3272
3273        mutex_init(&adapter->mdio_lock);
3274        spin_lock_init(&adapter->work_lock);
3275        spin_lock_init(&adapter->stats_lock);
3276
3277        INIT_LIST_HEAD(&adapter->adapter_list);
3278        INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3279        INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3280
3281        INIT_WORK(&adapter->db_full_task, db_full_task);
3282        INIT_WORK(&adapter->db_empty_task, db_empty_task);
3283        INIT_WORK(&adapter->db_drop_task, db_drop_task);
3284
3285        INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3286
3287        for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3288                struct net_device *netdev;
3289
3290                netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3291                if (!netdev) {
3292                        err = -ENOMEM;
3293                        goto out_free_dev;
3294                }
3295
3296                SET_NETDEV_DEV(netdev, &pdev->dev);
3297
3298                adapter->port[i] = netdev;
3299                pi = netdev_priv(netdev);
3300                pi->adapter = adapter;
3301                pi->rx_offload = T3_RX_CSUM | T3_LRO;
3302                pi->port_id = i;
3303                netif_carrier_off(netdev);
3304                netdev->irq = pdev->irq;
3305                netdev->mem_start = mmio_start;
3306                netdev->mem_end = mmio_start + mmio_len - 1;
3307                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3308                netdev->features |= NETIF_F_GRO;
3309                if (pci_using_dac)
3310                        netdev->features |= NETIF_F_HIGHDMA;
3311
3312                netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3313                netdev->netdev_ops = &cxgb_netdev_ops;
3314                SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3315        }
3316
3317        pci_set_drvdata(pdev, adapter);
3318        if (t3_prep_adapter(adapter, ai, 1) < 0) {
3319                err = -ENODEV;
3320                goto out_free_dev;
3321        }
3322
3323        /*
3324         * The card is now ready to go.  If any errors occur during device
3325         * registration we do not fail the whole card but rather proceed only
3326         * with the ports we manage to register successfully.  However we must
3327         * register at least one net device.
3328         */
3329        for_each_port(adapter, i) {
3330                err = register_netdev(adapter->port[i]);
3331                if (err)
3332                        dev_warn(&pdev->dev,
3333                                 "cannot register net device %s, skipping\n",
3334                                 adapter->port[i]->name);
3335                else {
3336                        /*
3337                         * Change the name we use for messages to the name of
3338                         * the first successfully registered interface.
3339                         */
3340                        if (!adapter->registered_device_map)
3341                                adapter->name = adapter->port[i]->name;
3342
3343                        __set_bit(i, &adapter->registered_device_map);
3344                }
3345        }
3346        if (!adapter->registered_device_map) {
3347                dev_err(&pdev->dev, "could not register any net devices\n");
3348                goto out_free_dev;
3349        }
3350
3351        for_each_port(adapter, i)
3352                cxgb3_init_iscsi_mac(adapter->port[i]);
3353
3354        /* Driver's ready. Reflect it on LEDs */
3355        t3_led_ready(adapter);
3356
3357        if (is_offload(adapter)) {
3358                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3359                cxgb3_adapter_ofld(adapter);
3360        }
3361
3362        /* See what interrupts we'll be using */
3363        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3364                adapter->flags |= USING_MSIX;
3365        else if (msi > 0 && pci_enable_msi(pdev) == 0)
3366                adapter->flags |= USING_MSI;
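        /* If neither flag was set we stay on legacy INTx interrupts. */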
3367
3368        set_nqsets(adapter);
3369
3370        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3371                                 &cxgb3_attr_group);
        if (err)
                dev_warn(&pdev->dev, "cannot create sysfs group\n");
3372
3373        print_port_info(adapter, ai);
3374        return 0;
3375
3376out_free_dev:
3377        iounmap(adapter->regs);
3378        for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3379                if (adapter->port[i])
3380                        free_netdev(adapter->port[i]);
3381
3382out_free_adapter:
        /* nofail_skb may have been allocated before a later step failed */
        if (adapter->nofail_skb)
                kfree_skb(adapter->nofail_skb);
3383        kfree(adapter);
3384
3385out_release_regions:
3386        pci_release_regions(pdev);
3387out_disable_device:
3388        pci_disable_device(pdev);
3389        pci_set_drvdata(pdev, NULL);
3390out:
3391        return err;
3392}
3393
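/*
 * PCI remove: tear everything down in the reverse order of init_one,
 * taking care to unregister only the netdevs that were registered
 * successfully during probe.
 */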
3394static void __devexit remove_one(struct pci_dev *pdev)
3395{
3396        struct adapter *adapter = pci_get_drvdata(pdev);
3397
3398        if (adapter) {
3399                int i;
3400
3401                t3_sge_stop(adapter);
3402                sysfs_remove_group(&adapter->port[0]->dev.kobj,
3403                                   &cxgb3_attr_group);
3404
3405                if (is_offload(adapter)) {
3406                        cxgb3_adapter_unofld(adapter);
3407                        if (test_bit(OFFLOAD_DEVMAP_BIT,
3408                                     &adapter->open_device_map))
3409                                offload_close(&adapter->tdev);
3410                }
3411
3412                for_each_port(adapter, i)
3413                        if (test_bit(i, &adapter->registered_device_map))
3414                                unregister_netdev(adapter->port[i]);
3415
3416                t3_stop_sge_timers(adapter);
3417                t3_free_sge_resources(adapter);
3418                cxgb_disable_msi(adapter);
3419
3420                for_each_port(adapter, i)
3421                        if (adapter->port[i])
3422                                free_netdev(adapter->port[i]);
3423
3424                iounmap(adapter->regs);
3425                if (adapter->nofail_skb)
3426                        kfree_skb(adapter->nofail_skb);
3427                kfree(adapter);
3428                pci_release_regions(pdev);
3429                pci_disable_device(pdev);
3430                pci_set_drvdata(pdev, NULL);
3431        }
3432}
3433
3434static struct pci_driver driver = {
3435        .name = DRV_NAME,
3436        .id_table = cxgb3_pci_tbl,
3437        .probe = init_one,
3438        .remove = __devexit_p(remove_one),
3439        .err_handler = &t3_err_handler,
3440};
3441
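/*
 * Module init: set up the offload layer first so it is ready before
 * any adapter probes, then register the PCI driver.
 */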
3442static int __init cxgb3_init_module(void)
3443{
3444        int ret;
3445
3446        cxgb3_offload_init();
3447
3448        ret = pci_register_driver(&driver);
3449        return ret;
3450}
3451
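/*
 * Module exit: unregister the driver, then destroy the workqueue that
 * init_one may have created.
 */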
3452static void __exit cxgb3_cleanup_module(void)
3453{
3454        pci_unregister_driver(&driver);
3455        if (cxgb3_wq)
3456                destroy_workqueue(cxgb3_wq);
3457}
3458
3459module_init(cxgb3_init_module);
3460module_exit(cxgb3_cleanup_module);
3461