linux/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
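
/*
 * Illustrative note (assuming the standard NETIF_MSG_* bit values): the
 * default mask above evaluates to 0x377, so the same setting could be
 * requested explicitly with, e.g., "modprobe cxgb3 dflt_msg_enable=0x377".
 */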

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
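
/*
 * A minimal sketch (illustration only, not part of the driver) of how the
 * "msi" parameter gates the probe-time choice; cxgb_enable_msix() here is
 * a hypothetical helper standing in for the real MSI-X setup code:
 */
#if 0
static void example_pick_intr_scheme(struct adapter *adap)
{
	if (msi > 1 && cxgb_enable_msix(adap) == 0)
		adap->flags |= USING_MSIX;	/* msi = 2: try MSI-X first */
	else if (msi > 0 && pci_enable_msi(adap->pdev) == 0)
		adap->flags |= USING_MSI;	/* msi >= 1: fall back to MSI */
	/* otherwise legacy pin interrupts are used */
}
#endif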

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;
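
/*
 * A minimal sketch, assuming the usual module init/exit hooks (they live
 * elsewhere in this driver): the private queue is created once, work is
 * queued to it instead of to keventd, and it is destroyed on unload.
 */
#if 0
	cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
	if (!cxgb3_wq)
		return -ENOMEM;
	/* ... queue_work(cxgb3_wq, &some_work); ... */
	destroy_workqueue(cxgb3_wq);
#endif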

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		netdev_info(dev, "link up, %s, %s-duplex\n",
			    s, p->link_config.duplex == DUPLEX_FULL
			    ? "full" : "half");
	}
}

static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
			    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE +
				 pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}

/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the port index of the PHY reporting the module change
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		netdev_info(dev, "PHY module unplugged\n");
	else
		netdev_info(dev, "%s PHY module inserted\n",
			    mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}
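
/*
 * Poll (up to 10 times, 10 ms apart) until the first response queue has seen
 * "n" offload replies beyond the count captured in "init_cnt".
 */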
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 10;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}

static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE + 1];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}
	rspq_map[RSS_TABLE_SIZE] = 0xffff;	/* terminator */

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
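
/*
 * Illustrative example: with two queue sets per port (nq0 = nq1 = 2), the
 * first half of the lookup table above cycles through 0,1,0,1,... for
 * port 0, and the second half through 2,3,2,3,... for port 1 (offset by
 * nq0).
 */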

static void ring_dbs(struct adapter *adap)
{
	int i, j;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			for (j = 0; j < SGE_TXQ_PER_SET; j++)
				t3_write_reg(adap, A_SG_KDOORBELL,
					     F_SELEGRCNTX |
					     V_EGRCNTX(qs->txq[j].cntxt_id));
	}
}

static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;
	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
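		/*
		 * vpd.cclk is in kHz, so cclk * 1000 / cpt is scheduler
		 * ticks per second; multiplying by bpt bytes per tick and
		 * dividing by 125 (bytes/sec per Kbps) yields the rate.
		 */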
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
	    write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}

static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."			\
	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."		\
	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
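
/*
 * Illustrative expansion (the version numbers here are hypothetical): with
 * FW_VERSION_MAJOR/MINOR/MICRO of 7/12/0, FW_FNAME expands to
 * "cxgb3/t3fw-7.12.0.bin" and TPSRAM_NAME to "cxgb3/t3%c_psram-7.12.0.bin",
 * whose %c is filled in with the chip revision by t3rev2char() below.
 */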
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);

static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}

int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	const char *fw_name;
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret = -EINVAL;

	fw_name = get_edc_fw_name(edc_idx);
	if (fw_name)
		ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			fw_name);
		return ret;
	}

	/* check the size, taking the trailing checksum word into account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);
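
	/* a valid image carries a trailing word that makes the sum 0xffffffff */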
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

	for (i = 0; i < size / 4 ; i++) {
		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
		*cache++ = be32_to_cpu(p[i]) & 0xffff;
	}

	release_firmware(fw);

	return ret;
}

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			FW_FNAME);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}

static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (adapter->params.rev > 0) {
		t3_set_vlan_accel(adapter, 1 << pi->port_id,
				  features & NETIF_F_HW_VLAN_CTAG_RX);
	} else {
		/* single control for all ports */
		unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

		for_each_port(adapter, i)
			have_vlans |=
				adapter->port[i]->features &
				NETIF_F_HW_VLAN_CTAG_RX;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}

/**
 *	cxgb_up - enable the adapter
 *	@adapter: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int i, err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		for_each_port(adap, i)
			cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				       0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		int ret = bind_qsets(adap);

		if (ret < 0) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			err = ret;
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	t3_sge_stop(adapter);
	if (!on_wq)
		flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
}

static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;
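
	/* linkpoll_period is in tenths of a second, hence HZ * period / 10 */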
	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct t3c_data *td = T3C_DATA(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_work(&td->tid_release_task);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter, 0);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			pr_warn("Could not initialize offload capabilities\n");
	}

	netif_set_real_num_tx_queues(dev, pi->nqsets);
	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
	if (err)
		return err;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
	return 0;
}

static int __cxgb_close(struct net_device *dev, int on_wq)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter, on_wq);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	return __cxgb_close(dev, 0);
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}
1543static char stats_strings[][ETH_GSTRING_LEN] = {
1544        "TxOctetsOK         ",
1545        "TxFramesOK         ",
1546        "TxMulticastFramesOK",
1547        "TxBroadcastFramesOK",
1548        "TxPauseFrames      ",
1549        "TxUnderrun         ",
1550        "TxExtUnderrun      ",
1551
1552        "TxFrames64         ",
1553        "TxFrames65To127    ",
1554        "TxFrames128To255   ",
1555        "TxFrames256To511   ",
1556        "TxFrames512To1023  ",
1557        "TxFrames1024To1518 ",
1558        "TxFrames1519ToMax  ",
1559
1560        "RxOctetsOK         ",
1561        "RxFramesOK         ",
1562        "RxMulticastFramesOK",
1563        "RxBroadcastFramesOK",
1564        "RxPauseFrames      ",
1565        "RxFCSErrors        ",
1566        "RxSymbolErrors     ",
1567        "RxShortErrors      ",
1568        "RxJabberErrors     ",
1569        "RxLengthErrors     ",
1570        "RxFIFOoverflow     ",
1571
1572        "RxFrames64         ",
1573        "RxFrames65To127    ",
1574        "RxFrames128To255   ",
1575        "RxFrames256To511   ",
1576        "RxFrames512To1023  ",
1577        "RxFrames1024To1518 ",
1578        "RxFrames1519ToMax  ",
1579
1580        "PhyFIFOErrors      ",
1581        "TSO                ",
1582        "VLANextractions    ",
1583        "VLANinsertions     ",
1584        "TxCsumOffload      ",
1585        "RxCsumGood         ",
1586        "LroAggregated      ",
1587        "LroFlushed         ",
1588        "LroNoDesc          ",
1589        "RxDrops            ",
1590
1591        "CheckTXEnToggled   ",
1592        "CheckResets        ",
1593
1594        "LinkFaults         ",
1595};
1596
1597static int get_sset_count(struct net_device *dev, int sset)
1598{
1599        switch (sset) {
1600        case ETH_SS_STATS:
1601                return ARRAY_SIZE(stats_strings);
1602        default:
1603                return -EOPNOTSUPP;
1604        }
1605}
1606
1607#define T3_REGMAP_SIZE (3 * 1024)
1608
1609static int get_regs_len(struct net_device *dev)
1610{
1611        return T3_REGMAP_SIZE;
1612}
1613
1614static int get_eeprom_len(struct net_device *dev)
1615{
1616        return EEPROMSIZE;
1617}
1618
1619static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1620{
1621        struct port_info *pi = netdev_priv(dev);
1622        struct adapter *adapter = pi->adapter;
1623        u32 fw_vers = 0;
1624        u32 tp_vers = 0;
1625
1626        spin_lock(&adapter->stats_lock);
1627        t3_get_fw_version(adapter, &fw_vers);
1628        t3_get_tp_version(adapter, &tp_vers);
1629        spin_unlock(&adapter->stats_lock);
1630
1631        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1632        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1633        strlcpy(info->bus_info, pci_name(adapter->pdev),
1634                sizeof(info->bus_info));
1635        if (fw_vers)
1636                snprintf(info->fw_version, sizeof(info->fw_version),
1637                         "%s %u.%u.%u TP %u.%u.%u",
1638                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1639                         G_FW_VERSION_MAJOR(fw_vers),
1640                         G_FW_VERSION_MINOR(fw_vers),
1641                         G_FW_VERSION_MICRO(fw_vers),
1642                         G_TP_VERSION_MAJOR(tp_vers),
1643                         G_TP_VERSION_MINOR(tp_vers),
1644                         G_TP_VERSION_MICRO(tp_vers));
1645}
1646
1647static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1648{
1649        if (stringset == ETH_SS_STATS)
1650                memcpy(data, stats_strings, sizeof(stats_strings));
1651}
1652
1653static unsigned long collect_sge_port_stats(struct adapter *adapter,
1654                                            struct port_info *p, int idx)
1655{
1656        int i;
1657        unsigned long tot = 0;
1658
1659        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1660                tot += adapter->sge.qs[i].port_stats[idx];
1661        return tot;
1662}
1663
1664static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1665                      u64 *data)
1666{
1667        struct port_info *pi = netdev_priv(dev);
1668        struct adapter *adapter = pi->adapter;
1669        const struct mac_stats *s;
1670
1671        spin_lock(&adapter->stats_lock);
1672        s = t3_mac_update_stats(&pi->mac);
1673        spin_unlock(&adapter->stats_lock);
1674
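            /* The order of the writes below must match stats_strings[] exactly. */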
1675        *data++ = s->tx_octets;
1676        *data++ = s->tx_frames;
1677        *data++ = s->tx_mcast_frames;
1678        *data++ = s->tx_bcast_frames;
1679        *data++ = s->tx_pause;
1680        *data++ = s->tx_underrun;
1681        *data++ = s->tx_fifo_urun;
1682
1683        *data++ = s->tx_frames_64;
1684        *data++ = s->tx_frames_65_127;
1685        *data++ = s->tx_frames_128_255;
1686        *data++ = s->tx_frames_256_511;
1687        *data++ = s->tx_frames_512_1023;
1688        *data++ = s->tx_frames_1024_1518;
1689        *data++ = s->tx_frames_1519_max;
1690
1691        *data++ = s->rx_octets;
1692        *data++ = s->rx_frames;
1693        *data++ = s->rx_mcast_frames;
1694        *data++ = s->rx_bcast_frames;
1695        *data++ = s->rx_pause;
1696        *data++ = s->rx_fcs_errs;
1697        *data++ = s->rx_symbol_errs;
1698        *data++ = s->rx_short;
1699        *data++ = s->rx_jabber;
1700        *data++ = s->rx_too_long;
1701        *data++ = s->rx_fifo_ovfl;
1702
1703        *data++ = s->rx_frames_64;
1704        *data++ = s->rx_frames_65_127;
1705        *data++ = s->rx_frames_128_255;
1706        *data++ = s->rx_frames_256_511;
1707        *data++ = s->rx_frames_512_1023;
1708        *data++ = s->rx_frames_1024_1518;
1709        *data++ = s->rx_frames_1519_max;
1710
1711        *data++ = pi->phy.fifo_errors;
1712
1713        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1714        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1715        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1716        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1717        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
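            /*
             * Placeholders for the LroAggregated/LroFlushed/LroNoDesc entries
             * in stats_strings[]; nothing is counted for them here, but the
             * slots must stay so the layout still matches the string table.
             */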
1718        *data++ = 0;
1719        *data++ = 0;
1720        *data++ = 0;
1721        *data++ = s->rx_cong_drops;
1722
1723        *data++ = s->num_toggled;
1724        *data++ = s->num_resets;
1725
1726        *data++ = s->link_faults;
1727}
1728
1729static inline void reg_block_dump(struct adapter *ap, void *buf,
1730                                  unsigned int start, unsigned int end)
1731{
1732        u32 *p = buf + start;
1733
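            /*
             * The dump buffer mirrors the register address map, hence the
             * offset by 'start'.
             */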
1734        for (; start <= end; start += sizeof(u32))
1735                *p++ = t3_read_reg(ap, start);
1736}
1737
1738static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1739                     void *buf)
1740{
1741        struct port_info *pi = netdev_priv(dev);
1742        struct adapter *ap = pi->adapter;
1743
1744        /*
1745         * Version scheme:
1746         * bits 0..9: chip version
1747         * bits 10..15: chip revision
1748         * bit 31: set for PCIe cards
1749         */
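            /*
             * Illustrative example: a rev-2 PCIe card yields
             * 3 | (2 << 10) | (1 << 31) = 0x80000803.
             */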
1750        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1751
1752        /*
1753         * We skip the MAC statistics registers because they are clear-on-read.
1754         * Also reading multi-register stats would need to synchronize with the
1755         * periodic mac stats accumulation.  Hard to justify the complexity.
1756         */
1757        memset(buf, 0, T3_REGMAP_SIZE);
1758        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1759        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1760        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1761        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1762        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1763        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1764                       XGM_REG(A_XGM_SERDES_STAT3, 1));
1765        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1766                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1767}
1768
1769static int restart_autoneg(struct net_device *dev)
1770{
1771        struct port_info *p = netdev_priv(dev);
1772
1773        if (!netif_running(dev))
1774                return -EAGAIN;
1775        if (p->link_config.autoneg != AUTONEG_ENABLE)
1776                return -EINVAL;
1777        p->phy.ops->autoneg_restart(&p->phy);
1778        return 0;
1779}
1780
1781static int set_phys_id(struct net_device *dev,
1782                       enum ethtool_phys_id_state state)
1783{
1784        struct port_info *pi = netdev_priv(dev);
1785        struct adapter *adapter = pi->adapter;
1786
1787        switch (state) {
1788        case ETHTOOL_ID_ACTIVE:
1789                return 1;       /* cycle on/off once per second */
1790
1791        case ETHTOOL_ID_OFF:
1792                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1793                break;
1794
1795        case ETHTOOL_ID_ON:
1796        case ETHTOOL_ID_INACTIVE:
1797                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1798                                 F_GPIO0_OUT_VAL);
1799        }
1800
1801        return 0;
1802}
1803
1804static int get_link_ksettings(struct net_device *dev,
1805                              struct ethtool_link_ksettings *cmd)
1806{
1807        struct port_info *p = netdev_priv(dev);
1808        u32 supported;
1809
1810        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1811                                                p->link_config.supported);
1812        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1813                                                p->link_config.advertising);
1814
1815        if (netif_carrier_ok(dev)) {
1816                cmd->base.speed = p->link_config.speed;
1817                cmd->base.duplex = p->link_config.duplex;
1818        } else {
1819                cmd->base.speed = SPEED_UNKNOWN;
1820                cmd->base.duplex = DUPLEX_UNKNOWN;
1821        }
1822
1823        ethtool_convert_link_mode_to_legacy_u32(&supported,
1824                                                cmd->link_modes.supported);
1825
1826        cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1827        cmd->base.phy_address = p->phy.mdio.prtad;
1828        cmd->base.autoneg = p->link_config.autoneg;
1829        return 0;
1830}
1831
1832static int speed_duplex_to_caps(int speed, int duplex)
1833{
1834        int cap = 0;
1835
1836        switch (speed) {
1837        case SPEED_10:
1838                if (duplex == DUPLEX_FULL)
1839                        cap = SUPPORTED_10baseT_Full;
1840                else
1841                        cap = SUPPORTED_10baseT_Half;
1842                break;
1843        case SPEED_100:
1844                if (duplex == DUPLEX_FULL)
1845                        cap = SUPPORTED_100baseT_Full;
1846                else
1847                        cap = SUPPORTED_100baseT_Half;
1848                break;
1849        case SPEED_1000:
1850                if (duplex == DUPLEX_FULL)
1851                        cap = SUPPORTED_1000baseT_Full;
1852                else
1853                        cap = SUPPORTED_1000baseT_Half;
1854                break;
1855        case SPEED_10000:
1856                if (duplex == DUPLEX_FULL)
1857                        cap = SUPPORTED_10000baseT_Full;
1858        }
1859        return cap;
1860}
1861
1862#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1863                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1864                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1865                      ADVERTISED_10000baseT_Full)
1866
1867static int set_link_ksettings(struct net_device *dev,
1868                              const struct ethtool_link_ksettings *cmd)
1869{
1870        struct port_info *p = netdev_priv(dev);
1871        struct link_config *lc = &p->link_config;
1872        u32 advertising;
1873
1874        ethtool_convert_link_mode_to_legacy_u32(&advertising,
1875                                                cmd->link_modes.advertising);
1876
1877        if (!(lc->supported & SUPPORTED_Autoneg)) {
1878                /*
1879                 * PHY offers a single speed/duplex.  See if that's what's
1880                 * being requested.
1881                 */
1882                if (cmd->base.autoneg == AUTONEG_DISABLE) {
1883                        u32 speed = cmd->base.speed;
1884                        int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1885                        if (lc->supported & cap)
1886                                return 0;
1887                }
1888                return -EINVAL;
1889        }
1890
1891        if (cmd->base.autoneg == AUTONEG_DISABLE) {
1892                u32 speed = cmd->base.speed;
1893                int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1894
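                    /*
                     * Forced 1 Gb/s is rejected below: 1000BASE-T requires
                     * autonegotiation (for master/slave resolution).
                     */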
1895                if (!(lc->supported & cap) || (speed == SPEED_1000))
1896                        return -EINVAL;
1897                lc->requested_speed = speed;
1898                lc->requested_duplex = cmd->base.duplex;
1899                lc->advertising = 0;
1900        } else {
1901                advertising &= ADVERTISED_MASK;
1902                advertising &= lc->supported;
1903                if (!advertising)
1904                        return -EINVAL;
1905                lc->requested_speed = SPEED_INVALID;
1906                lc->requested_duplex = DUPLEX_INVALID;
1907                lc->advertising = advertising | ADVERTISED_Autoneg;
1908        }
1909        lc->autoneg = cmd->base.autoneg;
1910        if (netif_running(dev))
1911                t3_link_start(&p->phy, &p->mac, lc);
1912        return 0;
1913}
1914
1915static void get_pauseparam(struct net_device *dev,
1916                           struct ethtool_pauseparam *epause)
1917{
1918        struct port_info *p = netdev_priv(dev);
1919
1920        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1921        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1922        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1923}
1924
1925static int set_pauseparam(struct net_device *dev,
1926                          struct ethtool_pauseparam *epause)
1927{
1928        struct port_info *p = netdev_priv(dev);
1929        struct link_config *lc = &p->link_config;
1930
1931        if (epause->autoneg == AUTONEG_DISABLE)
1932                lc->requested_fc = 0;
1933        else if (lc->supported & SUPPORTED_Autoneg)
1934                lc->requested_fc = PAUSE_AUTONEG;
1935        else
1936                return -EINVAL;
1937
1938        if (epause->rx_pause)
1939                lc->requested_fc |= PAUSE_RX;
1940        if (epause->tx_pause)
1941                lc->requested_fc |= PAUSE_TX;
1942        if (lc->autoneg == AUTONEG_ENABLE) {
1943                if (netif_running(dev))
1944                        t3_link_start(&p->phy, &p->mac, lc);
1945        } else {
1946                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1947                if (netif_running(dev))
1948                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1949        }
1950        return 0;
1951}
1952
1953static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1954{
1955        struct port_info *pi = netdev_priv(dev);
1956        struct adapter *adapter = pi->adapter;
1957        const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1958
1959        e->rx_max_pending = MAX_RX_BUFFERS;
1960        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1961        e->tx_max_pending = MAX_TXQ_ENTRIES;
1962
1963        e->rx_pending = q->fl_size;
1964        e->rx_mini_pending = q->rspq_size;
1965        e->rx_jumbo_pending = q->jumbo_size;
1966        e->tx_pending = q->txq_size[0];
1967}
1968
1969static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1970{
1971        struct port_info *pi = netdev_priv(dev);
1972        struct adapter *adapter = pi->adapter;
1973        struct qset_params *q;
1974        int i;
1975
1976        if (e->rx_pending > MAX_RX_BUFFERS ||
1977            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1978            e->tx_pending > MAX_TXQ_ENTRIES ||
1979            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1980            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1981            e->rx_pending < MIN_FL_ENTRIES ||
1982            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1983            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1984                return -EINVAL;
1985
1986        if (adapter->flags & FULL_INIT_DONE)
1987                return -EBUSY;
1988
1989        q = &adapter->params.sge.qset[pi->first_qset];
1990        for (i = 0; i < pi->nqsets; ++i, ++q) {
1991                q->rspq_size = e->rx_mini_pending;
1992                q->fl_size = e->rx_pending;
1993                q->jumbo_size = e->rx_jumbo_pending;
1994                q->txq_size[0] = e->tx_pending;
1995                q->txq_size[1] = e->tx_pending;
1996                q->txq_size[2] = e->tx_pending;
1997        }
1998        return 0;
1999}
2000
2001static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2002{
2003        struct port_info *pi = netdev_priv(dev);
2004        struct adapter *adapter = pi->adapter;
2005        struct qset_params *qsp;
2006        struct sge_qset *qs;
2007        int i;
2008
2009        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2010                return -EINVAL;
2011
2012        for (i = 0; i < pi->nqsets; i++) {
2013                qsp = &adapter->params.sge.qset[i];
2014                qs = &adapter->sge.qs[i];
2015                qsp->coalesce_usecs = c->rx_coalesce_usecs;
2016                t3_update_qset_coalesce(qs, qsp);
2017        }
2018
2019        return 0;
2020}
2021
2022static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2023{
2024        struct port_info *pi = netdev_priv(dev);
2025        struct adapter *adapter = pi->adapter;
2026        struct qset_params *q = adapter->params.sge.qset;
2027
2028        c->rx_coalesce_usecs = q->coalesce_usecs;
2029        return 0;
2030}
2031
2032static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2033                      u8 *data)
2034{
2035        struct port_info *pi = netdev_priv(dev);
2036        struct adapter *adapter = pi->adapter;
2037        int i, err = 0;
2038
2039        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2040        if (!buf)
2041                return -ENOMEM;
2042
2043        e->magic = EEPROM_MAGIC;
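            /*
             * SEEPROM reads are 32 bits wide on 4-byte boundaries, so start at
             * the rounded-down offset and copy out only the requested window.
             */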
2044        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2045                err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
2046
2047        if (!err)
2048                memcpy(data, buf + e->offset, e->len);
2049        kfree(buf);
2050        return err;
2051}
2052
2053static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2054                      u8 *data)
2055{
2056        struct port_info *pi = netdev_priv(dev);
2057        struct adapter *adapter = pi->adapter;
2058        u32 aligned_offset, aligned_len;
2059        __le32 *p;
2060        u8 *buf;
2061        int err;
2062
2063        if (eeprom->magic != EEPROM_MAGIC)
2064                return -EINVAL;
2065
2066        aligned_offset = eeprom->offset & ~3;
2067        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2068
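            /*
             * Read-modify-write: for an unaligned write, fetch the first and
             * last words first so the bytes outside the requested range are
             * written back unchanged.
             */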
2069        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2070                buf = kmalloc(aligned_len, GFP_KERNEL);
2071                if (!buf)
2072                        return -ENOMEM;
2073                err = t3_seeprom_read(adapter, aligned_offset, (__le32 *)buf);
2074                if (!err && aligned_len > 4)
2075                        err = t3_seeprom_read(adapter,
2076                                              aligned_offset + aligned_len - 4,
2077                                              (__le32 *)&buf[aligned_len - 4]);
2078                if (err)
2079                        goto out;
2080                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2081        } else {
2082                buf = data;
2083        }

2084        err = t3_seeprom_wp(adapter, 0);
2085        if (err)
2086                goto out;
2087
2088        for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2089                err = t3_seeprom_write(adapter, aligned_offset, *p);
2090                aligned_offset += 4;
2091        }
2092
2093        if (!err)
2094                err = t3_seeprom_wp(adapter, 1);
2095out:
2096        if (buf != data)
2097                kfree(buf);
2098        return err;
2099}
2100
2101static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2102{
2103        wol->supported = 0;
2104        wol->wolopts = 0;
2105        memset(&wol->sopass, 0, sizeof(wol->sopass));
2106}
2107
2108static const struct ethtool_ops cxgb_ethtool_ops = {
2109        .get_drvinfo = get_drvinfo,
2110        .get_msglevel = get_msglevel,
2111        .set_msglevel = set_msglevel,
2112        .get_ringparam = get_sge_param,
2113        .set_ringparam = set_sge_param,
2114        .get_coalesce = get_coalesce,
2115        .set_coalesce = set_coalesce,
2116        .get_eeprom_len = get_eeprom_len,
2117        .get_eeprom = get_eeprom,
2118        .set_eeprom = set_eeprom,
2119        .get_pauseparam = get_pauseparam,
2120        .set_pauseparam = set_pauseparam,
2121        .get_link = ethtool_op_get_link,
2122        .get_strings = get_strings,
2123        .set_phys_id = set_phys_id,
2124        .nway_reset = restart_autoneg,
2125        .get_sset_count = get_sset_count,
2126        .get_ethtool_stats = get_stats,
2127        .get_regs_len = get_regs_len,
2128        .get_regs = get_regs,
2129        .get_wol = get_wol,
2130        .get_link_ksettings = get_link_ksettings,
2131        .set_link_ksettings = set_link_ksettings,
2132};
2133
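    /*
     * Note that negative values deliberately pass this check: the extension
     * ioctl uses -1 to mean "parameter not supplied", and the callers only
     * apply values >= 0.
     */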
2134static int in_range(int val, int lo, int hi)
2135{
2136        return val < 0 || (val <= hi && val >= lo);
2137}
2138
2139static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2140{
2141        struct port_info *pi = netdev_priv(dev);
2142        struct adapter *adapter = pi->adapter;
2143        u32 cmd;
2144        int ret;
2145
2146        if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2147                return -EFAULT;
2148
2149        switch (cmd) {
2150        case CHELSIO_SET_QSET_PARAMS:{
2151                int i;
2152                struct qset_params *q;
2153                struct ch_qset_params t;
2154                int q1 = pi->first_qset;
2155                int nqsets = pi->nqsets;
2156
2157                if (!capable(CAP_NET_ADMIN))
2158                        return -EPERM;
2159                if (copy_from_user(&t, useraddr, sizeof(t)))
2160                        return -EFAULT;
2161                if (t.qset_idx >= SGE_QSETS)
2162                        return -EINVAL;
2163                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2164                    !in_range(t.cong_thres, 0, 255) ||
2165                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2166                              MAX_TXQ_ENTRIES) ||
2167                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2168                              MAX_TXQ_ENTRIES) ||
2169                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2170                              MAX_CTRL_TXQ_ENTRIES) ||
2171                    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2172                              MAX_RX_BUFFERS) ||
2173                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2174                              MAX_RX_JUMBO_BUFFERS) ||
2175                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2176                              MAX_RSPQ_ENTRIES))
2177                        return -EINVAL;
2178
2179                if ((adapter->flags & FULL_INIT_DONE) &&
2180                        (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2181                        t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2182                        t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2183                        t.polling >= 0 || t.cong_thres >= 0))
2184                        return -EBUSY;
2185
2186                /* Allow setting of any available qset when offload enabled */
2187                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2188                        q1 = 0;
2189                        for_each_port(adapter, i) {
2190                                pi = adap2pinfo(adapter, i);
2191                                nqsets += pi->first_qset + pi->nqsets;
2192                        }
2193                }
2194
2195                if (t.qset_idx < q1)
2196                        return -EINVAL;
2197                if (t.qset_idx > q1 + nqsets - 1)
2198                        return -EINVAL;
2199
2200                q = &adapter->params.sge.qset[t.qset_idx];
2201
2202                if (t.rspq_size >= 0)
2203                        q->rspq_size = t.rspq_size;
2204                if (t.fl_size[0] >= 0)
2205                        q->fl_size = t.fl_size[0];
2206                if (t.fl_size[1] >= 0)
2207                        q->jumbo_size = t.fl_size[1];
2208                if (t.txq_size[0] >= 0)
2209                        q->txq_size[0] = t.txq_size[0];
2210                if (t.txq_size[1] >= 0)
2211                        q->txq_size[1] = t.txq_size[1];
2212                if (t.txq_size[2] >= 0)
2213                        q->txq_size[2] = t.txq_size[2];
2214                if (t.cong_thres >= 0)
2215                        q->cong_thres = t.cong_thres;
2216                if (t.intr_lat >= 0) {
2217                        struct sge_qset *qs =
2218                                &adapter->sge.qs[t.qset_idx];
2219
2220                        q->coalesce_usecs = t.intr_lat;
2221                        t3_update_qset_coalesce(qs, q);
2222                }
2223                if (t.polling >= 0) {
2224                        if (adapter->flags & USING_MSIX)
2225                                q->polling = t.polling;
2226                        else {
2227                                /* No polling with INTx for T3A */
2228                                if (adapter->params.rev == 0 &&
2229                                        !(adapter->flags & USING_MSI))
2230                                        t.polling = 0;
2231
2232                                for (i = 0; i < SGE_QSETS; i++) {
2233                                        q = &adapter->params.sge.qset[i];
2235                                        q->polling = t.polling;
2236                                }
2237                        }
2238                }
2239
2240                if (t.lro >= 0) {
2241                        if (t.lro)
2242                                dev->wanted_features |= NETIF_F_GRO;
2243                        else
2244                                dev->wanted_features &= ~NETIF_F_GRO;
2245                        netdev_update_features(dev);
2246                }
2247
2248                break;
2249        }
2250        case CHELSIO_GET_QSET_PARAMS:{
2251                struct qset_params *q;
2252                struct ch_qset_params t;
2253                int q1 = pi->first_qset;
2254                int nqsets = pi->nqsets;
2255                int i;
2256
2257                if (copy_from_user(&t, useraddr, sizeof(t)))
2258                        return -EFAULT;
2259
2260                /* Display qsets for all ports when offload enabled */
2261                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2262                        q1 = 0;
2263                        for_each_port(adapter, i) {
2264                                pi = adap2pinfo(adapter, i);
2265                                nqsets = pi->first_qset + pi->nqsets;
2266                        }
2267                }
2268
2269                if (t.qset_idx >= nqsets)
2270                        return -EINVAL;
2271
2272                q = &adapter->params.sge.qset[q1 + t.qset_idx];
2273                t.rspq_size = q->rspq_size;
2274                t.txq_size[0] = q->txq_size[0];
2275                t.txq_size[1] = q->txq_size[1];
2276                t.txq_size[2] = q->txq_size[2];
2277                t.fl_size[0] = q->fl_size;
2278                t.fl_size[1] = q->jumbo_size;
2279                t.polling = q->polling;
2280                t.lro = !!(dev->features & NETIF_F_GRO);
2281                t.intr_lat = q->coalesce_usecs;
2282                t.cong_thres = q->cong_thres;
2283                t.qnum = q1;
2284
2285                if (adapter->flags & USING_MSIX)
2286                        t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2287                else
2288                        t.vector = adapter->pdev->irq;
2289
2290                if (copy_to_user(useraddr, &t, sizeof(t)))
2291                        return -EFAULT;
2292                break;
2293        }
2294        case CHELSIO_SET_QSET_NUM:{
2295                struct ch_reg edata;
2296                unsigned int i, first_qset = 0, other_qsets = 0;
2297
2298                if (!capable(CAP_NET_ADMIN))
2299                        return -EPERM;
2300                if (adapter->flags & FULL_INIT_DONE)
2301                        return -EBUSY;
2302                if (copy_from_user(&edata, useraddr, sizeof(edata)))
2303                        return -EFAULT;
2304                if (edata.val < 1 ||
2305                        (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2306                        return -EINVAL;
2307
2308                for_each_port(adapter, i)
2309                        if (adapter->port[i] && adapter->port[i] != dev)
2310                                other_qsets += adap2pinfo(adapter, i)->nqsets;
2311
2312                if (edata.val + other_qsets > SGE_QSETS)
2313                        return -EINVAL;
2314
2315                pi->nqsets = edata.val;
2316
2317                for_each_port(adapter, i)
2318                        if (adapter->port[i]) {
2319                                pi = adap2pinfo(adapter, i);
2320                                pi->first_qset = first_qset;
2321                                first_qset += pi->nqsets;
2322                        }
2323                break;
2324        }
2325        case CHELSIO_GET_QSET_NUM:{
2326                struct ch_reg edata;
2327
2328                memset(&edata, 0, sizeof(struct ch_reg));
2329
2330                edata.cmd = CHELSIO_GET_QSET_NUM;
2331                edata.val = pi->nqsets;
2332                if (copy_to_user(useraddr, &edata, sizeof(edata)))
2333                        return -EFAULT;
2334                break;
2335        }
2336        case CHELSIO_LOAD_FW:{
2337                u8 *fw_data;
2338                struct ch_mem_range t;
2339
2340                if (!capable(CAP_SYS_RAWIO))
2341                        return -EPERM;
2342                if (copy_from_user(&t, useraddr, sizeof(t)))
2343                        return -EFAULT;
2344                /* Check t.len sanity? */
2345                fw_data = memdup_user(useraddr + sizeof(t), t.len);
2346                if (IS_ERR(fw_data))
2347                        return PTR_ERR(fw_data);
2348
2349                ret = t3_load_fw(adapter, fw_data, t.len);
2350                kfree(fw_data);
2351                if (ret)
2352                        return ret;
2353                break;
2354        }
2355        case CHELSIO_SETMTUTAB:{
2356                struct ch_mtus m;
2357                int i;
2358
2359                if (!is_offload(adapter))
2360                        return -EOPNOTSUPP;
2361                if (!capable(CAP_NET_ADMIN))
2362                        return -EPERM;
2363                if (offload_running(adapter))
2364                        return -EBUSY;
2365                if (copy_from_user(&m, useraddr, sizeof(m)))
2366                        return -EFAULT;
2367                if (m.nmtus != NMTUS)
2368                        return -EINVAL;
2369                if (m.mtus[0] < 81)     /* accommodate SACK */
2370                        return -EINVAL;
2371
2372                /* MTUs must be in ascending order */
2373                for (i = 1; i < NMTUS; ++i)
2374                        if (m.mtus[i] < m.mtus[i - 1])
2375                                return -EINVAL;
2376
2377                memcpy(adapter->params.mtus, m.mtus,
2378                        sizeof(adapter->params.mtus));
2379                break;
2380        }
2381        case CHELSIO_GET_PM:{
2382                struct tp_params *p = &adapter->params.tp;
2383                struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2384
2385                if (!is_offload(adapter))
2386                        return -EOPNOTSUPP;
2387                m.tx_pg_sz = p->tx_pg_size;
2388                m.tx_num_pg = p->tx_num_pgs;
2389                m.rx_pg_sz = p->rx_pg_size;
2390                m.rx_num_pg = p->rx_num_pgs;
2391                m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2392                if (copy_to_user(useraddr, &m, sizeof(m)))
2393                        return -EFAULT;
2394                break;
2395        }
2396        case CHELSIO_SET_PM:{
2397                struct ch_pm m;
2398                struct tp_params *p = &adapter->params.tp;
2399
2400                if (!is_offload(adapter))
2401                        return -EOPNOTSUPP;
2402                if (!capable(CAP_NET_ADMIN))
2403                        return -EPERM;
2404                if (adapter->flags & FULL_INIT_DONE)
2405                        return -EBUSY;
2406                if (copy_from_user(&m, useraddr, sizeof(m)))
2407                        return -EFAULT;
2408                if (!is_power_of_2(m.rx_pg_sz) ||
2409                        !is_power_of_2(m.tx_pg_sz))
2410                        return -EINVAL; /* not power of 2 */
2411                if (!(m.rx_pg_sz & 0x14000))
2412                        return -EINVAL; /* not 16KB or 64KB */
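                    /* 0x1554000 = 16KB|64KB|256KB|1MB|4MB|16MB, the valid Tx page sizes */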
2413                if (!(m.tx_pg_sz & 0x1554000))
2414                        return -EINVAL;
2415                if (m.tx_num_pg == -1)
2416                        m.tx_num_pg = p->tx_num_pgs;
2417                if (m.rx_num_pg == -1)
2418                        m.rx_num_pg = p->rx_num_pgs;
2419                if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2420                        return -EINVAL;
2421                if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2422                        m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2423                        return -EINVAL;
2424                p->rx_pg_size = m.rx_pg_sz;
2425                p->tx_pg_size = m.tx_pg_sz;
2426                p->rx_num_pgs = m.rx_num_pg;
2427                p->tx_num_pgs = m.tx_num_pg;
2428                break;
2429        }
2430        case CHELSIO_GET_MEM:{
2431                struct ch_mem_range t;
2432                struct mc7 *mem;
2433                u64 buf[32];
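                    /* 32 * 8 = 256 bytes, matching the chunked-copy comment below */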
2434
2435                if (!is_offload(adapter))
2436                        return -EOPNOTSUPP;
2437                if (!(adapter->flags & FULL_INIT_DONE))
2438                        return -EIO;    /* need the memory controllers */
2439                if (copy_from_user(&t, useraddr, sizeof(t)))
2440                        return -EFAULT;
2441                if ((t.addr & 7) || (t.len & 7))
2442                        return -EINVAL;
2443                if (t.mem_id == MEM_CM)
2444                        mem = &adapter->cm;
2445                else if (t.mem_id == MEM_PMRX)
2446                        mem = &adapter->pmrx;
2447                else if (t.mem_id == MEM_PMTX)
2448                        mem = &adapter->pmtx;
2449                else
2450                        return -EINVAL;
2451
2452                /*
2453                 * Version scheme:
2454                 * bits 0..9: chip version
2455                 * bits 10..15: chip revision
2456                 */
2457                t.version = 3 | (adapter->params.rev << 10);
2458                if (copy_to_user(useraddr, &t, sizeof(t)))
2459                        return -EFAULT;
2460
2461                /*
2462                 * Read 256 bytes at a time as len can be large and we don't
2463                 * want to use huge intermediate buffers.
2464                 */
2465                useraddr += sizeof(t);  /* advance to start of buffer */
2466                while (t.len) {
2467                        unsigned int chunk =
2468                                min_t(unsigned int, t.len, sizeof(buf));
2469
2470                        ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
2473                        if (ret)
2474                                return ret;
2475                        if (copy_to_user(useraddr, buf, chunk))
2476                                return -EFAULT;
2477                        useraddr += chunk;
2478                        t.addr += chunk;
2479                        t.len -= chunk;
2480                }
2481                break;
2482        }
2483        case CHELSIO_SET_TRACE_FILTER:{
2484                struct ch_trace t;
2485                const struct trace_params *tp;
2486
2487                if (!capable(CAP_NET_ADMIN))
2488                        return -EPERM;
2489                if (!offload_running(adapter))
2490                        return -EAGAIN;
2491                if (copy_from_user(&t, useraddr, sizeof(t)))
2492                        return -EFAULT;
2493
2494                tp = (const struct trace_params *)&t.sip;
2495                if (t.config_tx)
2496                        t3_config_trace_filter(adapter, tp, 0,
2497                                                t.invert_match,
2498                                                t.trace_tx);
2499                if (t.config_rx)
2500                        t3_config_trace_filter(adapter, tp, 1,
2501                                                t.invert_match,
2502                                                t.trace_rx);
2503                break;
2504        }
2505        default:
2506                return -EOPNOTSUPP;
2507        }
2508        return 0;
2509}
2510
2511static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2512{
2513        struct mii_ioctl_data *data = if_mii(req);
2514        struct port_info *pi = netdev_priv(dev);
2515        struct adapter *adapter = pi->adapter;
2516
2517        switch (cmd) {
2518        case SIOCGMIIREG:
2519        case SIOCSMIIREG:
2520                /* Convert phy_id from older PRTAD/DEVAD format */
2521                if (is_10G(adapter) &&
2522                    !mdio_phy_id_is_c45(data->phy_id) &&
2523                    (data->phy_id & 0x1f00) &&
2524                    !(data->phy_id & 0xe0e0))
2525                        data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2526                                                       data->phy_id & 0x1f);
2527                /* FALLTHRU */
2528        case SIOCGMIIPHY:
2529                return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2530        case SIOCCHIOCTL:
2531                return cxgb_extension_ioctl(dev, req->ifr_data);
2532        default:
2533                return -EOPNOTSUPP;
2534        }
2535}
2536
2537static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2538{
2539        struct port_info *pi = netdev_priv(dev);
2540        struct adapter *adapter = pi->adapter;
2541        int ret;
2542
2543        if (new_mtu < 81)       /* accommodate SACK */
2544                return -EINVAL;
2545        ret = t3_mac_set_mtu(&pi->mac, new_mtu);
2546        if (ret)
                return ret;
2547        dev->mtu = new_mtu;
2548        init_port_mtus(adapter);
2549        if (adapter->params.rev == 0 && offload_running(adapter))
2550                t3_load_mtus(adapter, adapter->params.mtus,
2551                             adapter->params.a_wnd, adapter->params.b_wnd,
2552                             adapter->port[0]->mtu);
2553        return 0;
2554}
2555
2556static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2557{
2558        struct port_info *pi = netdev_priv(dev);
2559        struct adapter *adapter = pi->adapter;
2560        struct sockaddr *addr = p;
2561
2562        if (!is_valid_ether_addr(addr->sa_data))
2563                return -EADDRNOTAVAIL;
2564
2565        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2566        t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2567        if (offload_running(adapter))
2568                write_smt_entry(adapter, pi->port_id);
2569        return 0;
2570}
2571
2572static netdev_features_t cxgb_fix_features(struct net_device *dev,
2573        netdev_features_t features)
2574{
2575        /*
2576         * Since there is no support for separate rx/tx vlan accel
2577         * enable/disable make sure tx flag is always in same state as rx.
2578         */
2579        if (features & NETIF_F_HW_VLAN_CTAG_RX)
2580                features |= NETIF_F_HW_VLAN_CTAG_TX;
2581        else
2582                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2583
2584        return features;
2585}
2586
2587static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2588{
2589        netdev_features_t changed = dev->features ^ features;
2590
2591        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2592                cxgb_vlan_mode(dev, features);
2593
2594        return 0;
2595}
2596
2597#ifdef CONFIG_NET_POLL_CONTROLLER
2598static void cxgb_netpoll(struct net_device *dev)
2599{
2600        struct port_info *pi = netdev_priv(dev);
2601        struct adapter *adapter = pi->adapter;
2602        int qidx;
2603
2604        for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2605                struct sge_qset *qs = &adapter->sge.qs[qidx];
2606                void *source;
2607
2608                if (adapter->flags & USING_MSIX)
2609                        source = qs;
2610                else
2611                        source = adapter;
2612
2613                t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2614        }
2615}
2616#endif
2617
2618/*
2619 * Periodic accumulation of MAC statistics.
2620 */
2621static void mac_stats_update(struct adapter *adapter)
2622{
2623        int i;
2624
2625        for_each_port(adapter, i) {
2626                struct net_device *dev = adapter->port[i];
2627                struct port_info *p = netdev_priv(dev);
2628
2629                if (netif_running(dev)) {
2630                        spin_lock(&adapter->stats_lock);
2631                        t3_mac_update_stats(&p->mac);
2632                        spin_unlock(&adapter->stats_lock);
2633                }
2634        }
2635}
2636
2637static void check_link_status(struct adapter *adapter)
2638{
2639        int i;
2640
2641        for_each_port(adapter, i) {
2642                struct net_device *dev = adapter->port[i];
2643                struct port_info *p = netdev_priv(dev);
2644                int link_fault;
2645
2646                spin_lock_irq(&adapter->work_lock);
2647                link_fault = p->link_fault;
2648                spin_unlock_irq(&adapter->work_lock);
2649
2650                if (link_fault) {
2651                        t3_link_fault(adapter, i);
2652                        continue;
2653                }
2654
2655                if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2656                        t3_xgm_intr_disable(adapter, i);
2657                        t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2658
2659                        t3_link_changed(adapter, i);
2660                        t3_xgm_intr_enable(adapter, i);
2661                }
2662        }
2663}
2664
2665static void check_t3b2_mac(struct adapter *adapter)
2666{
2667        int i;
2668
2669        if (!rtnl_trylock())    /* synchronize with ifdown */
2670                return;
2671
2672        for_each_port(adapter, i) {
2673                struct net_device *dev = adapter->port[i];
2674                struct port_info *p = netdev_priv(dev);
2675                int status;
2676
2677                if (!netif_running(dev))
2678                        continue;
2679
2680                status = 0;
2681                if (netif_carrier_ok(dev))
2682                        status = t3b2_mac_watchdog_task(&p->mac);
2683                if (status == 1)
2684                        p->mac.stats.num_toggled++;
2685                else if (status == 2) {
2686                        struct cmac *mac = &p->mac;
2687
2688                        t3_mac_set_mtu(mac, dev->mtu);
2689                        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2690                        cxgb_set_rxmode(dev);
2691                        t3_link_start(&p->phy, mac, &p->link_config);
2692                        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2693                        t3_port_intr_enable(adapter, p->port_id);
2694                        p->mac.stats.num_resets++;
2695                }
2696        }
2697        rtnl_unlock();
2698}
2699
2701static void t3_adap_check_task(struct work_struct *work)
2702{
2703        struct adapter *adapter = container_of(work, struct adapter,
2704                                               adap_check_task.work);
2705        const struct adapter_params *p = &adapter->params;
2706        int port;
2707        unsigned int v, status, reset;
2708
2709        adapter->check_task_cnt++;
2710
2711        check_link_status(adapter);
2712
2713        /* Accumulate MAC stats when link polling is off or a stats period has elapsed */
2714        if (!p->linkpoll_period ||
2715            (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2716            p->stats_update_period) {
2717                mac_stats_update(adapter);
2718                adapter->check_task_cnt = 0;
2719        }
2720
2721        if (p->rev == T3_REV_B2)
2722                check_t3b2_mac(adapter);
2723
2724        /*
2725         * Scan the XGMACs for various conditions that we want to monitor by
2726         * periodic polling rather than via an interrupt, because such
2727         * conditions would otherwise flood the system with interrupts and we
2728         * only really need to know that they are "happening".  For each
2729         * condition we count its occurrences here and clear it for the next
2730         * polling loop.
2731         */
2732        for_each_port(adapter, port) {
2733                struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2734                u32 cause;
2735
2736                cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2737                reset = 0;
2738                if (cause & F_RXFIFO_OVERFLOW) {
2739                        mac->stats.rx_fifo_ovfl++;
2740                        reset |= F_RXFIFO_OVERFLOW;
2741                }
2742
2743                t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2744        }
2745
2746        /*
2747         * We do the same as above for FL_EMPTY interrupts.
2748         */
2749        status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2750        reset = 0;
2751
2752        if (status & F_FLEMPTY) {
2753                struct sge_qset *qs = &adapter->sge.qs[0];
2754                int i = 0;
2755
2756                reset |= F_FLEMPTY;
2757
2758                v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2759                    0xffff;
2760
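                    /*
                     * Each qset owns two free lists, so successive status bits
                     * alternate between fl[0] and fl[1] of consecutive qsets.
                     */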
2761                while (v) {
2762                        qs->fl[i].empty += (v & 1);
2763                        if (i)
2764                                qs++;
2765                        i ^= 1;
2766                        v >>= 1;
2767                }
2768        }
2769
2770        t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2771
2772        /* Schedule the next check update if any port is active. */
2773        spin_lock_irq(&adapter->work_lock);
2774        if (adapter->open_device_map & PORT_MASK)
2775                schedule_chk_task(adapter);
2776        spin_unlock_irq(&adapter->work_lock);
2777}
2778
2779static void db_full_task(struct work_struct *work)
2780{
2781        struct adapter *adapter = container_of(work, struct adapter,
2782                                               db_full_task);
2783
2784        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2785}
2786
2787static void db_empty_task(struct work_struct *work)
2788{
2789        struct adapter *adapter = container_of(work, struct adapter,
2790                                               db_empty_task);
2791
2792        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2793}
2794
2795static void db_drop_task(struct work_struct *work)
2796{
2797        struct adapter *adapter = container_of(work, struct adapter,
2798                                               db_drop_task);
2799        unsigned long delay = 1000;
2800        unsigned short r;
2801
2802        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2803
2804        /*
2805         * Sleep a while before ringing the driver qset doorbells.  The
2806         * delay is between 1000 and 2023 usecs (1000 + a random 0-1023).
2807         */
2808        get_random_bytes(&r, 2);
2809        delay += r & 1023;
2810        set_current_state(TASK_UNINTERRUPTIBLE);
2811        schedule_timeout(usecs_to_jiffies(delay));
2812        ring_dbs(adapter);
2813}
2814
2815/*
2816 * Processes external (PHY) interrupts in process context.
2817 */
2818static void ext_intr_task(struct work_struct *work)
2819{
2820        struct adapter *adapter = container_of(work, struct adapter,
2821                                               ext_intr_handler_task);
2822        int i;
2823
2824        /* Disable link fault interrupts */
2825        for_each_port(adapter, i) {
2826                struct net_device *dev = adapter->port[i];
2827                struct port_info *p = netdev_priv(dev);
2828
2829                t3_xgm_intr_disable(adapter, i);
2830                t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2831        }
2832
2833        t3_phy_intr_handler(adapter);
2834
2835        /* Re-enable link fault interrupts */
2836        for_each_port(adapter, i)
2837                t3_xgm_intr_enable(adapter, i);
2838
2839        /* Now reenable external interrupts */
2840        spin_lock_irq(&adapter->work_lock);
2841        if (adapter->slow_intr_mask) {
2842                adapter->slow_intr_mask |= F_T3DBG;
2843                t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2844                t3_write_reg(adapter, A_PL_INT_ENABLE0,
2845                             adapter->slow_intr_mask);
2846        }
2847        spin_unlock_irq(&adapter->work_lock);
2848}
2849
2850/*
2851 * Interrupt-context handler for external (PHY) interrupts.
2852 */
2853void t3_os_ext_intr_handler(struct adapter *adapter)
2854{
2855        /*
2856         * Schedule a task to handle external interrupts as they may be slow
2857         * and we use a mutex to protect MDIO registers.  We disable PHY
2858         * interrupts in the meantime and let the task reenable them when
2859         * it's done.
2860         */
2861        spin_lock(&adapter->work_lock);
2862        if (adapter->slow_intr_mask) {
2863                adapter->slow_intr_mask &= ~F_T3DBG;
2864                t3_write_reg(adapter, A_PL_INT_ENABLE0,
2865                             adapter->slow_intr_mask);
2866                queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2867        }
2868        spin_unlock(&adapter->work_lock);
2869}
2870
2871void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2872{
2873        struct net_device *netdev = adapter->port[port_id];
2874        struct port_info *pi = netdev_priv(netdev);
2875
2876        spin_lock(&adapter->work_lock);
2877        pi->link_fault = 1;
2878        spin_unlock(&adapter->work_lock);
2879}
2880
2881static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2882{
2883        int i, ret = 0;
2884
2885        if (is_offload(adapter) &&
2886            test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2887                cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2888                offload_close(&adapter->tdev);
2889        }
2890
2891        /* Stop all ports */
2892        for_each_port(adapter, i) {
2893                struct net_device *netdev = adapter->port[i];
2894
2895                if (netif_running(netdev))
2896                        __cxgb_close(netdev, on_wq);
2897        }
2898
2899        /* Stop SGE timers */
2900        t3_stop_sge_timers(adapter);
2901
2902        adapter->flags &= ~FULL_INIT_DONE;
2903
2904        if (reset)
2905                ret = t3_reset_adapter(adapter);
2906
2907        pci_disable_device(adapter->pdev);
2908
2909        return ret;
2910}
2911
2912static int t3_reenable_adapter(struct adapter *adapter)
2913{
2914        if (pci_enable_device(adapter->pdev)) {
2915                dev_err(&adapter->pdev->dev,
2916                        "Cannot re-enable PCI device after reset.\n");
2917                goto err;
2918        }
2919        pci_set_master(adapter->pdev);
2920        pci_restore_state(adapter->pdev);
2921        pci_save_state(adapter->pdev);
2922
2923        /* Free sge resources */
2924        t3_free_sge_resources(adapter);
2925
2926        if (t3_replay_prep_adapter(adapter))
2927                goto err;
2928
2929        return 0;
2930err:
2931        return -1;
2932}
2933
2934static void t3_resume_ports(struct adapter *adapter)
2935{
2936        int i;
2937
2938        /* Restart the ports */
2939        for_each_port(adapter, i) {
2940                struct net_device *netdev = adapter->port[i];
2941
2942                if (netif_running(netdev)) {
2943                        if (cxgb_open(netdev)) {
2944                                dev_err(&adapter->pdev->dev,
2945                                        "can't bring device back up after reset\n");
2947                                continue;
2948                        }
2949                }
2950        }
2951
2952        if (is_offload(adapter) && !ofld_disable)
2953                cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2954}
2955
2956/*
2957 * Processes a fatal error.
2958 * Bring the ports down, reset the chip, bring the ports back up.
2959 */
2960static void fatal_error_task(struct work_struct *work)
2961{
2962        struct adapter *adapter = container_of(work, struct adapter,
2963                                               fatal_error_handler_task);
2964        int err = 0;
2965
2966        rtnl_lock();
2967        err = t3_adapter_error(adapter, 1, 1);
2968        if (!err)
2969                err = t3_reenable_adapter(adapter);
2970        if (!err)
2971                t3_resume_ports(adapter);
2972
2973        CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2974        rtnl_unlock();
2975}
2976
void t3_fatal_err(struct adapter *adapter)
{
        unsigned int fw_status[4];

        if (adapter->flags & FULL_INIT_DONE) {
                t3_sge_stop(adapter);
                t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
                t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
                t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
                t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

                spin_lock(&adapter->work_lock);
                t3_intr_disable(adapter);
                queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
                spin_unlock(&adapter->work_lock);
        }
        CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
        if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
                CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
                         fw_status[0], fw_status[1],
                         fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
                                             pci_channel_state_t state)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        t3_adapter_error(adapter, 0, 0);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (!t3_reenable_adapter(adapter))
                return PCI_ERS_RESULT_RECOVERED;

        return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
                 t3_read_reg(adapter, A_PCIE_PEX_ERR));

        rtnl_lock();
        t3_resume_ports(adapter);
        rtnl_unlock();
}

static const struct pci_error_handlers t3_err_handler = {
        .error_detected = t3_io_error_detected,
        .slot_reset = t3_io_slot_reset,
        .resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.  For example, with SGE_QSETS == 8, a two-port rev > 0
 * adapter that obtained all nine MSI-X vectors (eight qset vectors plus one
 * for async events) on an eight-CPU host starts with nqsets = 8; since
 * 2 * 8 exceeds SGE_QSETS, nqsets is halved and each port gets four qsets.
 */
static void set_nqsets(struct adapter *adap)
{
        int i, j = 0;
        int num_cpus = netif_get_num_default_rss_queues();
        int hwports = adap->params.nports;
        int nqsets = adap->msix_nvectors - 1;

        if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
                if (hwports == 2 &&
                    (hwports * nqsets > SGE_QSETS ||
                     num_cpus >= nqsets / hwports))
                        nqsets /= hwports;
                if (nqsets > num_cpus)
                        nqsets = num_cpus;
                if (nqsets < 1 || hwports == 4)
                        nqsets = 1;
        } else {
                nqsets = 1;
        }

        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);

                pi->first_qset = j;
                pi->nqsets = nqsets;
                j = pi->first_qset + nqsets;

                dev_info(&adap->pdev->dev,
                         "Port %d using %d queue sets.\n", i, nqsets);
        }
}

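/*
 * Try to allocate one MSI-X vector per qset plus one for asynchronous
 * events, retrying with however many vectors the system offers.  Fail if
 * fewer than nports + 1 vectors are available.
 */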
static int cxgb_enable_msix(struct adapter *adap)
{
        struct msix_entry entries[SGE_QSETS + 1];
        int vectors;
        int i, err;

        vectors = ARRAY_SIZE(entries);
        for (i = 0; i < vectors; ++i)
                entries[i].entry = i;

        while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
                vectors = err;

        if (err < 0)
                pci_disable_msix(adap->pdev);

        if (!err && vectors < (adap->params.nports + 1)) {
                pci_disable_msix(adap->pdev);
                err = -1;
        }

        if (!err) {
                for (i = 0; i < vectors; ++i)
                        adap->msix_info[i].vec = entries[i].vector;
                adap->msix_nvectors = vectors;
        }

        return err;
}

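/*
 * Log a one-line summary for each registered port: adapter and PHY
 * description, PCI bus mode, and interrupt mode.  For the port that names
 * the adapter, also log the memory sizes and serial number.
 */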
static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
{
        static const char *pci_variant[] = {
                "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
        };

        int i;
        char buf[80];

        if (is_pcie(adap))
                snprintf(buf, sizeof(buf), "%s x%d",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.width);
        else
                snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.speed, adap->params.pci.width);

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                if (!test_bit(i, &adap->registered_device_map))
                        continue;
                netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
                            ai->desc, pi->phy.desc,
                            is_offload(adap) ? "R" : "", adap->params.rev, buf,
                            (adap->flags & USING_MSIX) ? " MSI-X" :
                            (adap->flags & USING_MSI) ? " MSI" : "");
                if (adap->name == dev->name && adap->params.vpd.mclk)
                        pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
                                adap->name, t3_mc7_size(&adap->cm) >> 20,
                                t3_mc7_size(&adap->pmtx) >> 20,
                                t3_mc7_size(&adap->pmrx) >> 20,
                                adap->params.vpd.sn);
        }
}

static const struct net_device_ops cxgb_netdev_ops = {
        .ndo_open               = cxgb_open,
        .ndo_stop               = cxgb_close,
        .ndo_start_xmit         = t3_eth_xmit,
        .ndo_get_stats          = cxgb_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = cxgb_set_rxmode,
        .ndo_do_ioctl           = cxgb_ioctl,
        .ndo_change_mtu         = cxgb_change_mtu,
        .ndo_set_mac_address    = cxgb_set_mac_addr,
        .ndo_fix_features       = cxgb_fix_features,
        .ndo_set_features       = cxgb_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cxgb_netpoll,
#endif
};

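/*
 * Derive the port's iSCSI MAC address from its Ethernet MAC by setting
 * the high bit of the fourth byte, presumably so it cannot collide with
 * the NIC's own address.
 */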
static void cxgb3_init_iscsi_mac(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
        pi->iscsic.mac_addr[3] |= 0x80;
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
                        NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
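/*
 * PCI probe routine: enable and map the device, allocate the adapter and
 * one net_device per physical port, register the ports, pick an interrupt
 * mode (MSI-X, MSI, or legacy), and distribute qsets across the ports.
 */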
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int i, err, pci_using_dac = 0;
        resource_size_t mmio_start, mmio_len;
        const struct adapter_info *ai;
        struct adapter *adapter = NULL;
        struct port_info *pi;

        pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);

        if (!cxgb3_wq) {
                cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
                if (!cxgb3_wq) {
                        pr_err("cannot initialize work queue\n");
                        return -ENOMEM;
                }
        }

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "cannot enable PCI device\n");
                goto out;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                /* Just info, some other driver may have claimed the device. */
                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
                goto out_disable_device;
        }

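        /* Prefer 64-bit DMA and fall back to a 32-bit mask if unavailable. */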
        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev,
                                "unable to obtain 64-bit DMA for coherent allocations\n");
                        goto out_release_regions;
                }
        } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
                dev_err(&pdev->dev, "no usable DMA configuration\n");
                goto out_release_regions;
        }

        pci_set_master(pdev);
        pci_save_state(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        ai = t3_get_adapter_info(ent->driver_data);

        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
                goto out_release_regions;
        }

        adapter->nofail_skb =
                alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
        if (!adapter->nofail_skb) {
                dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
                err = -ENOMEM;
                goto out_free_adapter;
        }

        adapter->regs = ioremap_nocache(mmio_start, mmio_len);
        if (!adapter->regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                err = -ENOMEM;
                goto out_free_adapter;
        }

        adapter->pdev = pdev;
        adapter->name = pci_name(pdev);
        adapter->msg_enable = dflt_msg_enable;
        adapter->mmio_len = mmio_len;

        mutex_init(&adapter->mdio_lock);
        spin_lock_init(&adapter->work_lock);
        spin_lock_init(&adapter->stats_lock);

        INIT_LIST_HEAD(&adapter->adapter_list);
        INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
        INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);

        INIT_WORK(&adapter->db_full_task, db_full_task);
        INIT_WORK(&adapter->db_empty_task, db_empty_task);
        INIT_WORK(&adapter->db_drop_task, db_drop_task);

        INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

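        /* Allocate and minimally initialize one net_device per physical port. */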
        for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                adapter->port[i] = netdev;
                pi = netdev_priv(netdev);
                pi->adapter = adapter;
                pi->port_id = i;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
                netdev->features |= netdev->hw_features |
                                    NETIF_F_HW_VLAN_CTAG_TX;
                netdev->vlan_features |= netdev->features & VLAN_FEAT;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;

                netdev->netdev_ops = &cxgb_netdev_ops;
                SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
        }

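        /* Attach the adapter to the PCI device and complete common init. */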
        pci_set_drvdata(pdev, adapter);
        if (t3_prep_adapter(adapter, ai, 1) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration, we do not fail the whole card but rather proceed
         * only with the ports we manage to register successfully.  However,
         * we must register at least one net device.
         */
        for_each_port(adapter, i) {
                err = register_netdev(adapter->port[i]);
                if (err)
                        dev_warn(&pdev->dev,
                                 "cannot register net device %s, skipping\n",
                                 adapter->port[i]->name);
                else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i]->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                dev_err(&pdev->dev, "could not register any net devices\n");
                goto out_free_dev;
        }

        for_each_port(adapter, i)
                cxgb3_init_iscsi_mac(adapter->port[i]);

        /* Driver's ready. Reflect it on the LEDs. */
        t3_led_ready(adapter);

        if (is_offload(adapter)) {
                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
                cxgb3_adapter_ofld(adapter);
        }

        /* See what interrupts we'll be using */
        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
                adapter->flags |= USING_MSIX;
        else if (msi > 0 && pci_enable_msi(pdev) == 0)
                adapter->flags |= USING_MSI;

        set_nqsets(adapter);

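        /* The sysfs attribute group is best-effort; a failure is not fatal. */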
        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                                 &cxgb3_attr_group);

        print_port_info(adapter, ai);
        return 0;

out_free_dev:
        iounmap(adapter->regs);
        for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
                if (adapter->port[i])
                        free_netdev(adapter->port[i]);

out_free_adapter:
        kfree(adapter);

out_release_regions:
        pci_release_regions(pdev);
out_disable_device:
        pci_disable_device(pdev);
out:
        return err;
}

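/*
 * PCI remove routine: tear everything down in roughly the reverse order of
 * init_one - sysfs group, offload state, net devices, SGE resources,
 * interrupts, register mapping, and finally the PCI device itself.
 */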
static void remove_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (adapter) {
                int i;

                t3_sge_stop(adapter);
                sysfs_remove_group(&adapter->port[0]->dev.kobj,
                                   &cxgb3_attr_group);

                if (is_offload(adapter)) {
                        cxgb3_adapter_unofld(adapter);
                        if (test_bit(OFFLOAD_DEVMAP_BIT,
                                     &adapter->open_device_map))
                                offload_close(&adapter->tdev);
                }

                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);

                t3_stop_sge_timers(adapter);
                t3_free_sge_resources(adapter);
                cxgb_disable_msi(adapter);

                for_each_port(adapter, i)
                        if (adapter->port[i])
                                free_netdev(adapter->port[i]);

                iounmap(adapter->regs);
                kfree_skb(adapter->nofail_skb);
                kfree(adapter);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
        }
}

static struct pci_driver driver = {
        .name = DRV_NAME,
        .id_table = cxgb3_pci_tbl,
        .probe = init_one,
        .remove = remove_one,
        .err_handler = &t3_err_handler,
};

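/*
 * Initialize the offload layer before registering the PCI driver, so that
 * offload state exists by the time probe runs for the first adapter.
 */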
static int __init cxgb3_init_module(void)
{
        cxgb3_offload_init();

        return pci_register_driver(&driver);
}

static void __exit cxgb3_cleanup_module(void)
{
        pci_unregister_driver(&driver);
        if (cxgb3_wq)
                destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);