linux/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

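/*
 * For example, loading the module with "modprobe cxgb3 msi=1" (hypothetical
 * invocation shown for illustration) restricts the driver to MSI and legacy
 * pin interrupts.  Because the parameter is mode 0644 it is also visible at
 * /sys/module/cxgb3/parameters/msi.
 */
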
/*
 * The driver enables offload by default.
 * To disable it, set ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		netdev_info(dev, "link up, %s, %s-duplex\n",
			    s, p->link_config.duplex == DUPLEX_FULL
			    ? "full" : "half");
	}
}

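/*
 * Flush the TX FIFO by letting the XGMAC drop packets (F_ENDROPPKT) while
 * re-enabling the MAC TX/RX paths; used when a link fault is detected.
 */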
static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

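/* Stop draining: clear F_ENDROPPKT so the TX FIFO stops discarding packets. */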
static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}

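/*
 * OS callback for link-fault state changes: on recovery, re-arm the XGMAC
 * interrupts and re-enable MAC TX; on fault, drop the carrier and flush the
 * TX FIFO.  The resulting link state is reported via link_report().
 */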
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS +
			    pi->mac.offset);
		t3_write_reg(adap,
			     A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);

		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE +
				 pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}

/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}

/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter whose PHY reported the module change
 *	@port_id: the port index of the PHY whose module changed
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		netdev_info(dev, "PHY module unplugged\n");
	else
		netdev_info(dev, "%s PHY module inserted\n",
			    mod_str[pi->phy.modtype]);
}

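/* Propagate the netdev's unicast/multicast filter settings to the MAC. */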
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_reset(mac);
	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

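/* Disable whichever of MSI-X/MSI is active and clear the matching flag. */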
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}

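/*
 * Request one MSI-X data interrupt per queue set; vector 0 is reserved for
 * the slow path.  On failure, free the vectors already requested.
 */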
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

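/* Release the slow-path and per-queue-set IRQs, or the single legacy IRQ. */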
static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}

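/*
 * Poll response queue 0 until it has seen @n more offload packets than
 * @init_cnt, giving up after roughly 100ms.
 */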
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 10;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}

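/*
 * Initialize TP parity state (used on T3 rev C and later, see cxgb_up()) by
 * writing every SMT, L2T, and routing-table entry through management CPLs
 * and waiting for the replies, so the on-chip memories start with valid
 * parity.
 */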
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = __skb_put_zero(skb, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = __skb_put_zero(skb, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE + 1];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff; /* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}
	rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

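/* Ring every egress context's doorbell so the SGE rescans the TX queues. */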
static void ring_dbs(struct adapter *adap)
{
	int i, j;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			for (j = 0; j < SGE_TXQ_PER_SET; j++)
				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
	}
}

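/* Register a NAPI instance for every queue set that has been allocated. */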
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}

/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}

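/*
 * Common show handler for the per-port sysfs attributes; @format renders
 * the attribute's value into @buf.
 */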
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t (*format)(struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format)(to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

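/*
 * Common store handler for the per-port sysfs attributes: parse an unsigned
 * value, range-check it against [@min_val, @max_val], and apply it through
 * @set under the RTNL.
 */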
static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t (*set)(struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set)(to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}

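/*
 * Set the number of MC5 filter TIDs; only allowed before the adapter is
 * fully initialized and only on revisions that support filtering.
 */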
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

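/* Set the number of MC5 server TIDs, bounded by the remaining CAM space. */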
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0444, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0644, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static const struct attribute_group cxgb3_attr_group = {
	.attrs = cxgb3_attrs,
};

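/*
 * Report a TX traffic scheduler's rate in Kbps, computed from the
 * bits-per-tick and clocks-per-tick fields read back through the TP TM PIO
 * registers; a zero clocks-per-tick value means the scheduler is disabled.
 */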
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

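/* Set a TX traffic scheduler's rate; values above 10 Gbps are rejected. */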
static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtouint(buf, 0, &val);
	if (ret)
		return ret;
	if (val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, 0644, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static const struct attribute_group offload_attr_group = {
	.attrs = offload_attrs,
};

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

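/*
 * Program one SMT (source MAC table) entry with the port's LAN and iSCSI
 * MAC addresses via a CPL_SMT_WRITE_REQ on the offload queue.
 */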
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct port_info *pi = netdev_priv(adapter->port[idx]);
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

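/* Populate an SMT entry for every port. */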
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

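/* Write the per-port MTUs into the TP MTU port table (port 1 in the high half). */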
static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

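/*
 * Send a management work request binding TX queue @qidx to scheduler @sched
 * with min/max parameters @lo/@hi and port binding @port.
 */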
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		return -ENOMEM;

	req = skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);
	if (skb == adap->nofail_skb) {
		adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
					     GFP_KERNEL);
		if (!adap->nofail_skb)
			ret = -ENOMEM;
	}

	return ret;
}

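/* Bind each port's queue sets to that port's TX packet scheduler. */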
static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."			\
	__stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."		\
	__stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);

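/* Map an EDC firmware index to the corresponding firmware file name. */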
static inline const char *get_edc_fw_name(int edc_idx)
{
	const char *fw_name = NULL;

	switch (edc_idx) {
	case EDC_OPT_AEL2005:
		fw_name = AEL2005_OPT_EDC_NAME;
		break;
	case EDC_TWX_AEL2005:
		fw_name = AEL2005_TWX_EDC_NAME;
		break;
	case EDC_TWX_AEL2020:
		fw_name = AEL2020_TWX_EDC_NAME;
		break;
	}
	return fw_name;
}

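/*
 * Load an EDC firmware image, verify its size and checksum (all 32-bit
 * words must sum to 0xffffffff), and unpack it into the PHY's 16-bit
 * register cache.
 */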
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
	struct adapter *adapter = phy->adapter;
	const struct firmware *fw;
	const char *fw_name;
	u32 csum;
	const __be32 *p;
	u16 *cache = phy->phy_cache;
	int i, ret = -EINVAL;

	fw_name = get_edc_fw_name(edc_idx);
	if (fw_name)
		ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
	if (ret < 0) {
		dev_err(&adapter->pdev->dev,
			"could not upgrade firmware: unable to load %s\n",
			fw_name);
		return ret;
	}

	/* check size, taking the trailing checksum word into account */
	if (fw->size > size + 4) {
		CH_ERR(adapter, "firmware image too large %u, expected %d\n",
		       (unsigned int)fw->size, size + 4);
		ret = -EINVAL;
	}

	/* compute checksum */
	p = (const __be32 *)fw->data;
	for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		ret = -EINVAL;
	}

	for (i = 0; i < size / 4; i++) {
		*cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
		*cache++ = be32_to_cpu(p[i]) & 0xffff;
	}

	release_firmware(fw);

	return ret;
}

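/* Load the main T3 firmware image from userspace and write it to the card. */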
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			FW_FNAME);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

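/* Map the chip revision to the character used in TP SRAM file names. */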
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch (adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

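/* Load the protocol-engine (TP) SRAM image matching the chip revision. */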
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}

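/*
 * Enable or disable hardware VLAN tag extraction for a port; rev-0 chips
 * have a single control shared by all ports.
 */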
static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (adapter->params.rev > 0) {
		t3_set_vlan_accel(adapter, 1 << pi->port_id,
				  features & NETIF_F_HW_VLAN_CTAG_RX);
	} else {
		/* single control for all ports */
		unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

		for_each_port(adapter, i)
			have_vlans |=
				adapter->port[i]->features &
				NETIF_F_HW_VLAN_CTAG_RX;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int i, err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
				FW_VERSION_MAJOR, FW_VERSION_MINOR,
				FW_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		err = t3_check_tpsram_version(adap);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
				TP_VERSION_MAJOR, TP_VERSION_MINOR,
				TP_VERSION_MICRO, err ? "failed" : "succeeded");
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		for_each_port(adap, i)
			cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);

		t3_start_sge_timers(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				       0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		int ret = bind_qsets(adap);

		if (ret < 0) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			err = ret;
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	quiesce_rx(adapter);
	t3_sge_stop(adapter);
	if (!on_wq)
		flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
}

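/*
 * Schedule the periodic adapter check task, using the link-poll period if
 * one is configured and the stats update period otherwise.
 */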
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

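/*
 * Bring up offload support the first time an offload-capable port is
 * opened: activate the offload module, program MTUs and SMT entries, and
 * notify registered ULP clients.
 */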
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

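/*
 * Tear down offload support: notify clients, flush pending TID release
 * work, and take the adapter down if no ports remain open.
 */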
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct t3c_data *td = T3C_DATA(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Flush work scheduled while releasing TIDs */
	flush_work(&td->tid_release_task);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter, 0);

	cxgb3_offload_deactivate(adapter);
	return 0;
}

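/*
 * ndo_open handler: bring up the adapter on first open, enable offload if
 * available, start the port, and kick off the periodic check task.
 */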
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			pr_warn("Could not initialize offload capabilities\n");
	}

	netif_set_real_num_tx_queues(dev, pi->nqsets);
	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
	if (err)
		return err;
	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_tx_start_all_queues(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
	return 0;
}

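/*
 * Common close path: quiesce the port's MAC and interrupts, and take the
 * whole adapter down once the last port is closed.  @on_wq is set when
 * called from the adapter's own workqueue so cxgb_down() does not flush it.
 */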
static int __cxgb_close(struct net_device *dev, int on_wq)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter, on_wq);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	return __cxgb_close(dev, 0);
}

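/*
 * ndo_get_stats handler: fold the MAC statistics, updated under the stats
 * lock, into the netdev's net_device_stats.
 */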
1488static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1489{
1490        struct port_info *pi = netdev_priv(dev);
1491        struct adapter *adapter = pi->adapter;
1492        struct net_device_stats *ns = &dev->stats;
1493        const struct mac_stats *pstats;
1494
1495        spin_lock(&adapter->stats_lock);
1496        pstats = t3_mac_update_stats(&pi->mac);
1497        spin_unlock(&adapter->stats_lock);
1498
1499        ns->tx_bytes = pstats->tx_octets;
1500        ns->tx_packets = pstats->tx_frames;
1501        ns->rx_bytes = pstats->rx_octets;
1502        ns->rx_packets = pstats->rx_frames;
1503        ns->multicast = pstats->rx_mcast_frames;
1504
1505        ns->tx_errors = pstats->tx_underrun;
1506        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1507            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1508            pstats->rx_fifo_ovfl;
1509
1510        /* detailed rx_errors */
1511        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1512        ns->rx_over_errors = 0;
1513        ns->rx_crc_errors = pstats->rx_fcs_errs;
1514        ns->rx_frame_errors = pstats->rx_symbol_errs;
1515        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1516        ns->rx_missed_errors = pstats->rx_cong_drops;
1517
1518        /* detailed tx_errors */
1519        ns->tx_aborted_errors = 0;
1520        ns->tx_carrier_errors = 0;
1521        ns->tx_fifo_errors = pstats->tx_underrun;
1522        ns->tx_heartbeat_errors = 0;
1523        ns->tx_window_errors = 0;
1524        return ns;
1525}
1526
1527static u32 get_msglevel(struct net_device *dev)
1528{
1529        struct port_info *pi = netdev_priv(dev);
1530        struct adapter *adapter = pi->adapter;
1531
1532        return adapter->msg_enable;
1533}
1534
1535static void set_msglevel(struct net_device *dev, u32 val)
1536{
1537        struct port_info *pi = netdev_priv(dev);
1538        struct adapter *adapter = pi->adapter;
1539
1540        adapter->msg_enable = val;
1541}
1542
1543static const char stats_strings[][ETH_GSTRING_LEN] = {
1544        "TxOctetsOK         ",
1545        "TxFramesOK         ",
1546        "TxMulticastFramesOK",
1547        "TxBroadcastFramesOK",
1548        "TxPauseFrames      ",
1549        "TxUnderrun         ",
1550        "TxExtUnderrun      ",
1551
1552        "TxFrames64         ",
1553        "TxFrames65To127    ",
1554        "TxFrames128To255   ",
1555        "TxFrames256To511   ",
1556        "TxFrames512To1023  ",
1557        "TxFrames1024To1518 ",
1558        "TxFrames1519ToMax  ",
1559
1560        "RxOctetsOK         ",
1561        "RxFramesOK         ",
1562        "RxMulticastFramesOK",
1563        "RxBroadcastFramesOK",
1564        "RxPauseFrames      ",
1565        "RxFCSErrors        ",
1566        "RxSymbolErrors     ",
1567        "RxShortErrors      ",
1568        "RxJabberErrors     ",
1569        "RxLengthErrors     ",
1570        "RxFIFOoverflow     ",
1571
1572        "RxFrames64         ",
1573        "RxFrames65To127    ",
1574        "RxFrames128To255   ",
1575        "RxFrames256To511   ",
1576        "RxFrames512To1023  ",
1577        "RxFrames1024To1518 ",
1578        "RxFrames1519ToMax  ",
1579
1580        "PhyFIFOErrors      ",
1581        "TSO                ",
1582        "VLANextractions    ",
1583        "VLANinsertions     ",
1584        "TxCsumOffload      ",
1585        "RxCsumGood         ",
1586        "LroAggregated      ",
1587        "LroFlushed         ",
1588        "LroNoDesc          ",
1589        "RxDrops            ",
1590
1591        "CheckTXEnToggled   ",
1592        "CheckResets        ",
1593
1594        "LinkFaults         ",
1595};
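/*
 * Note: the order of stats_strings[] must match the order in which
 * get_stats() below fills in its values; ethtool pairs names and
 * counters purely by index.
 */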
1596
1597static int get_sset_count(struct net_device *dev, int sset)
1598{
1599        switch (sset) {
1600        case ETH_SS_STATS:
1601                return ARRAY_SIZE(stats_strings);
1602        default:
1603                return -EOPNOTSUPP;
1604        }
1605}
1606
1607#define T3_REGMAP_SIZE (3 * 1024)
1608
1609static int get_regs_len(struct net_device *dev)
1610{
1611        return T3_REGMAP_SIZE;
1612}
1613
1614static int get_eeprom_len(struct net_device *dev)
1615{
1616        return EEPROMSIZE;
1617}
1618
1619static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1620{
1621        struct port_info *pi = netdev_priv(dev);
1622        struct adapter *adapter = pi->adapter;
1623        u32 fw_vers = 0;
1624        u32 tp_vers = 0;
1625
1626        spin_lock(&adapter->stats_lock);
1627        t3_get_fw_version(adapter, &fw_vers);
1628        t3_get_tp_version(adapter, &tp_vers);
1629        spin_unlock(&adapter->stats_lock);
1630
1631        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1632        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1633        strlcpy(info->bus_info, pci_name(adapter->pdev),
1634                sizeof(info->bus_info));
1635        if (fw_vers)
1636                snprintf(info->fw_version, sizeof(info->fw_version),
1637                         "%s %u.%u.%u TP %u.%u.%u",
1638                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1639                         G_FW_VERSION_MAJOR(fw_vers),
1640                         G_FW_VERSION_MINOR(fw_vers),
1641                         G_FW_VERSION_MICRO(fw_vers),
1642                         G_TP_VERSION_MAJOR(tp_vers),
1643                         G_TP_VERSION_MINOR(tp_vers),
1644                         G_TP_VERSION_MICRO(tp_vers));
1645}
1646
1647static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1648{
1649        if (stringset == ETH_SS_STATS)
1650                memcpy(data, stats_strings, sizeof(stats_strings));
1651}
1652
1653static unsigned long collect_sge_port_stats(struct adapter *adapter,
1654                                            struct port_info *p, int idx)
1655{
1656        int i;
1657        unsigned long tot = 0;
1658
1659        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1660                tot += adapter->sge.qs[i].port_stats[idx];
1661        return tot;
1662}
1663
1664static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1665                      u64 *data)
1666{
1667        struct port_info *pi = netdev_priv(dev);
1668        struct adapter *adapter = pi->adapter;
1669        const struct mac_stats *s;
1670
1671        spin_lock(&adapter->stats_lock);
1672        s = t3_mac_update_stats(&pi->mac);
1673        spin_unlock(&adapter->stats_lock);
1674
1675        *data++ = s->tx_octets;
1676        *data++ = s->tx_frames;
1677        *data++ = s->tx_mcast_frames;
1678        *data++ = s->tx_bcast_frames;
1679        *data++ = s->tx_pause;
1680        *data++ = s->tx_underrun;
1681        *data++ = s->tx_fifo_urun;
1682
1683        *data++ = s->tx_frames_64;
1684        *data++ = s->tx_frames_65_127;
1685        *data++ = s->tx_frames_128_255;
1686        *data++ = s->tx_frames_256_511;
1687        *data++ = s->tx_frames_512_1023;
1688        *data++ = s->tx_frames_1024_1518;
1689        *data++ = s->tx_frames_1519_max;
1690
1691        *data++ = s->rx_octets;
1692        *data++ = s->rx_frames;
1693        *data++ = s->rx_mcast_frames;
1694        *data++ = s->rx_bcast_frames;
1695        *data++ = s->rx_pause;
1696        *data++ = s->rx_fcs_errs;
1697        *data++ = s->rx_symbol_errs;
1698        *data++ = s->rx_short;
1699        *data++ = s->rx_jabber;
1700        *data++ = s->rx_too_long;
1701        *data++ = s->rx_fifo_ovfl;
1702
1703        *data++ = s->rx_frames_64;
1704        *data++ = s->rx_frames_65_127;
1705        *data++ = s->rx_frames_128_255;
1706        *data++ = s->rx_frames_256_511;
1707        *data++ = s->rx_frames_512_1023;
1708        *data++ = s->rx_frames_1024_1518;
1709        *data++ = s->rx_frames_1519_max;
1710
1711        *data++ = pi->phy.fifo_errors;
1712
1713        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1714        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1715        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1716        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1717        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1718        *data++ = 0;
1719        *data++ = 0;
1720        *data++ = 0;
1721        *data++ = s->rx_cong_drops;
1722
1723        *data++ = s->num_toggled;
1724        *data++ = s->num_resets;
1725
1726        *data++ = s->link_faults;
1727}
1728
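/*
 * Copy the registers in [start, end] into the snapshot buffer at byte
 * offset start, so every register appears at its own address within the
 * T3_REGMAP_SIZE image.
 */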
1729static inline void reg_block_dump(struct adapter *ap, void *buf,
1730                                  unsigned int start, unsigned int end)
1731{
1732        u32 *p = buf + start;
1733
1734        for (; start <= end; start += sizeof(u32))
1735                *p++ = t3_read_reg(ap, start);
1736}
1737
1738static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1739                     void *buf)
1740{
1741        struct port_info *pi = netdev_priv(dev);
1742        struct adapter *ap = pi->adapter;
1743
1744        /*
1745         * Version scheme:
1746         * bits 0..9: chip version
1747         * bits 10..15: chip revision
1748         * bit 31: set for PCIe cards
1749         */
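        /*
         * For example, a rev-2 PCIe adapter would report
         * 3 | (2 << 10) | (1 << 31) = 0x80000803.
         */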
1750        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1751
1752        /*
1753         * We skip the MAC statistics registers because they are clear-on-read.
1754         * Also reading multi-register stats would need to synchronize with the
1755         * periodic mac stats accumulation.  Hard to justify the complexity.
1756         */
1757        memset(buf, 0, T3_REGMAP_SIZE);
1758        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1759        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1760        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1761        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1762        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1763        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1764                       XGM_REG(A_XGM_SERDES_STAT3, 1));
1765        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1766                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1767}
1768
1769static int restart_autoneg(struct net_device *dev)
1770{
1771        struct port_info *p = netdev_priv(dev);
1772
1773        if (!netif_running(dev))
1774                return -EAGAIN;
1775        if (p->link_config.autoneg != AUTONEG_ENABLE)
1776                return -EINVAL;
1777        p->phy.ops->autoneg_restart(&p->phy);
1778        return 0;
1779}
1780
1781static int set_phys_id(struct net_device *dev,
1782                       enum ethtool_phys_id_state state)
1783{
1784        struct port_info *pi = netdev_priv(dev);
1785        struct adapter *adapter = pi->adapter;
1786
1787        switch (state) {
1788        case ETHTOOL_ID_ACTIVE:
1789                return 1;       /* cycle on/off once per second */
1790
1791        case ETHTOOL_ID_OFF:
1792                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1793                break;
1794
1795        case ETHTOOL_ID_ON:
1796        case ETHTOOL_ID_INACTIVE:
1797                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1798                         F_GPIO0_OUT_VAL);
1799        }
1800
1801        return 0;
1802}
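/*
 * Usage sketch (the interface name is an assumption): "ethtool -p eth0 5"
 * makes the core call set_phys_id(dev, ETHTOOL_ID_ACTIVE); because it
 * returns 1, the core then alternates ETHTOOL_ID_ON/ETHTOOL_ID_OFF once
 * per second for 5 seconds, blinking the LED driven by GPIO0.
 */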
1803
1804static int get_link_ksettings(struct net_device *dev,
1805                              struct ethtool_link_ksettings *cmd)
1806{
1807        struct port_info *p = netdev_priv(dev);
1808        u32 supported;
1809
1810        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1811                                                p->link_config.supported);
1812        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1813                                                p->link_config.advertising);
1814
1815        if (netif_carrier_ok(dev)) {
1816                cmd->base.speed = p->link_config.speed;
1817                cmd->base.duplex = p->link_config.duplex;
1818        } else {
1819                cmd->base.speed = SPEED_UNKNOWN;
1820                cmd->base.duplex = DUPLEX_UNKNOWN;
1821        }
1822
1823        ethtool_convert_link_mode_to_legacy_u32(&supported,
1824                                                cmd->link_modes.supported);
1825
1826        cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1827        cmd->base.phy_address = p->phy.mdio.prtad;
1828        cmd->base.autoneg = p->link_config.autoneg;
1829        return 0;
1830}
1831
1832static int speed_duplex_to_caps(int speed, int duplex)
1833{
1834        int cap = 0;
1835
1836        switch (speed) {
1837        case SPEED_10:
1838                if (duplex == DUPLEX_FULL)
1839                        cap = SUPPORTED_10baseT_Full;
1840                else
1841                        cap = SUPPORTED_10baseT_Half;
1842                break;
1843        case SPEED_100:
1844                if (duplex == DUPLEX_FULL)
1845                        cap = SUPPORTED_100baseT_Full;
1846                else
1847                        cap = SUPPORTED_100baseT_Half;
1848                break;
1849        case SPEED_1000:
1850                if (duplex == DUPLEX_FULL)
1851                        cap = SUPPORTED_1000baseT_Full;
1852                else
1853                        cap = SUPPORTED_1000baseT_Half;
1854                break;
1855        case SPEED_10000:
1856                if (duplex == DUPLEX_FULL)
1857                        cap = SUPPORTED_10000baseT_Full;
1858        }
1859        return cap;
1860}
1861
1862#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1863                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1864                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1865                      ADVERTISED_10000baseT_Full)
1866
1867static int set_link_ksettings(struct net_device *dev,
1868                              const struct ethtool_link_ksettings *cmd)
1869{
1870        struct port_info *p = netdev_priv(dev);
1871        struct link_config *lc = &p->link_config;
1872        u32 advertising;
1873
1874        ethtool_convert_link_mode_to_legacy_u32(&advertising,
1875                                                cmd->link_modes.advertising);
1876
1877        if (!(lc->supported & SUPPORTED_Autoneg)) {
1878                /*
1879                 * PHY offers a single speed/duplex.  See if that's what's
1880                 * being requested.
1881                 */
1882                if (cmd->base.autoneg == AUTONEG_DISABLE) {
1883                        u32 speed = cmd->base.speed;
1884                        int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1885                        if (lc->supported & cap)
1886                                return 0;
1887                }
1888                return -EINVAL;
1889        }
1890
1891        if (cmd->base.autoneg == AUTONEG_DISABLE) {
1892                u32 speed = cmd->base.speed;
1893                int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1894
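                /*
                 * 1000BASE-T requires autonegotiation for master/slave
                 * resolution, so a forced gigabit speed is rejected.
                 */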
1895                if (!(lc->supported & cap) || (speed == SPEED_1000))
1896                        return -EINVAL;
1897                lc->requested_speed = speed;
1898                lc->requested_duplex = cmd->base.duplex;
1899                lc->advertising = 0;
1900        } else {
1901                advertising &= ADVERTISED_MASK;
1902                advertising &= lc->supported;
1903                if (!advertising)
1904                        return -EINVAL;
1905                lc->requested_speed = SPEED_INVALID;
1906                lc->requested_duplex = DUPLEX_INVALID;
1907                lc->advertising = advertising | ADVERTISED_Autoneg;
1908        }
1909        lc->autoneg = cmd->base.autoneg;
1910        if (netif_running(dev))
1911                t3_link_start(&p->phy, &p->mac, lc);
1912        return 0;
1913}
1914
1915static void get_pauseparam(struct net_device *dev,
1916                           struct ethtool_pauseparam *epause)
1917{
1918        struct port_info *p = netdev_priv(dev);
1919
1920        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1921        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1922        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1923}
1924
1925static int set_pauseparam(struct net_device *dev,
1926                          struct ethtool_pauseparam *epause)
1927{
1928        struct port_info *p = netdev_priv(dev);
1929        struct link_config *lc = &p->link_config;
1930
1931        if (epause->autoneg == AUTONEG_DISABLE)
1932                lc->requested_fc = 0;
1933        else if (lc->supported & SUPPORTED_Autoneg)
1934                lc->requested_fc = PAUSE_AUTONEG;
1935        else
1936                return -EINVAL;
1937
1938        if (epause->rx_pause)
1939                lc->requested_fc |= PAUSE_RX;
1940        if (epause->tx_pause)
1941                lc->requested_fc |= PAUSE_TX;
1942        if (lc->autoneg == AUTONEG_ENABLE) {
1943                if (netif_running(dev))
1944                        t3_link_start(&p->phy, &p->mac, lc);
1945        } else {
1946                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1947                if (netif_running(dev))
1948                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1949        }
1950        return 0;
1951}
1952
1953static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1954{
1955        struct port_info *pi = netdev_priv(dev);
1956        struct adapter *adapter = pi->adapter;
1957        const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1958
1959        e->rx_max_pending = MAX_RX_BUFFERS;
1960        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1961        e->tx_max_pending = MAX_TXQ_ENTRIES;
1962
1963        e->rx_pending = q->fl_size;
1964        e->rx_mini_pending = q->rspq_size;
1965        e->rx_jumbo_pending = q->jumbo_size;
1966        e->tx_pending = q->txq_size[0];
1967}
1968
1969static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1970{
1971        struct port_info *pi = netdev_priv(dev);
1972        struct adapter *adapter = pi->adapter;
1973        struct qset_params *q;
1974        int i;
1975
1976        if (e->rx_pending > MAX_RX_BUFFERS ||
1977            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1978            e->tx_pending > MAX_TXQ_ENTRIES ||
1979            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1980            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1981            e->rx_pending < MIN_FL_ENTRIES ||
1982            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1983            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1984                return -EINVAL;
1985
1986        if (adapter->flags & FULL_INIT_DONE)
1987                return -EBUSY;
1988
1989        q = &adapter->params.sge.qset[pi->first_qset];
1990        for (i = 0; i < pi->nqsets; ++i, ++q) {
1991                q->rspq_size = e->rx_mini_pending;
1992                q->fl_size = e->rx_pending;
1993                q->jumbo_size = e->rx_jumbo_pending;
1994                q->txq_size[0] = e->tx_pending;
1995                q->txq_size[1] = e->tx_pending;
1996                q->txq_size[2] = e->tx_pending;
1997        }
1998        return 0;
1999}
2000
2001static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2002{
2003        struct port_info *pi = netdev_priv(dev);
2004        struct adapter *adapter = pi->adapter;
2005        struct qset_params *qsp;
2006        struct sge_qset *qs;
2007        int i;
2008
2009        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2010                return -EINVAL;
2011
2012        for (i = 0; i < pi->nqsets; i++) {
2013                qsp = &adapter->params.sge.qset[i];
2014                qs = &adapter->sge.qs[i];
2015                qsp->coalesce_usecs = c->rx_coalesce_usecs;
2016                t3_update_qset_coalesce(qs, qsp);
2017        }
2018
2019        return 0;
2020}
2021
2022static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2023{
2024        struct port_info *pi = netdev_priv(dev);
2025        struct adapter *adapter = pi->adapter;
2026        struct qset_params *q = adapter->params.sge.qset;
2027
2028        c->rx_coalesce_usecs = q->coalesce_usecs;
2029        return 0;
2030}
2031
2032static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2033                      u8 *data)
2034{
2035        struct port_info *pi = netdev_priv(dev);
2036        struct adapter *adapter = pi->adapter;
2037        int i, err = 0;
2038
2039        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2040        if (!buf)
2041                return -ENOMEM;
2042
2043        e->magic = EEPROM_MAGIC;
2044        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2045                err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
2046
2047        if (!err)
2048                memcpy(data, buf + e->offset, e->len);
2049        kfree(buf);
2050        return err;
2051}
2052
2053static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2054                      u8 *data)
2055{
2056        struct port_info *pi = netdev_priv(dev);
2057        struct adapter *adapter = pi->adapter;
2058        u32 aligned_offset, aligned_len;
2059        __le32 *p;
2060        u8 *buf;
2061        int err;
2062
2063        if (eeprom->magic != EEPROM_MAGIC)
2064                return -EINVAL;
2065
2066        aligned_offset = eeprom->offset & ~3;
2067        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
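        /*
         * Example: offset = 6, len = 4 gives aligned_offset = 4 and
         * aligned_len = 8: a read-modify-write of the two 32-bit words
         * covering bytes 4..11.
         */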
2068
2069        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2070                buf = kmalloc(aligned_len, GFP_KERNEL);
2071                if (!buf)
2072                        return -ENOMEM;
2073                err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
2074                if (!err && aligned_len > 4)
2075                        err = t3_seeprom_read(adapter,
2076                                              aligned_offset + aligned_len - 4,
2077                                              (__le32 *)&buf[aligned_len - 4]);
2078                if (err)
2079                        goto out;
2080                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2081        } else
2082                buf = data;
2083
2084        err = t3_seeprom_wp(adapter, 0);
2085        if (err)
2086                goto out;
2087
2088        for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
2089                err = t3_seeprom_write(adapter, aligned_offset, *p);
2090                aligned_offset += 4;
2091        }
2092
2093        if (!err)
2094                err = t3_seeprom_wp(adapter, 1);
2095out:
2096        if (buf != data)
2097                kfree(buf);
2098        return err;
2099}
2100
2101static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2102{
2103        wol->supported = 0;
2104        wol->wolopts = 0;
2105        memset(&wol->sopass, 0, sizeof(wol->sopass));
2106}
2107
2108static const struct ethtool_ops cxgb_ethtool_ops = {
2109        .get_drvinfo = get_drvinfo,
2110        .get_msglevel = get_msglevel,
2111        .set_msglevel = set_msglevel,
2112        .get_ringparam = get_sge_param,
2113        .set_ringparam = set_sge_param,
2114        .get_coalesce = get_coalesce,
2115        .set_coalesce = set_coalesce,
2116        .get_eeprom_len = get_eeprom_len,
2117        .get_eeprom = get_eeprom,
2118        .set_eeprom = set_eeprom,
2119        .get_pauseparam = get_pauseparam,
2120        .set_pauseparam = set_pauseparam,
2121        .get_link = ethtool_op_get_link,
2122        .get_strings = get_strings,
2123        .set_phys_id = set_phys_id,
2124        .nway_reset = restart_autoneg,
2125        .get_sset_count = get_sset_count,
2126        .get_ethtool_stats = get_stats,
2127        .get_regs_len = get_regs_len,
2128        .get_regs = get_regs,
2129        .get_wol = get_wol,
2130        .get_link_ksettings = get_link_ksettings,
2131        .set_link_ksettings = set_link_ksettings,
2132};
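/*
 * Rough mapping (interface name assumed) from common ethtool invocations
 * to the handlers above:
 *
 *      ethtool -i eth0         -> get_drvinfo()
 *      ethtool -S eth0         -> get_sset_count(), get_strings(), get_stats()
 *      ethtool -g/-G eth0      -> get_sge_param()/set_sge_param()
 *      ethtool -d eth0         -> get_regs_len(), get_regs()
 *      ethtool -e/-E eth0      -> get_eeprom()/set_eeprom()
 *      ethtool -p eth0         -> set_phys_id()
 */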
2133
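/*
 * Range check used by the ioctl handlers below: a negative value means
 * "leave this parameter unchanged" and always passes; otherwise the
 * value must lie within [lo, hi].
 */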
2134static int in_range(int val, int lo, int hi)
2135{
2136        return val < 0 || (val <= hi && val >= lo);
2137}
2138
2139static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2140{
2141        struct port_info *pi = netdev_priv(dev);
2142        struct adapter *adapter = pi->adapter;
2143        u32 cmd;
2144        int ret;
2145
2146        if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2147                return -EFAULT;
2148
2149        switch (cmd) {
2150        case CHELSIO_SET_QSET_PARAMS:{
2151                int i;
2152                struct qset_params *q;
2153                struct ch_qset_params t;
2154                int q1 = pi->first_qset;
2155                int nqsets = pi->nqsets;
2156
2157                if (!capable(CAP_NET_ADMIN))
2158                        return -EPERM;
2159                if (copy_from_user(&t, useraddr, sizeof(t)))
2160                        return -EFAULT;
2161                if (t.cmd != CHELSIO_SET_QSET_PARAMS)
2162                        return -EINVAL;
2163                if (t.qset_idx >= SGE_QSETS)
2164                        return -EINVAL;
2165                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2166                    !in_range(t.cong_thres, 0, 255) ||
2167                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2168                              MAX_TXQ_ENTRIES) ||
2169                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2170                              MAX_TXQ_ENTRIES) ||
2171                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2172                              MAX_CTRL_TXQ_ENTRIES) ||
2173                    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2174                              MAX_RX_BUFFERS) ||
2175                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2176                              MAX_RX_JUMBO_BUFFERS) ||
2177                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2178                              MAX_RSPQ_ENTRIES))
2179                        return -EINVAL;
2180
2181                if ((adapter->flags & FULL_INIT_DONE) &&
2182                        (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2183                        t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2184                        t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2185                        t.polling >= 0 || t.cong_thres >= 0))
2186                        return -EBUSY;
2187
2188                /* Allow setting of any available qset when offload enabled */
2189                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2190                        q1 = 0;
2191                        for_each_port(adapter, i) {
2192                                pi = adap2pinfo(adapter, i);
2193                                nqsets += pi->first_qset + pi->nqsets;
2194                        }
2195                }
2196
2197                if (t.qset_idx < q1)
2198                        return -EINVAL;
2199                if (t.qset_idx > q1 + nqsets - 1)
2200                        return -EINVAL;
2201
2202                q = &adapter->params.sge.qset[t.qset_idx];
2203
2204                if (t.rspq_size >= 0)
2205                        q->rspq_size = t.rspq_size;
2206                if (t.fl_size[0] >= 0)
2207                        q->fl_size = t.fl_size[0];
2208                if (t.fl_size[1] >= 0)
2209                        q->jumbo_size = t.fl_size[1];
2210                if (t.txq_size[0] >= 0)
2211                        q->txq_size[0] = t.txq_size[0];
2212                if (t.txq_size[1] >= 0)
2213                        q->txq_size[1] = t.txq_size[1];
2214                if (t.txq_size[2] >= 0)
2215                        q->txq_size[2] = t.txq_size[2];
2216                if (t.cong_thres >= 0)
2217                        q->cong_thres = t.cong_thres;
2218                if (t.intr_lat >= 0) {
2219                        struct sge_qset *qs =
2220                                &adapter->sge.qs[t.qset_idx];
2221
2222                        q->coalesce_usecs = t.intr_lat;
2223                        t3_update_qset_coalesce(qs, q);
2224                }
2225                if (t.polling >= 0) {
2226                        if (adapter->flags & USING_MSIX)
2227                                q->polling = t.polling;
2228                        else {
2229                                /* No polling with INTx for T3A */
2230                                if (adapter->params.rev == 0 &&
2231                                        !(adapter->flags & USING_MSI))
2232                                        t.polling = 0;
2233
2234                                for (i = 0; i < SGE_QSETS; i++) {
2235                                        q = &adapter->params.sge.qset[i];
2237                                        q->polling = t.polling;
2238                                }
2239                        }
2240                }
2241
2242                if (t.lro >= 0) {
2243                        if (t.lro)
2244                                dev->wanted_features |= NETIF_F_GRO;
2245                        else
2246                                dev->wanted_features &= ~NETIF_F_GRO;
2247                        netdev_update_features(dev);
2248                }
2249
2250                break;
2251        }
2252        case CHELSIO_GET_QSET_PARAMS:{
2253                struct qset_params *q;
2254                struct ch_qset_params t;
2255                int q1 = pi->first_qset;
2256                int nqsets = pi->nqsets;
2257                int i;
2258
2259                if (copy_from_user(&t, useraddr, sizeof(t)))
2260                        return -EFAULT;
2261
2262                if (t.cmd != CHELSIO_GET_QSET_PARAMS)
2263                        return -EINVAL;
2264
2265                /* Display qsets for all ports when offload enabled */
2266                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2267                        q1 = 0;
2268                        for_each_port(adapter, i) {
2269                                pi = adap2pinfo(adapter, i);
2270                                nqsets = pi->first_qset + pi->nqsets;
2271                        }
2272                }
2273
2274                if (t.qset_idx >= nqsets)
2275                        return -EINVAL;
2276                t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2277
2278                q = &adapter->params.sge.qset[q1 + t.qset_idx];
2279                t.rspq_size = q->rspq_size;
2280                t.txq_size[0] = q->txq_size[0];
2281                t.txq_size[1] = q->txq_size[1];
2282                t.txq_size[2] = q->txq_size[2];
2283                t.fl_size[0] = q->fl_size;
2284                t.fl_size[1] = q->jumbo_size;
2285                t.polling = q->polling;
2286                t.lro = !!(dev->features & NETIF_F_GRO);
2287                t.intr_lat = q->coalesce_usecs;
2288                t.cong_thres = q->cong_thres;
2289                t.qnum = q1;
2290
2291                if (adapter->flags & USING_MSIX)
2292                        t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2293                else
2294                        t.vector = adapter->pdev->irq;
2295
2296                if (copy_to_user(useraddr, &t, sizeof(t)))
2297                        return -EFAULT;
2298                break;
2299        }
2300        case CHELSIO_SET_QSET_NUM:{
2301                struct ch_reg edata;
2302                unsigned int i, first_qset = 0, other_qsets = 0;
2303
2304                if (!capable(CAP_NET_ADMIN))
2305                        return -EPERM;
2306                if (adapter->flags & FULL_INIT_DONE)
2307                        return -EBUSY;
2308                if (copy_from_user(&edata, useraddr, sizeof(edata)))
2309                        return -EFAULT;
2310                if (edata.cmd != CHELSIO_SET_QSET_NUM)
2311                        return -EINVAL;
2312                if (edata.val < 1 ||
2313                        (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2314                        return -EINVAL;
2315
2316                for_each_port(adapter, i)
2317                        if (adapter->port[i] && adapter->port[i] != dev)
2318                                other_qsets += adap2pinfo(adapter, i)->nqsets;
2319
2320                if (edata.val + other_qsets > SGE_QSETS)
2321                        return -EINVAL;
2322
2323                pi->nqsets = edata.val;
2324
2325                for_each_port(adapter, i)
2326                        if (adapter->port[i]) {
2327                                pi = adap2pinfo(adapter, i);
2328                                pi->first_qset = first_qset;
2329                                first_qset += pi->nqsets;
2330                        }
2331                break;
2332        }
2333        case CHELSIO_GET_QSET_NUM:{
2334                struct ch_reg edata;
2335
2336                memset(&edata, 0, sizeof(struct ch_reg));
2337
2338                edata.cmd = CHELSIO_GET_QSET_NUM;
2339                edata.val = pi->nqsets;
2340                if (copy_to_user(useraddr, &edata, sizeof(edata)))
2341                        return -EFAULT;
2342                break;
2343        }
2344        case CHELSIO_LOAD_FW:{
2345                u8 *fw_data;
2346                struct ch_mem_range t;
2347
2348                if (!capable(CAP_SYS_RAWIO))
2349                        return -EPERM;
2350                if (copy_from_user(&t, useraddr, sizeof(t)))
2351                        return -EFAULT;
2352                if (t.cmd != CHELSIO_LOAD_FW)
2353                        return -EINVAL;
2354                /* Note: t.len is not validated here beyond what memdup_user() enforces. */
2355                fw_data = memdup_user(useraddr + sizeof(t), t.len);
2356                if (IS_ERR(fw_data))
2357                        return PTR_ERR(fw_data);
2358
2359                ret = t3_load_fw(adapter, fw_data, t.len);
2360                kfree(fw_data);
2361                if (ret)
2362                        return ret;
2363                break;
2364        }
2365        case CHELSIO_SETMTUTAB:{
2366                struct ch_mtus m;
2367                int i;
2368
2369                if (!is_offload(adapter))
2370                        return -EOPNOTSUPP;
2371                if (!capable(CAP_NET_ADMIN))
2372                        return -EPERM;
2373                if (offload_running(adapter))
2374                        return -EBUSY;
2375                if (copy_from_user(&m, useraddr, sizeof(m)))
2376                        return -EFAULT;
2377                if (m.cmd != CHELSIO_SETMTUTAB)
2378                        return -EINVAL;
2379                if (m.nmtus != NMTUS)
2380                        return -EINVAL;
2381                if (m.mtus[0] < 81)     /* accommodate SACK */
2382                        return -EINVAL;
2383
2384                /* MTUs must be in ascending order */
2385                for (i = 1; i < NMTUS; ++i)
2386                        if (m.mtus[i] < m.mtus[i - 1])
2387                                return -EINVAL;
2388
2389                memcpy(adapter->params.mtus, m.mtus,
2390                        sizeof(adapter->params.mtus));
2391                break;
2392        }
2393        case CHELSIO_GET_PM:{
2394                struct tp_params *p = &adapter->params.tp;
2395                struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2396
2397                if (!is_offload(adapter))
2398                        return -EOPNOTSUPP;
2399                m.tx_pg_sz = p->tx_pg_size;
2400                m.tx_num_pg = p->tx_num_pgs;
2401                m.rx_pg_sz = p->rx_pg_size;
2402                m.rx_num_pg = p->rx_num_pgs;
2403                m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2404                if (copy_to_user(useraddr, &m, sizeof(m)))
2405                        return -EFAULT;
2406                break;
2407        }
2408        case CHELSIO_SET_PM:{
2409                struct ch_pm m;
2410                struct tp_params *p = &adapter->params.tp;
2411
2412                if (!is_offload(adapter))
2413                        return -EOPNOTSUPP;
2414                if (!capable(CAP_NET_ADMIN))
2415                        return -EPERM;
2416                if (adapter->flags & FULL_INIT_DONE)
2417                        return -EBUSY;
2418                if (copy_from_user(&m, useraddr, sizeof(m)))
2419                        return -EFAULT;
2420                if (m.cmd != CHELSIO_SET_PM)
2421                        return -EINVAL;
2422                if (!is_power_of_2(m.rx_pg_sz) ||
2423                        !is_power_of_2(m.tx_pg_sz))
2424                        return -EINVAL; /* not power of 2 */
2425                if (!(m.rx_pg_sz & 0x14000))
2426                        return -EINVAL; /* not 16KB or 64KB */
2427                if (!(m.tx_pg_sz & 0x1554000))
2428                        return -EINVAL; /* not 16KB-16MB, in powers of 4 */
2429                if (m.tx_num_pg == -1)
2430                        m.tx_num_pg = p->tx_num_pgs;
2431                if (m.rx_num_pg == -1)
2432                        m.rx_num_pg = p->rx_num_pgs;
2433                if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2434                        return -EINVAL;
2435                if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2436                        m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2437                        return -EINVAL;
2438                p->rx_pg_size = m.rx_pg_sz;
2439                p->tx_pg_size = m.tx_pg_sz;
2440                p->rx_num_pgs = m.rx_num_pg;
2441                p->tx_num_pgs = m.tx_num_pg;
2442                break;
2443        }
2444        case CHELSIO_GET_MEM:{
2445                struct ch_mem_range t;
2446                struct mc7 *mem;
2447                u64 buf[32];
2448
2449                if (!is_offload(adapter))
2450                        return -EOPNOTSUPP;
2451                if (!(adapter->flags & FULL_INIT_DONE))
2452                        return -EIO;    /* need the memory controllers */
2453                if (copy_from_user(&t, useraddr, sizeof(t)))
2454                        return -EFAULT;
2455                if (t.cmd != CHELSIO_GET_MEM)
2456                        return -EINVAL;
2457                if ((t.addr & 7) || (t.len & 7))
2458                        return -EINVAL;
2459                if (t.mem_id == MEM_CM)
2460                        mem = &adapter->cm;
2461                else if (t.mem_id == MEM_PMRX)
2462                        mem = &adapter->pmrx;
2463                else if (t.mem_id == MEM_PMTX)
2464                        mem = &adapter->pmtx;
2465                else
2466                        return -EINVAL;
2467
2468                /*
2469                 * Version scheme:
2470                 * bits 0..9: chip version
2471                 * bits 10..15: chip revision
2472                 */
2473                t.version = 3 | (adapter->params.rev << 10);
2474                if (copy_to_user(useraddr, &t, sizeof(t)))
2475                        return -EFAULT;
2476
2477                /*
2478                 * Read 256 bytes at a time as len can be large and we don't
2479                 * want to use huge intermediate buffers.
2480                 */
2481                useraddr += sizeof(t);  /* advance to start of buffer */
2482                while (t.len) {
2483                        unsigned int chunk =
2484                                min_t(unsigned int, t.len, sizeof(buf));
2485
2486                        ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2487                                             buf);
2489                        if (ret)
2490                                return ret;
2491                        if (copy_to_user(useraddr, buf, chunk))
2492                                return -EFAULT;
2493                        useraddr += chunk;
2494                        t.addr += chunk;
2495                        t.len -= chunk;
2496                }
2497                break;
2498        }
2499        case CHELSIO_SET_TRACE_FILTER:{
2500                struct ch_trace t;
2501                const struct trace_params *tp;
2502
2503                if (!capable(CAP_NET_ADMIN))
2504                        return -EPERM;
2505                if (!offload_running(adapter))
2506                        return -EAGAIN;
2507                if (copy_from_user(&t, useraddr, sizeof(t)))
2508                        return -EFAULT;
2509                if (t.cmd != CHELSIO_SET_TRACE_FILTER)
2510                        return -EINVAL;
2511
2512                tp = (const struct trace_params *)&t.sip;
2513                if (t.config_tx)
2514                        t3_config_trace_filter(adapter, tp, 0,
2515                                                t.invert_match,
2516                                                t.trace_tx);
2517                if (t.config_rx)
2518                        t3_config_trace_filter(adapter, tp, 1,
2519                                                t.invert_match,
2520                                                t.trace_rx);
2521                break;
2522        }
2523        default:
2524                return -EOPNOTSUPP;
2525        }
2526        return 0;
2527}
2528
2529static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2530{
2531        struct mii_ioctl_data *data = if_mii(req);
2532        struct port_info *pi = netdev_priv(dev);
2533        struct adapter *adapter = pi->adapter;
2534
2535        switch (cmd) {
2536        case SIOCGMIIREG:
2537        case SIOCSMIIREG:
2538                /* Convert phy_id from older PRTAD/DEVAD format */
2539                if (is_10G(adapter) &&
2540                    !mdio_phy_id_is_c45(data->phy_id) &&
2541                    (data->phy_id & 0x1f00) &&
2542                    !(data->phy_id & 0xe0e0))
2543                        data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2544                                                       data->phy_id & 0x1f);
2545                /* FALLTHRU */
2546        case SIOCGMIIPHY:
2547                return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2548        case SIOCCHIOCTL:
2549                return cxgb_extension_ioctl(dev, req->ifr_data);
2550        default:
2551                return -EOPNOTSUPP;
2552        }
2553}
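/*
 * Illustrative sketch (not part of the driver) of how user space might
 * reach cxgb_extension_ioctl() through the SIOCCHIOCTL private ioctl;
 * the socket fd and interface name here are assumptions:
 *
 *      struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
 *      struct ifreq ifr;
 *
 *      strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *      ifr.ifr_data = (void *)&edata;
 *      if (ioctl(fd, SIOCCHIOCTL, &ifr) == 0)
 *              printf("%u queue sets\n", edata.val);
 */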
2554
2555static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2556{
2557        struct port_info *pi = netdev_priv(dev);
2558        struct adapter *adapter = pi->adapter;
2559        int ret;
2560
2561        if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2562                return ret;
2563        dev->mtu = new_mtu;
2564        init_port_mtus(adapter);
2565        if (adapter->params.rev == 0 && offload_running(adapter))
2566                t3_load_mtus(adapter, adapter->params.mtus,
2567                             adapter->params.a_wnd, adapter->params.b_wnd,
2568                             adapter->port[0]->mtu);
2569        return 0;
2570}
2571
2572static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2573{
2574        struct port_info *pi = netdev_priv(dev);
2575        struct adapter *adapter = pi->adapter;
2576        struct sockaddr *addr = p;
2577
2578        if (!is_valid_ether_addr(addr->sa_data))
2579                return -EADDRNOTAVAIL;
2580
2581        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2582        t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2583        if (offload_running(adapter))
2584                write_smt_entry(adapter, pi->port_id);
2585        return 0;
2586}
2587
2588static netdev_features_t cxgb_fix_features(struct net_device *dev,
2589        netdev_features_t features)
2590{
2591        /*
2592         * Since there is no support for separate rx/tx vlan accel
2593         * enable/disable make sure tx flag is always in same state as rx.
2594         */
2595        if (features & NETIF_F_HW_VLAN_CTAG_RX)
2596                features |= NETIF_F_HW_VLAN_CTAG_TX;
2597        else
2598                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2599
2600        return features;
2601}
2602
2603static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2604{
2605        netdev_features_t changed = dev->features ^ features;
2606
2607        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2608                cxgb_vlan_mode(dev, features);
2609
2610        return 0;
2611}
2612
2613#ifdef CONFIG_NET_POLL_CONTROLLER
2614static void cxgb_netpoll(struct net_device *dev)
2615{
2616        struct port_info *pi = netdev_priv(dev);
2617        struct adapter *adapter = pi->adapter;
2618        int qidx;
2619
2620        for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2621                struct sge_qset *qs = &adapter->sge.qs[qidx];
2622                void *source;
2623
2624                if (adapter->flags & USING_MSIX)
2625                        source = qs;
2626                else
2627                        source = adapter;
2628
2629                t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2630        }
2631}
2632#endif
2633
2634/*
2635 * Periodic accumulation of MAC statistics.
2636 */
2637static void mac_stats_update(struct adapter *adapter)
2638{
2639        int i;
2640
2641        for_each_port(adapter, i) {
2642                struct net_device *dev = adapter->port[i];
2643                struct port_info *p = netdev_priv(dev);
2644
2645                if (netif_running(dev)) {
2646                        spin_lock(&adapter->stats_lock);
2647                        t3_mac_update_stats(&p->mac);
2648                        spin_unlock(&adapter->stats_lock);
2649                }
2650        }
2651}
2652
2653static void check_link_status(struct adapter *adapter)
2654{
2655        int i;
2656
2657        for_each_port(adapter, i) {
2658                struct net_device *dev = adapter->port[i];
2659                struct port_info *p = netdev_priv(dev);
2660                int link_fault;
2661
2662                spin_lock_irq(&adapter->work_lock);
2663                link_fault = p->link_fault;
2664                spin_unlock_irq(&adapter->work_lock);
2665
2666                if (link_fault) {
2667                        t3_link_fault(adapter, i);
2668                        continue;
2669                }
2670
2671                if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2672                        t3_xgm_intr_disable(adapter, i);
2673                        t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2674
2675                        t3_link_changed(adapter, i);
2676                        t3_xgm_intr_enable(adapter, i);
2677                }
2678        }
2679}
2680
2681static void check_t3b2_mac(struct adapter *adapter)
2682{
2683        int i;
2684
2685        if (!rtnl_trylock())    /* synchronize with ifdown */
2686                return;
2687
2688        for_each_port(adapter, i) {
2689                struct net_device *dev = adapter->port[i];
2690                struct port_info *p = netdev_priv(dev);
2691                int status;
2692
2693                if (!netif_running(dev))
2694                        continue;
2695
2696                status = 0;
2697                if (netif_carrier_ok(dev))
2698                        status = t3b2_mac_watchdog_task(&p->mac);
2699                if (status == 1)
2700                        p->mac.stats.num_toggled++;
2701                else if (status == 2) {
2702                        struct cmac *mac = &p->mac;
2703
2704                        t3_mac_set_mtu(mac, dev->mtu);
2705                        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2706                        cxgb_set_rxmode(dev);
2707                        t3_link_start(&p->phy, mac, &p->link_config);
2708                        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2709                        t3_port_intr_enable(adapter, p->port_id);
2710                        p->mac.stats.num_resets++;
2711                }
2712        }
2713        rtnl_unlock();
2714}
2715
2717static void t3_adap_check_task(struct work_struct *work)
2718{
2719        struct adapter *adapter = container_of(work, struct adapter,
2720                                               adap_check_task.work);
2721        const struct adapter_params *p = &adapter->params;
2722        int port;
2723        unsigned int v, status, reset;
2724
2725        adapter->check_task_cnt++;
2726
2727        check_link_status(adapter);
2728
2729        /* Accumulate MAC stats if needed */
2730        if (!p->linkpoll_period ||
2731            (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2732            p->stats_update_period) {
2733                mac_stats_update(adapter);
2734                adapter->check_task_cnt = 0;
2735        }
2736
2737        if (p->rev == T3_REV_B2)
2738                check_t3b2_mac(adapter);
2739
2740        /*
2741         * Scan the XGMACs for various conditions that we want to monitor by
2742         * periodic polling rather than via an interrupt, because these
2743         * conditions would otherwise flood the system with interrupts and we
2744         * only really need to know that they are "happening".  For each
2745         * condition we count each detection and then clear it for the next
2746         * polling pass.
2747         */
2748        for_each_port(adapter, port) {
2749                struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2750                u32 cause;
2751
2752                cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2753                reset = 0;
2754                if (cause & F_RXFIFO_OVERFLOW) {
2755                        mac->stats.rx_fifo_ovfl++;
2756                        reset |= F_RXFIFO_OVERFLOW;
2757                }
2758
2759                t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2760        }
2761
2762        /*
2763         * We do the same as above for FL_EMPTY interrupts.
2764         */
2765        status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2766        reset = 0;
2767
2768        if (status & F_FLEMPTY) {
2769                struct sge_qset *qs = &adapter->sge.qs[0];
2770                int i = 0;
2771
2772                reset |= F_FLEMPTY;
2773
2774                v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2775                    0xffff;
2776
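                /*
                 * The FL-empty status bits alternate between the two free
                 * lists of each qset: bit 0 is qs[0].fl[0], bit 1 is
                 * qs[0].fl[1], bit 2 is qs[1].fl[0], and so on.
                 */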
2777                while (v) {
2778                        qs->fl[i].empty += (v & 1);
2779                        if (i)
2780                                qs++;
2781                        i ^= 1;
2782                        v >>= 1;
2783                }
2784        }
2785
2786        t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2787
2788        /* Schedule the next check update if any port is active. */
2789        spin_lock_irq(&adapter->work_lock);
2790        if (adapter->open_device_map & PORT_MASK)
2791                schedule_chk_task(adapter);
2792        spin_unlock_irq(&adapter->work_lock);
2793}
2794
2795static void db_full_task(struct work_struct *work)
2796{
2797        struct adapter *adapter = container_of(work, struct adapter,
2798                                               db_full_task);
2799
2800        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2801}
2802
2803static void db_empty_task(struct work_struct *work)
2804{
2805        struct adapter *adapter = container_of(work, struct adapter,
2806                                               db_empty_task);
2807
2808        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2809}
2810
2811static void db_drop_task(struct work_struct *work)
2812{
2813        struct adapter *adapter = container_of(work, struct adapter,
2814                                               db_drop_task);
2815        unsigned long delay = 1000;
2816        unsigned short r;
2817
2818        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2819
2820        /*
2821         * Sleep a while before ringing the driver qset doorbells.
2822         * The delay is between 1000 and 2023 usecs.
2823         */
2824        get_random_bytes(&r, 2);
2825        delay += r & 1023;
2826        set_current_state(TASK_UNINTERRUPTIBLE);
2827        schedule_timeout(usecs_to_jiffies(delay));
2828        ring_dbs(adapter);
2829}
2830
2831/*
2832 * Processes external (PHY) interrupts in process context.
2833 */
2834static void ext_intr_task(struct work_struct *work)
2835{
2836        struct adapter *adapter = container_of(work, struct adapter,
2837                                               ext_intr_handler_task);
2838        int i;
2839
2840        /* Disable link fault interrupts */
2841        for_each_port(adapter, i) {
2842                struct net_device *dev = adapter->port[i];
2843                struct port_info *p = netdev_priv(dev);
2844
2845                t3_xgm_intr_disable(adapter, i);
2846                t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2847        }
2848
2849        /* Re-enable link fault interrupts */
2850        t3_phy_intr_handler(adapter);
2851
2852        for_each_port(adapter, i)
2853                t3_xgm_intr_enable(adapter, i);
2854
2855        /* Now reenable external interrupts */
2856        spin_lock_irq(&adapter->work_lock);
2857        if (adapter->slow_intr_mask) {
2858                adapter->slow_intr_mask |= F_T3DBG;
2859                t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2860                t3_write_reg(adapter, A_PL_INT_ENABLE0,
2861                             adapter->slow_intr_mask);
2862        }
2863        spin_unlock_irq(&adapter->work_lock);
2864}
2865
2866/*
2867 * Interrupt-context handler for external (PHY) interrupts.
2868 */
2869void t3_os_ext_intr_handler(struct adapter *adapter)
2870{
2871        /*
2872         * Schedule a task to handle external interrupts as they may be slow
2873         * and we use a mutex to protect MDIO registers.  We disable PHY
2874         * interrupts in the meantime and let the task reenable them when
2875         * it's done.
2876         */
2877        spin_lock(&adapter->work_lock);
2878        if (adapter->slow_intr_mask) {
2879                adapter->slow_intr_mask &= ~F_T3DBG;
2880                t3_write_reg(adapter, A_PL_INT_ENABLE0,
2881                             adapter->slow_intr_mask);
2882                queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2883        }
2884        spin_unlock(&adapter->work_lock);
2885}
2886
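/*
 * Called from interrupt context to flag a link fault; check_link_status(),
 * run periodically by t3_adap_check_task(), notices the flag and handles
 * the fault in process context via t3_link_fault().
 */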
2887void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2888{
2889        struct net_device *netdev = adapter->port[port_id];
2890        struct port_info *pi = netdev_priv(netdev);
2891
2892        spin_lock(&adapter->work_lock);
2893        pi->link_fault = 1;
2894        spin_unlock(&adapter->work_lock);
2895}
2896
2897static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2898{
2899        int i, ret = 0;
2900
2901        if (is_offload(adapter) &&
2902            test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2903                cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2904                offload_close(&adapter->tdev);
2905        }
2906
2907        /* Stop all ports */
2908        for_each_port(adapter, i) {
2909                struct net_device *netdev = adapter->port[i];
2910
2911                if (netif_running(netdev))
2912                        __cxgb_close(netdev, on_wq);
2913        }
2914
2915        /* Stop SGE timers */
2916        t3_stop_sge_timers(adapter);
2917
2918        adapter->flags &= ~FULL_INIT_DONE;
2919
2920        if (reset)
2921                ret = t3_reset_adapter(adapter);
2922
2923        pci_disable_device(adapter->pdev);
2924
2925        return ret;
2926}
2927
2928static int t3_reenable_adapter(struct adapter *adapter)
2929{
2930        if (pci_enable_device(adapter->pdev)) {
2931                dev_err(&adapter->pdev->dev,
2932                        "Cannot re-enable PCI device after reset.\n");
2933                goto err;
2934        }
2935        pci_set_master(adapter->pdev);
2936        pci_restore_state(adapter->pdev);
2937        pci_save_state(adapter->pdev);
2938
2939        /* Free sge resources */
2940        t3_free_sge_resources(adapter);
2941
2942        if (t3_replay_prep_adapter(adapter))
2943                goto err;
2944
2945        return 0;
2946err:
2947        return -1;
2948}
2949
2950static void t3_resume_ports(struct adapter *adapter)
2951{
2952        int i;
2953
2954        /* Restart the ports */
2955        for_each_port(adapter, i) {
2956                struct net_device *netdev = adapter->port[i];
2957
2958                if (netif_running(netdev) && cxgb_open(netdev)) {
2959                        dev_err(&adapter->pdev->dev,
2960                                "can't bring device back up after reset\n");
2961                        continue;
2962                }
2966        }
2967
2968        if (is_offload(adapter) && !ofld_disable)
2969                cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2970}
2971
2972/*
2973 * Process a fatal error: bring the ports down, reset the chip, then bring
2974 * the ports back up.
2975 */
2976static void fatal_error_task(struct work_struct *work)
2977{
2978        struct adapter *adapter = container_of(work, struct adapter,
2979                                               fatal_error_handler_task);
2980        int err = 0;
2981
2982        rtnl_lock();
2983        err = t3_adapter_error(adapter, 1, 1);
2984        if (!err)
2985                err = t3_reenable_adapter(adapter);
2986        if (!err)
2987                t3_resume_ports(adapter);
2988
2989        CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2990        rtnl_unlock();
2991}
2992
2993void t3_fatal_err(struct adapter *adapter)
2994{
2995        unsigned int fw_status[4];
2996
2997        if (adapter->flags & FULL_INIT_DONE) {
2998                t3_sge_stop(adapter);
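                    /* Silence the MAC TX and RX paths on both ports. */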
2999                t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
3000                t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
3001                t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
3002                t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
3003
3004                spin_lock(&adapter->work_lock);
3005                t3_intr_disable(adapter);
3006                queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
3007                spin_unlock(&adapter->work_lock);
3008        }
3009        CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
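            /* Read the firmware's status words from the CIM for diagnostics. */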
3010        if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
3011                CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
3012                         fw_status[0], fw_status[1],
3013                         fw_status[2], fw_status[3]);
3014}
3015
3016/**
3017 * t3_io_error_detected - called when a PCI error is detected
3018 * @pdev: Pointer to PCI device
3019 * @state: The current PCI connection state
3020 *
3021 * This function is called after a PCI bus error affecting
3022 * this device has been detected.
3023 */
3024static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3025                                             pci_channel_state_t state)
3026{
3027        struct adapter *adapter = pci_get_drvdata(pdev);
3028
3029        if (state == pci_channel_io_perm_failure)
3030                return PCI_ERS_RESULT_DISCONNECT;
3031
3032        t3_adapter_error(adapter, 0, 0);
3033
3034        /* Request a slot reset. */
3035        return PCI_ERS_RESULT_NEED_RESET;
3036}
3037
3038/**
3039 * t3_io_slot_reset - called after the PCI bus has been reset.
3040 * @pdev: Pointer to PCI device
3041 *
3042 * Restart the card from scratch, as if from a cold boot.
3043 */
3044static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3045{
3046        struct adapter *adapter = pci_get_drvdata(pdev);
3047
3048        if (!t3_reenable_adapter(adapter))
3049                return PCI_ERS_RESULT_RECOVERED;
3050
3051        return PCI_ERS_RESULT_DISCONNECT;
3052}
3053
3054/**
3055 * t3_io_resume - called when traffic can start flowing again.
3056 * @pdev: Pointer to PCI device
3057 *
3058 * This callback is called when the error recovery driver tells us that
3059 * it's OK to resume normal operation.
3060 */
3061static void t3_io_resume(struct pci_dev *pdev)
3062{
3063        struct adapter *adapter = pci_get_drvdata(pdev);
3064
3065        CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3066                 t3_read_reg(adapter, A_PCIE_PEX_ERR));
3067
3068        rtnl_lock();
3069        t3_resume_ports(adapter);
3070        rtnl_unlock();
3071}
3072
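    /*
     * PCI error recovery flow: the PCI core invokes .error_detected first
     * (we quiesce and request a slot reset), then .slot_reset once the
     * link has been reset (we re-enable the device), and finally .resume
     * when traffic may flow again (we bring the ports back up).
     */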
3073static const struct pci_error_handlers t3_err_handler = {
3074        .error_detected = t3_io_error_detected,
3075        .slot_reset = t3_io_slot_reset,
3076        .resume = t3_io_resume,
3077};
3078
3079/*
3080 * Set the number of queue sets (qsets) per port based on the number of
3081 * CPUs and the number of ports, capped so the total does not exceed the
3082 * number of qsets available in hardware.
3083 */
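    /*
     * Worked example (hypothetical numbers, assuming SGE_QSETS is 8): with
     * two ports and nine MSI-X vectors, nqsets starts at 8 (one vector is
     * reserved for async events); 2 * 8 exceeds SGE_QSETS, so nqsets is
     * halved to 4, and with four or more CPUs each port keeps 4 qsets.
     */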
3084static void set_nqsets(struct adapter *adap)
3085{
3086        int i, j = 0;
3087        int num_cpus = netif_get_num_default_rss_queues();
3088        int hwports = adap->params.nports;
3089        int nqsets = adap->msix_nvectors - 1;
3090
3091        if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3092                if (hwports == 2 &&
3093                    (hwports * nqsets > SGE_QSETS ||
3094                     num_cpus >= nqsets / hwports))
3095                        nqsets /= hwports;
3096                if (nqsets > num_cpus)
3097                        nqsets = num_cpus;
3098                if (nqsets < 1 || hwports == 4)
3099                        nqsets = 1;
3100        } else {
3101                nqsets = 1;
            }
3102
3103        for_each_port(adap, i) {
3104                struct port_info *pi = adap2pinfo(adap, i);
3105
3106                pi->first_qset = j;
3107                pi->nqsets = nqsets;
3108                j = pi->first_qset + nqsets;
3109
3110                dev_info(&adap->pdev->dev,
3111                         "Port %d using %d queue sets.\n", i, nqsets);
3112        }
3113}
3114
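    /*
     * Ask for one MSI-X vector per qset plus one for slow-path/async
     * interrupts; accept as few as one vector per port plus one.
     */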
3115static int cxgb_enable_msix(struct adapter *adap)
3116{
3117        struct msix_entry entries[SGE_QSETS + 1];
3118        int vectors;
3119        int i;
3120
3121        vectors = ARRAY_SIZE(entries);
3122        for (i = 0; i < vectors; ++i)
3123                entries[i].entry = i;
3124
3125        vectors = pci_enable_msix_range(adap->pdev, entries,
3126                                        adap->params.nports + 1, vectors);
3127        if (vectors < 0)
3128                return vectors;
3129
3130        for (i = 0; i < vectors; ++i)
3131                adap->msix_info[i].vec = entries[i].vector;
3132        adap->msix_nvectors = vectors;
3133
3134        return 0;
3135}
3136
3137static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
3138{
3139        static const char *pci_variant[] = {
3140                "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3141        };
3142
3143        int i;
3144        char buf[80];
3145
3146        if (is_pcie(adap))
3147                snprintf(buf, sizeof(buf), "%s x%d",
3148                         pci_variant[adap->params.pci.variant],
3149                         adap->params.pci.width);
3150        else
3151                snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3152                         pci_variant[adap->params.pci.variant],
3153                         adap->params.pci.speed, adap->params.pci.width);
3154
3155        for_each_port(adap, i) {
3156                struct net_device *dev = adap->port[i];
3157                const struct port_info *pi = netdev_priv(dev);
3158
3159                if (!test_bit(i, &adap->registered_device_map))
3160                        continue;
3161                netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
3162                            ai->desc, pi->phy.desc,
3163                            is_offload(adap) ? "R" : "", adap->params.rev, buf,
3164                            (adap->flags & USING_MSIX) ? " MSI-X" :
3165                            (adap->flags & USING_MSI) ? " MSI" : "");
3166                if (adap->name == dev->name && adap->params.vpd.mclk)
3167                        pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3168                               adap->name, t3_mc7_size(&adap->cm) >> 20,
3169                               t3_mc7_size(&adap->pmtx) >> 20,
3170                               t3_mc7_size(&adap->pmrx) >> 20,
3171                               adap->params.vpd.sn);
3172        }
3173}
3174
3175static const struct net_device_ops cxgb_netdev_ops = {
3176        .ndo_open               = cxgb_open,
3177        .ndo_stop               = cxgb_close,
3178        .ndo_start_xmit         = t3_eth_xmit,
3179        .ndo_get_stats          = cxgb_get_stats,
3180        .ndo_validate_addr      = eth_validate_addr,
3181        .ndo_set_rx_mode        = cxgb_set_rxmode,
3182        .ndo_do_ioctl           = cxgb_ioctl,
3183        .ndo_change_mtu         = cxgb_change_mtu,
3184        .ndo_set_mac_address    = cxgb_set_mac_addr,
3185        .ndo_fix_features       = cxgb_fix_features,
3186        .ndo_set_features       = cxgb_set_features,
3187#ifdef CONFIG_NET_POLL_CONTROLLER
3188        .ndo_poll_controller    = cxgb_netpoll,
3189#endif
3190};
3191
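    /*
     * Derive the iSCSI MAC from the port's MAC address by setting the top
     * bit of byte 3, giving the offload function its own unicast address.
     */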
3192static void cxgb3_init_iscsi_mac(struct net_device *dev)
3193{
3194        struct port_info *pi = netdev_priv(dev);
3195
3196        memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3197        pi->iscsic.mac_addr[3] |= 0x80;
3198}
3199
3200#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3201#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3202                        NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3203static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3204{
3205        int i, err, pci_using_dac = 0;
3206        resource_size_t mmio_start, mmio_len;
3207        const struct adapter_info *ai;
3208        struct adapter *adapter = NULL;
3209        struct port_info *pi;
3210
3211        pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
3212
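            /* The workqueue is shared by all adapters and created on first probe. */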
3213        if (!cxgb3_wq) {
3214                cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3215                if (!cxgb3_wq) {
3216                        pr_err("cannot initialize work queue\n");
3217                        return -ENOMEM;
3218                }
3219        }
3220
3221        err = pci_enable_device(pdev);
3222        if (err) {
3223                dev_err(&pdev->dev, "cannot enable PCI device\n");
3224                goto out;
3225        }
3226
3227        err = pci_request_regions(pdev, DRV_NAME);
3228        if (err) {
3229                /* Just info, some other driver may have claimed the device. */
3230                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3231                goto out_disable_device;
3232        }
3233
3234        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3235                pci_using_dac = 1;
3236                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3237                if (err) {
3238                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3239                               "coherent allocations\n");
3240                        goto out_release_regions;
3241                }
3242        } else {
                    err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                    if (err) {
3243                        dev_err(&pdev->dev, "no usable DMA configuration\n");
3244                        goto out_release_regions;
                    }
3245        }
3246
3247        pci_set_master(pdev);
3248        pci_save_state(pdev);
3249
3250        mmio_start = pci_resource_start(pdev, 0);
3251        mmio_len = pci_resource_len(pdev, 0);
3252        ai = t3_get_adapter_info(ent->driver_data);
3253
3254        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3255        if (!adapter) {
3256                err = -ENOMEM;
3257                goto out_release_regions;
3258        }
3259
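            /*
             * Pre-allocate an skb sized for a CPL_SET_TCB_FIELD message so
             * that critical control messages can still be sent when memory
             * allocation fails at an inopportune moment.
             */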
3260        adapter->nofail_skb =
3261                alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3262        if (!adapter->nofail_skb) {
3263                dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3264                err = -ENOMEM;
3265                goto out_free_adapter;
3266        }
3267
3268        adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3269        if (!adapter->regs) {
3270                dev_err(&pdev->dev, "cannot map device registers\n");
3271                err = -ENOMEM;
3272                goto out_free_adapter_nofail;
3273        }
3274
3275        adapter->pdev = pdev;
3276        adapter->name = pci_name(pdev);
3277        adapter->msg_enable = dflt_msg_enable;
3278        adapter->mmio_len = mmio_len;
3279
3280        mutex_init(&adapter->mdio_lock);
3281        spin_lock_init(&adapter->work_lock);
3282        spin_lock_init(&adapter->stats_lock);
3283
3284        INIT_LIST_HEAD(&adapter->adapter_list);
3285        INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3286        INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3287
3288        INIT_WORK(&adapter->db_full_task, db_full_task);
3289        INIT_WORK(&adapter->db_empty_task, db_empty_task);
3290        INIT_WORK(&adapter->db_drop_task, db_drop_task);
3291
3292        INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3293
3294        for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3295                struct net_device *netdev;
3296
3297                netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3298                if (!netdev) {
3299                        err = -ENOMEM;
3300                        goto out_free_dev;
3301                }
3302
3303                SET_NETDEV_DEV(netdev, &pdev->dev);
3304
3305                adapter->port[i] = netdev;
3306                pi = netdev_priv(netdev);
3307                pi->adapter = adapter;
3308                pi->port_id = i;
3309                netif_carrier_off(netdev);
3310                netdev->irq = pdev->irq;
3311                netdev->mem_start = mmio_start;
3312                netdev->mem_end = mmio_start + mmio_len - 1;
3313                netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3314                        NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
3315                netdev->features |= netdev->hw_features |
3316                                    NETIF_F_HW_VLAN_CTAG_TX;
3317                netdev->vlan_features |= netdev->features & VLAN_FEAT;
3318                if (pci_using_dac)
3319                        netdev->features |= NETIF_F_HIGHDMA;
3320
3321                netdev->netdev_ops = &cxgb_netdev_ops;
3322                netdev->ethtool_ops = &cxgb_ethtool_ops;
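                    /* The 81-byte minimum MTU accommodates SACK. */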
3323                netdev->min_mtu = 81;
3324                netdev->max_mtu = ETH_MAX_MTU;
3325                netdev->dev_port = pi->port_id;
3326        }
3327
3328        pci_set_drvdata(pdev, adapter);
3329        if (t3_prep_adapter(adapter, ai, 1) < 0) {
3330                err = -ENODEV;
3331                goto out_free_dev;
3332        }
3333
3334        /*
3335         * The card is now ready to go.  If any errors occur during device
3336         * registration we do not fail the whole card but rather proceed only
3337         * with the ports we manage to register successfully.  However, we must
3338         * register at least one net device.
3339         */
3340        for_each_port(adapter, i) {
3341                err = register_netdev(adapter->port[i]);
3342                if (err)
3343                        dev_warn(&pdev->dev,
3344                                 "cannot register net device %s, skipping\n",
3345                                 adapter->port[i]->name);
3346                else {
3347                        /*
3348                         * Change the name we use for messages to the name of
3349                         * the first successfully registered interface.
3350                         */
3351                        if (!adapter->registered_device_map)
3352                                adapter->name = adapter->port[i]->name;
3353
3354                        __set_bit(i, &adapter->registered_device_map);
3355                }
3356        }
3357        if (!adapter->registered_device_map) {
3358                dev_err(&pdev->dev, "could not register any net devices\n");
3359                goto out_free_dev;
3360        }
3361
3362        for_each_port(adapter, i)
3363                cxgb3_init_iscsi_mac(adapter->port[i]);
3364
3365        /* The driver is ready; reflect that on the adapter's LEDs. */
3366        t3_led_ready(adapter);
3367
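            /* Register the offload side of the adapter, if it has one. */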
3368        if (is_offload(adapter)) {
3369                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3370                cxgb3_adapter_ofld(adapter);
3371        }
3372
3373        /* Choose the interrupt type: MSI-X if msi > 1, MSI if msi > 0, else INTx. */
3374        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3375                adapter->flags |= USING_MSIX;
3376        else if (msi > 0 && pci_enable_msi(pdev) == 0)
3377                adapter->flags |= USING_MSI;
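            /* For example, loading with "msi=0" forces legacy INTx interrupts. */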
3378
3379        set_nqsets(adapter);
3380
3381        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3382                                 &cxgb3_attr_group);
3383        if (err) {
3384                dev_err(&pdev->dev, "cannot create sysfs group\n");
3385                goto out_close_led;
3386        }
3387
3388        print_port_info(adapter, ai);
3389        return 0;
3390
3391out_close_led:
3392        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
3393
3394out_free_dev:
3395        iounmap(adapter->regs);
3396        for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3397                if (adapter->port[i])
3398                        free_netdev(adapter->port[i]);
3399
3400out_free_adapter_nofail:
3401        kfree_skb(adapter->nofail_skb);
3402
3403out_free_adapter:
3404        kfree(adapter);
3405
3406out_release_regions:
3407        pci_release_regions(pdev);
3408out_disable_device:
3409        pci_disable_device(pdev);
3410out:
3411        return err;
3412}
3413
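    /*
     * Tear down in roughly the reverse order of init_one(): sysfs group,
     * offload side, net devices, SGE resources, interrupts, and finally
     * the PCI device itself.
     */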
3414static void remove_one(struct pci_dev *pdev)
3415{
3416        struct adapter *adapter = pci_get_drvdata(pdev);
3417
3418        if (adapter) {
3419                int i;
3420
3421                t3_sge_stop(adapter);
3422                sysfs_remove_group(&adapter->port[0]->dev.kobj,
3423                                   &cxgb3_attr_group);
3424
3425                if (is_offload(adapter)) {
3426                        cxgb3_adapter_unofld(adapter);
3427                        if (test_bit(OFFLOAD_DEVMAP_BIT,
3428                                     &adapter->open_device_map))
3429                                offload_close(&adapter->tdev);
3430                }
3431
3432                for_each_port(adapter, i)
3433                        if (test_bit(i, &adapter->registered_device_map))
3434                                unregister_netdev(adapter->port[i]);
3435
3436                t3_stop_sge_timers(adapter);
3437                t3_free_sge_resources(adapter);
3438                cxgb_disable_msi(adapter);
3439
3440                for_each_port(adapter, i)
3441                        if (adapter->port[i])
3442                                free_netdev(adapter->port[i]);
3443
3444                iounmap(adapter->regs);
3445                kfree_skb(adapter->nofail_skb);
3446                kfree(adapter);
3447                pci_release_regions(pdev);
3448                pci_disable_device(pdev);
3449        }
3450}
3451
3452static struct pci_driver driver = {
3453        .name = DRV_NAME,
3454        .id_table = cxgb3_pci_tbl,
3455        .probe = init_one,
3456        .remove = remove_one,
3457        .err_handler = &t3_err_handler,
3458};
3459
3460static int __init cxgb3_init_module(void)
3461{
3462        cxgb3_offload_init();
3463
3464        return pci_register_driver(&driver);
3465}
3469
3470static void __exit cxgb3_cleanup_module(void)
3471{
3472        pci_unregister_driver(&driver);
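            /* The workqueue is created lazily on first probe, so it may not exist. */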
3473        if (cxgb3_wq)
3474                destroy_workqueue(cxgb3_wq);
3475}
3476
3477module_init(cxgb3_init_module);
3478module_exit(cxgb3_cleanup_module);
3479