linux/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, %s-duplex\n",
                            s, p->link_config.duplex == DUPLEX_FULL
                            ? "full" : "half");
        }
}

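/*
 * Make the XGMAC discard egress frames (F_ENDROPPKT) so its TX FIFO drains
 * while the link is unusable, and re-enable the TX and RX paths so the
 * drain makes progress.
 */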
static void enable_tx_fifo_drain(struct adapter *adapter,
                                 struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
                         F_ENDROPPKT);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
        t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

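/* Restore normal TX FIFO behavior once the link is healthy again. */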
static void disable_tx_fifo_drain(struct adapter *adapter,
                                  struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
                         F_ENDROPPKT, 0);
}

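/**
 *      t3_os_link_fault - handle a link fault notification
 *      @adap: the adapter with the faulting link
 *      @port_id: the port index affected
 *      @state: nonzero if the fault has cleared, 0 if a fault was detected
 *
 *      Updates the carrier state, clears any latched local faults in the
 *      XGMAC when the link recovers, and drains the TX FIFO while the
 *      link is down.
 */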
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                disable_tx_fifo_drain(adap, pi);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else {
                netif_carrier_off(dev);

                /* Flush TX FIFO */
                enable_tx_fifo_drain(adap, pi);
        }
        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        disable_tx_fifo_drain(adapter, pi);

                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);

                        /* Flush TX FIFO */
                        enable_tx_fifo_drain(adapter, pi);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter associated with the link change
 *      @port_id: the port index whose PHY module has changed
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                netdev_info(dev, "PHY module unplugged\n");
        else
                netdev_info(dev, "%s PHY module inserted\n",
                            mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        t3_mac_reset(mac);
        t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
        t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
        t3_mac_set_rx_mode(mac, dev);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 10;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

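/*
 * Exercise the parity-protected TP memories (SMT, L2T, and routing table)
 * by writing every entry once, then send a final CPL_SET_TCB_FIELD and
 * wait for its reply so we know all the writes have been absorbed.
 */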
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->mtu_idx = NMTUS - 1;
                req->iff = i;
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                goto alloc_skb_fail;

        greq = __skb_put_zero(skb, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        if (skb == adap->nofail_skb) {
                i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        }

        t3_tp_set_offload_mode(adap, 0);
        return i;

alloc_skb_fail:
        t3_tp_set_offload_mode(adap, 0);
        return -ENOMEM;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE + 1];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }
        rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

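/*
 * Ring the doorbell of every egress context belonging to an initialized
 * queue set so the SGE re-examines those TX queues.
 */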
static void ring_dbs(struct adapter *adap)
{
        int i, j;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        for (j = 0; j < SGE_TXQ_PER_SET; j++)
                                t3_write_reg(adap, A_SG_KDOORBELL,
                                             F_SELEGRCNTX |
                                             V_EGRCNTX(qs->txq[j].cntxt_id));
        }
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

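/*
 * sysfs helpers.  Attribute reads and writes run under the RTNL lock so
 * they don't race with ioctls that may reconfigure or shut down the
 * device.
 */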
static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0444, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0644, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static const struct attribute_group cxgb3_attr_group = {
        .attrs = cxgb3_attrs,
};

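/*
 * Show a TX traffic scheduler's rate.  Each TM PIO word holds two
 * schedulers' bytes-per-tick (bpt) and clocks-per-tick (cpt) fields; a
 * zero cpt means the scheduler is disabled, otherwise the rate works out
 * to (core clock / cpt) * bpt bytes per second, printed in Kbps.
 */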
static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, 0644, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static const struct attribute_group offload_attr_group = {
        .attrs = offload_attrs,
};

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

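/*
 * Program one source MAC table (SMT) entry with the port's LAN and iSCSI
 * MAC addresses via an offload control message.
 */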
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct port_info *pi = netdev_priv(adapter->port[idx]);
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = __skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
                write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

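/*
 * Send a management work request binding a queue set to a port's packet
 * scheduler, falling back to the pre-allocated nofail_skb under memory
 * pressure.
 */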
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                             int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                return -ENOMEM;

        req = skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                             GFP_KERNEL);
                if (!adap->nofail_skb)
                        ret = -ENOMEM;
        }

        return ret;
}

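/* Bind each queue set to its port for packet scheduling; the last error,
 * if any, is returned.
 */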
static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."                    \
        __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."                \
        __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);

static inline const char *get_edc_fw_name(int edc_idx)
{
        const char *fw_name = NULL;

        switch (edc_idx) {
        case EDC_OPT_AEL2005:
                fw_name = AEL2005_OPT_EDC_NAME;
                break;
        case EDC_TWX_AEL2005:
                fw_name = AEL2005_TWX_EDC_NAME;
                break;
        case EDC_TWX_AEL2020:
                fw_name = AEL2020_TWX_EDC_NAME;
                break;
        }
        return fw_name;
}

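/*
 * Load the EDC (electronic dispersion compensation) microcode for an
 * AEL20xx PHY, verify the image's additive checksum, and unpack the
 * 32-bit words into the PHY's 16-bit register cache.
 */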
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
        struct adapter *adapter = phy->adapter;
        const struct firmware *fw;
        const char *fw_name;
        u32 csum;
        const __be32 *p;
        u16 *cache = phy->phy_cache;
        int i, ret = -EINVAL;

        fw_name = get_edc_fw_name(edc_idx);
        if (fw_name)
                ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
        if (ret < 0) {
                dev_err(&adapter->pdev->dev,
                        "could not upgrade firmware: unable to load %s\n",
                        fw_name);
                return ret;
        }

        /* check size, taking the trailing checksum into account */
        if (fw->size > size + 4) {
                CH_ERR(adapter, "firmware image too large %u, expected %d\n",
                       (unsigned int)fw->size, size + 4);
                ret = -EINVAL;
        }

        /* compute checksum */
        p = (const __be32 *)fw->data;
        for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                ret = -EINVAL;
        }

        for (i = 0; i < size / 4; i++) {
                *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
                *cache++ = be32_to_cpu(p[i]) & 0xffff;
        }

        release_firmware(fw);

        return ret;
}

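/* Fetch the bundled firmware image FW_FNAME and flash it to the adapter. */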
static int upgrade_fw(struct adapter *adap)
{
        int ret;
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        ret = request_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        FW_FNAME);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

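/* Map the chip revision to the suffix used in TP SRAM image names. */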
static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch (adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

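/*
 * Load the protocol-engine (TP) SRAM image matching the chip revision,
 * validate it, and write it to the adapter.
 */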
static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
        int i;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
                struct sge_rspq *q = &adap->sge.qs[i].rspq;

                spin_lock_irq(&q->lock);
                spin_unlock_irq(&q->lock);
        }
}

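/*
 * Enable or disable hardware VLAN tag extraction for a port.  Pre-rev-1
 * chips have a single control shared by all ports, so there the setting
 * is the OR of every port's NETIF_F_HW_VLAN_CTAG_RX flag.
 */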
static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (adapter->params.rev > 0) {
                t3_set_vlan_accel(adapter, 1 << pi->port_id,
                                  features & NETIF_F_HW_VLAN_CTAG_RX);
        } else {
                /* single control for all ports */
                unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

                for_each_port(adapter, i)
                        have_vlans |=
                                adapter->port[i]->features &
                                NETIF_F_HW_VLAN_CTAG_RX;

                t3_set_vlan_accel(adapter, 1, have_vlans);
        }
        t3_synchronize_rx(adapter, pi);
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int i, err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                for_each_port(adap, i)
                        cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else {
                err = request_irq(adap->pdev->irq,
                                  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
                                  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
                                  adap->name, adap);
                if (err)
                        goto irq_err;
        }

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                int ret = bind_qsets(adap);

                if (ret < 0) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        err = ret;
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        t3_sge_stop(adapter);
        if (!on_wq)
                flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
}

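/*
 * Schedule the periodic adapter check task, at the link polling period
 * when link polling is in use and at the statistics update period
 * otherwise.
 */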
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

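/*
 * Bring up the offload side of the adapter the first time an
 * offload-capable port is opened: enable offload mode, activate the
 * offload module, program port MTUs and the SMT, and notify registered
 * clients.
 */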
static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);
        struct t3c_data *td = T3C_DATA(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_work(&td->tid_release_task);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter, 0);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        pr_warn("Could not initialize offload capabilities\n");
        }

        netif_set_real_num_tx_queues(dev, pi->nqsets);
        err = netif_set_real_num_rx_queues(dev, pi->nqsets);
        if (err)
                return err;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
        return 0;
}

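/*
 * Common close path.  @on_wq is nonzero when we are called from a work
 * item, in which case cxgb_down() must not flush cxgb3_wq.
 */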
static int __cxgb_close(struct net_device *dev, int on_wq)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map)
                return 0;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter, on_wq);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        return __cxgb_close(dev, 0);
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &dev->stats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

        "LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));
        if (fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
1643}
1644
1645static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1646{
1647        if (stringset == ETH_SS_STATS)
1648                memcpy(data, stats_strings, sizeof(stats_strings));
1649}
1650
1651static unsigned long collect_sge_port_stats(struct adapter *adapter,
1652                                            struct port_info *p, int idx)
1653{
1654        int i;
1655        unsigned long tot = 0;
1656
1657        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1658                tot += adapter->sge.qs[i].port_stats[idx];
1659        return tot;
1660}
1661
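/*
 * Fill the ethtool stats array.  The values written here must stay in
 * exactly the same order as the names in stats_strings[] above.
 */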
1662static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1663                      u64 *data)
1664{
1665        struct port_info *pi = netdev_priv(dev);
1666        struct adapter *adapter = pi->adapter;
1667        const struct mac_stats *s;
1668
1669        spin_lock(&adapter->stats_lock);
1670        s = t3_mac_update_stats(&pi->mac);
1671        spin_unlock(&adapter->stats_lock);
1672
1673        *data++ = s->tx_octets;
1674        *data++ = s->tx_frames;
1675        *data++ = s->tx_mcast_frames;
1676        *data++ = s->tx_bcast_frames;
1677        *data++ = s->tx_pause;
1678        *data++ = s->tx_underrun;
1679        *data++ = s->tx_fifo_urun;
1680
1681        *data++ = s->tx_frames_64;
1682        *data++ = s->tx_frames_65_127;
1683        *data++ = s->tx_frames_128_255;
1684        *data++ = s->tx_frames_256_511;
1685        *data++ = s->tx_frames_512_1023;
1686        *data++ = s->tx_frames_1024_1518;
1687        *data++ = s->tx_frames_1519_max;
1688
1689        *data++ = s->rx_octets;
1690        *data++ = s->rx_frames;
1691        *data++ = s->rx_mcast_frames;
1692        *data++ = s->rx_bcast_frames;
1693        *data++ = s->rx_pause;
1694        *data++ = s->rx_fcs_errs;
1695        *data++ = s->rx_symbol_errs;
1696        *data++ = s->rx_short;
1697        *data++ = s->rx_jabber;
1698        *data++ = s->rx_too_long;
1699        *data++ = s->rx_fifo_ovfl;
1700
1701        *data++ = s->rx_frames_64;
1702        *data++ = s->rx_frames_65_127;
1703        *data++ = s->rx_frames_128_255;
1704        *data++ = s->rx_frames_256_511;
1705        *data++ = s->rx_frames_512_1023;
1706        *data++ = s->rx_frames_1024_1518;
1707        *data++ = s->rx_frames_1519_max;
1708
1709        *data++ = pi->phy.fifo_errors;
1710
1711        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1712        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1713        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1714        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1715        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
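        /* Legacy LRO counters, kept as zeros so the string table above
         * stays valid now that the driver uses GRO instead.
         */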
1716        *data++ = 0;
1717        *data++ = 0;
1718        *data++ = 0;
1719        *data++ = s->rx_cong_drops;
1720
1721        *data++ = s->num_toggled;
1722        *data++ = s->num_resets;
1723
1724        *data++ = s->link_faults;
1725}
1726
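/*
 * Copy the registers in [start, end] into the dump buffer at the offset
 * matching their address, one 32-bit word at a time.
 */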
1727static inline void reg_block_dump(struct adapter *ap, void *buf,
1728                                  unsigned int start, unsigned int end)
1729{
1730        u32 *p = buf + start;
1731
1732        for (; start <= end; start += sizeof(u32))
1733                *p++ = t3_read_reg(ap, start);
1734}
1735
1736static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1737                     void *buf)
1738{
1739        struct port_info *pi = netdev_priv(dev);
1740        struct adapter *ap = pi->adapter;
1741
1742        /*
1743         * Version scheme:
1744         * bits 0..9: chip version
1745         * bits 10..15: chip revision
1746         * bit 31: set for PCIe cards
1747         */
1748        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1749
1750        /*
1751         * We skip the MAC statistics registers because they are clear-on-read.
1752         * Also reading multi-register stats would need to synchronize with the
1753         * periodic mac stats accumulation.  Hard to justify the complexity.
1754         */
1755        memset(buf, 0, T3_REGMAP_SIZE);
1756        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1757        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1758        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1759        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1760        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1761        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1762                       XGM_REG(A_XGM_SERDES_STAT3, 1));
1763        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1764                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1765}
1766
1767static int restart_autoneg(struct net_device *dev)
1768{
1769        struct port_info *p = netdev_priv(dev);
1770
1771        if (!netif_running(dev))
1772                return -EAGAIN;
1773        if (p->link_config.autoneg != AUTONEG_ENABLE)
1774                return -EINVAL;
1775        p->phy.ops->autoneg_restart(&p->phy);
1776        return 0;
1777}
1778
1779static int set_phys_id(struct net_device *dev,
1780                       enum ethtool_phys_id_state state)
1781{
1782        struct port_info *pi = netdev_priv(dev);
1783        struct adapter *adapter = pi->adapter;
1784
1785        switch (state) {
1786        case ETHTOOL_ID_ACTIVE:
1787                return 1;       /* cycle on/off once per second */
1788
1789        case ETHTOOL_ID_OFF:
1790                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1791                break;
1792
1793        case ETHTOOL_ID_ON:
1794        case ETHTOOL_ID_INACTIVE:
1795                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1796                         F_GPIO0_OUT_VAL);
1797        }
1798
1799        return 0;
1800}
1801
1802static int get_link_ksettings(struct net_device *dev,
1803                              struct ethtool_link_ksettings *cmd)
1804{
1805        struct port_info *p = netdev_priv(dev);
1806        u32 supported;
1807
1808        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1809                                                p->link_config.supported);
1810        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1811                                                p->link_config.advertising);
1812
1813        if (netif_carrier_ok(dev)) {
1814                cmd->base.speed = p->link_config.speed;
1815                cmd->base.duplex = p->link_config.duplex;
1816        } else {
1817                cmd->base.speed = SPEED_UNKNOWN;
1818                cmd->base.duplex = DUPLEX_UNKNOWN;
1819        }
1820
1821        ethtool_convert_link_mode_to_legacy_u32(&supported,
1822                                                cmd->link_modes.supported);
1823
1824        cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1825        cmd->base.phy_address = p->phy.mdio.prtad;
1826        cmd->base.autoneg = p->link_config.autoneg;
1827        return 0;
1828}
1829
1830static int speed_duplex_to_caps(int speed, int duplex)
1831{
1832        int cap = 0;
1833
1834        switch (speed) {
1835        case SPEED_10:
1836                if (duplex == DUPLEX_FULL)
1837                        cap = SUPPORTED_10baseT_Full;
1838                else
1839                        cap = SUPPORTED_10baseT_Half;
1840                break;
1841        case SPEED_100:
1842                if (duplex == DUPLEX_FULL)
1843                        cap = SUPPORTED_100baseT_Full;
1844                else
1845                        cap = SUPPORTED_100baseT_Half;
1846                break;
1847        case SPEED_1000:
1848                if (duplex == DUPLEX_FULL)
1849                        cap = SUPPORTED_1000baseT_Full;
1850                else
1851                        cap = SUPPORTED_1000baseT_Half;
1852                break;
1853        case SPEED_10000:
1854                if (duplex == DUPLEX_FULL)
1855                        cap = SUPPORTED_10000baseT_Full;
1856        }
1857        return cap;
1858}
1859
1860#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1861                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1862                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1863                      ADVERTISED_10000baseT_Full)
1864
1865static int set_link_ksettings(struct net_device *dev,
1866                              const struct ethtool_link_ksettings *cmd)
1867{
1868        struct port_info *p = netdev_priv(dev);
1869        struct link_config *lc = &p->link_config;
1870        u32 advertising;
1871
1872        ethtool_convert_link_mode_to_legacy_u32(&advertising,
1873                                                cmd->link_modes.advertising);
1874
1875        if (!(lc->supported & SUPPORTED_Autoneg)) {
1876                /*
1877                 * PHY offers a single speed/duplex.  See if that's what's
1878                 * being requested.
1879                 */
1880                if (cmd->base.autoneg == AUTONEG_DISABLE) {
1881                        u32 speed = cmd->base.speed;
1882                        int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1883                        if (lc->supported & cap)
1884                                return 0;
1885                }
1886                return -EINVAL;
1887        }
1888
1889        if (cmd->base.autoneg == AUTONEG_DISABLE) {
1890                u32 speed = cmd->base.speed;
1891                int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1892
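                /*
                 * Forcing 1Gb/s is rejected: 1000BASE-T requires
                 * autonegotiation, so only autoneg'd gigabit is supported.
                 */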
1893                if (!(lc->supported & cap) || (speed == SPEED_1000))
1894                        return -EINVAL;
1895                lc->requested_speed = speed;
1896                lc->requested_duplex = cmd->base.duplex;
1897                lc->advertising = 0;
1898        } else {
1899                advertising &= ADVERTISED_MASK;
1900                advertising &= lc->supported;
1901                if (!advertising)
1902                        return -EINVAL;
1903                lc->requested_speed = SPEED_INVALID;
1904                lc->requested_duplex = DUPLEX_INVALID;
1905                lc->advertising = advertising | ADVERTISED_Autoneg;
1906        }
1907        lc->autoneg = cmd->base.autoneg;
1908        if (netif_running(dev))
1909                t3_link_start(&p->phy, &p->mac, lc);
1910        return 0;
1911}
1912
1913static void get_pauseparam(struct net_device *dev,
1914                           struct ethtool_pauseparam *epause)
1915{
1916        struct port_info *p = netdev_priv(dev);
1917
1918        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1919        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1920        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1921}
1922
1923static int set_pauseparam(struct net_device *dev,
1924                          struct ethtool_pauseparam *epause)
1925{
1926        struct port_info *p = netdev_priv(dev);
1927        struct link_config *lc = &p->link_config;
1928
1929        if (epause->autoneg == AUTONEG_DISABLE)
1930                lc->requested_fc = 0;
1931        else if (lc->supported & SUPPORTED_Autoneg)
1932                lc->requested_fc = PAUSE_AUTONEG;
1933        else
1934                return -EINVAL;
1935
1936        if (epause->rx_pause)
1937                lc->requested_fc |= PAUSE_RX;
1938        if (epause->tx_pause)
1939                lc->requested_fc |= PAUSE_TX;
1940        if (lc->autoneg == AUTONEG_ENABLE) {
1941                if (netif_running(dev))
1942                        t3_link_start(&p->phy, &p->mac, lc);
1943        } else {
1944                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1945                if (netif_running(dev))
1946                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1947        }
1948        return 0;
1949}
1950
1951static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1952{
1953        struct port_info *pi = netdev_priv(dev);
1954        struct adapter *adapter = pi->adapter;
1955        const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1956
1957        e->rx_max_pending = MAX_RX_BUFFERS;
1958        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1959        e->tx_max_pending = MAX_TXQ_ENTRIES;
1960
1961        e->rx_pending = q->fl_size;
1962        e->rx_mini_pending = q->rspq_size;
1963        e->rx_jumbo_pending = q->jumbo_size;
1964        e->tx_pending = q->txq_size[0];
1965}
1966
1967static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1968{
1969        struct port_info *pi = netdev_priv(dev);
1970        struct adapter *adapter = pi->adapter;
1971        struct qset_params *q;
1972        int i;
1973
1974        if (e->rx_pending > MAX_RX_BUFFERS ||
1975            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1976            e->tx_pending > MAX_TXQ_ENTRIES ||
1977            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1978            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1979            e->rx_pending < MIN_FL_ENTRIES ||
1980            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1981            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1982                return -EINVAL;
1983
1984        if (adapter->flags & FULL_INIT_DONE)
1985                return -EBUSY;
1986
1987        q = &adapter->params.sge.qset[pi->first_qset];
1988        for (i = 0; i < pi->nqsets; ++i, ++q) {
1989                q->rspq_size = e->rx_mini_pending;
1990                q->fl_size = e->rx_pending;
1991                q->jumbo_size = e->rx_jumbo_pending;
1992                q->txq_size[0] = e->tx_pending;
1993                q->txq_size[1] = e->tx_pending;
1994                q->txq_size[2] = e->tx_pending;
1995        }
1996        return 0;
1997}
1998
1999static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
2000                        struct kernel_ethtool_coalesce *kernel_coal,
2001                        struct netlink_ext_ack *extack)
2002{
2003        struct port_info *pi = netdev_priv(dev);
2004        struct adapter *adapter = pi->adapter;
2005        struct qset_params *qsp;
2006        struct sge_qset *qs;
2007        int i;
2008
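        /*
         * The SGE holdoff timer is programmed in ~100ns ticks, hence the
         * factor of 10 when range-checking the microsecond value.
         */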
2009        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2010                return -EINVAL;
2011
2012        for (i = 0; i < pi->nqsets; i++) {
2013                qsp = &adapter->params.sge.qset[i];
2014                qs = &adapter->sge.qs[i];
2015                qsp->coalesce_usecs = c->rx_coalesce_usecs;
2016                t3_update_qset_coalesce(qs, qsp);
2017        }
2018
2019        return 0;
2020}
2021
2022static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
2023                        struct kernel_ethtool_coalesce *kernel_coal,
2024                        struct netlink_ext_ack *extack)
2025{
2026        struct port_info *pi = netdev_priv(dev);
2027        struct adapter *adapter = pi->adapter;
2028        struct qset_params *q = adapter->params.sge.qset;
2029
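        /* set_coalesce() programs all queue sets to the same value, so
         * the first queue set's setting is representative.
         */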
2030        c->rx_coalesce_usecs = q->coalesce_usecs;
2031        return 0;
2032}
2033
2034static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2035                      u8 *data)
2036{
2037        struct port_info *pi = netdev_priv(dev);
2038        struct adapter *adapter = pi->adapter;
2039        int i, err = 0;
2040
2041        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2042        if (!buf)
2043                return -ENOMEM;
2044
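        /* The EEPROM is read in aligned 32-bit words; fetch the words
         * covering [offset, offset + len) and copy out the window the
         * caller asked for.
         */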
2045        e->magic = EEPROM_MAGIC;
2046        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2047                err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
2048
2049        if (!err)
2050                memcpy(data, buf + e->offset, e->len);
2051        kfree(buf);
2052        return err;
2053}
2054
2055static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2056                      u8 *data)
2057{
2058        struct port_info *pi = netdev_priv(dev);
2059        struct adapter *adapter = pi->adapter;
2060        u32 aligned_offset, aligned_len;
2061        __le32 *p;
2062        u8 *buf;
2063        int err;
2064
2065        if (eeprom->magic != EEPROM_MAGIC)
2066                return -EINVAL;
2067
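        /*
         * Writes happen in aligned 32-bit words.  For a partially aligned
         * request, read back the first and last words first so the bytes
         * outside the caller's range are preserved.
         */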
2068        aligned_offset = eeprom->offset & ~3;
2069        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2070
2071        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2072                buf = kmalloc(aligned_len, GFP_KERNEL);
2073                if (!buf)
2074                        return -ENOMEM;
2075                err = t3_seeprom_read(adapter, aligned_offset, (__le32 *)buf);
2076                if (!err && aligned_len > 4)
2077                        err = t3_seeprom_read(adapter,
2078                                              aligned_offset + aligned_len - 4,
2079                                              (__le32 *)&buf[aligned_len - 4]);
2080                if (err)
2081                        goto out;
2082                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2083        } else {
2084                buf = data;
        }
2085
2086        err = t3_seeprom_wp(adapter, 0);
2087        if (err)
2088                goto out;
2089
2090        for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2091                err = t3_seeprom_write(adapter, aligned_offset, *p);
2092                aligned_offset += 4;
2093        }
2094
2095        if (!err)
2096                err = t3_seeprom_wp(adapter, 1);
2097out:
2098        if (buf != data)
2099                kfree(buf);
2100        return err;
2101}
2102
2103static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2104{
2105        wol->supported = 0;
2106        wol->wolopts = 0;
2107        memset(&wol->sopass, 0, sizeof(wol->sopass));
2108}
2109
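/*
 * Standard ethtool entry points.  For example (illustrative only; the
 * "eth0" naming is an assumption about the local setup):
 *
 *   ethtool -C eth0 rx-usecs 50     ->  set_coalesce()
 *   ethtool -g eth0                 ->  get_sge_param()
 *   ethtool -p eth0 5               ->  set_phys_id()
 */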
2110static const struct ethtool_ops cxgb_ethtool_ops = {
2111        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
2112        .get_drvinfo = get_drvinfo,
2113        .get_msglevel = get_msglevel,
2114        .set_msglevel = set_msglevel,
2115        .get_ringparam = get_sge_param,
2116        .set_ringparam = set_sge_param,
2117        .get_coalesce = get_coalesce,
2118        .set_coalesce = set_coalesce,
2119        .get_eeprom_len = get_eeprom_len,
2120        .get_eeprom = get_eeprom,
2121        .set_eeprom = set_eeprom,
2122        .get_pauseparam = get_pauseparam,
2123        .set_pauseparam = set_pauseparam,
2124        .get_link = ethtool_op_get_link,
2125        .get_strings = get_strings,
2126        .set_phys_id = set_phys_id,
2127        .nway_reset = restart_autoneg,
2128        .get_sset_count = get_sset_count,
2129        .get_ethtool_stats = get_stats,
2130        .get_regs_len = get_regs_len,
2131        .get_regs = get_regs,
2132        .get_wol = get_wol,
2133        .get_link_ksettings = get_link_ksettings,
2134        .set_link_ksettings = set_link_ksettings,
2135};
2136
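/*
 * Range check an ioctl parameter; negative values are wildcards meaning
 * "leave this setting unchanged" and therefore always pass.
 */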
2137static int in_range(int val, int lo, int hi)
2138{
2139        return val < 0 || (val <= hi && val >= lo);
2140}
2141
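/*
 * All Chelsio-private ioctls arrive as SIOCCHIOCTL; the real sub-command
 * is the leading u32 of the user buffer, so it is read first and cmd is
 * reused to hold it.  (A user-space caller would, roughly, point ifr_data
 * at a struct such as ch_reg with its .cmd field filled in and issue the
 * ioctl.)
 */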
2142static int cxgb_siocdevprivate(struct net_device *dev,
2143                               struct ifreq *ifreq,
2144                               void __user *useraddr,
2145                               int cmd)
2146{
2147        struct port_info *pi = netdev_priv(dev);
2148        struct adapter *adapter = pi->adapter;
2149        int ret;
2150
2151        if (cmd != SIOCCHIOCTL)
2152                return -EOPNOTSUPP;
2153
2154        if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2155                return -EFAULT;
2156
2157        switch (cmd) {
2158        case CHELSIO_SET_QSET_PARAMS:{
2159                int i;
2160                struct qset_params *q;
2161                struct ch_qset_params t;
2162                int q1 = pi->first_qset;
2163                int nqsets = pi->nqsets;
2164
2165                if (!capable(CAP_NET_ADMIN))
2166                        return -EPERM;
2167                if (copy_from_user(&t, useraddr, sizeof(t)))
2168                        return -EFAULT;
2169                if (t.cmd != CHELSIO_SET_QSET_PARAMS)
2170                        return -EINVAL;
2171                if (t.qset_idx >= SGE_QSETS)
2172                        return -EINVAL;
2173                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2174                    !in_range(t.cong_thres, 0, 255) ||
2175                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2176                              MAX_TXQ_ENTRIES) ||
2177                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2178                              MAX_TXQ_ENTRIES) ||
2179                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2180                              MAX_CTRL_TXQ_ENTRIES) ||
2181                    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2182                              MAX_RX_BUFFERS) ||
2183                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2184                              MAX_RX_JUMBO_BUFFERS) ||
2185                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2186                              MAX_RSPQ_ENTRIES))
2187                        return -EINVAL;
2188
2189                if ((adapter->flags & FULL_INIT_DONE) &&
2190                        (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2191                        t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2192                        t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2193                        t.polling >= 0 || t.cong_thres >= 0))
2194                        return -EBUSY;
2195
2196                /* Allow setting of any available qset when offload enabled */
2197                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2198                        q1 = 0;
2199                        for_each_port(adapter, i) {
2200                                pi = adap2pinfo(adapter, i);
2201                                nqsets += pi->first_qset + pi->nqsets;
2202                        }
2203                }
2204
2205                if (t.qset_idx < q1)
2206                        return -EINVAL;
2207                if (t.qset_idx > q1 + nqsets - 1)
2208                        return -EINVAL;
2209
2210                q = &adapter->params.sge.qset[t.qset_idx];
2211
2212                if (t.rspq_size >= 0)
2213                        q->rspq_size = t.rspq_size;
2214                if (t.fl_size[0] >= 0)
2215                        q->fl_size = t.fl_size[0];
2216                if (t.fl_size[1] >= 0)
2217                        q->jumbo_size = t.fl_size[1];
2218                if (t.txq_size[0] >= 0)
2219                        q->txq_size[0] = t.txq_size[0];
2220                if (t.txq_size[1] >= 0)
2221                        q->txq_size[1] = t.txq_size[1];
2222                if (t.txq_size[2] >= 0)
2223                        q->txq_size[2] = t.txq_size[2];
2224                if (t.cong_thres >= 0)
2225                        q->cong_thres = t.cong_thres;
2226                if (t.intr_lat >= 0) {
2227                        struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
2229
2230                        q->coalesce_usecs = t.intr_lat;
2231                        t3_update_qset_coalesce(qs, q);
2232                }
2233                if (t.polling >= 0) {
2234                        if (adapter->flags & USING_MSIX)
2235                                q->polling = t.polling;
2236                        else {
2237                                /* No polling with INTx for T3A */
2238                                if (adapter->params.rev == 0 &&
2239                                        !(adapter->flags & USING_MSI))
2240                                        t.polling = 0;
2241
2242                                for (i = 0; i < SGE_QSETS; i++) {
2243                                        q = &adapter->params.sge.qset[i];
2245                                        q->polling = t.polling;
2246                                }
2247                        }
2248                }
2249
2250                if (t.lro >= 0) {
2251                        if (t.lro)
2252                                dev->wanted_features |= NETIF_F_GRO;
2253                        else
2254                                dev->wanted_features &= ~NETIF_F_GRO;
2255                        netdev_update_features(dev);
2256                }
2257
2258                break;
2259        }
2260        case CHELSIO_GET_QSET_PARAMS:{
2261                struct qset_params *q;
2262                struct ch_qset_params t;
2263                int q1 = pi->first_qset;
2264                int nqsets = pi->nqsets;
2265                int i;
2266
2267                if (copy_from_user(&t, useraddr, sizeof(t)))
2268                        return -EFAULT;
2269
2270                if (t.cmd != CHELSIO_GET_QSET_PARAMS)
2271                        return -EINVAL;
2272
2273                /* Display qsets for all ports when offload enabled */
2274                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2275                        q1 = 0;
2276                        for_each_port(adapter, i) {
2277                                pi = adap2pinfo(adapter, i);
2278                                nqsets = pi->first_qset + pi->nqsets;
2279                        }
2280                }
2281
2282                if (t.qset_idx >= nqsets)
2283                        return -EINVAL;
2284                t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2285
2286                q = &adapter->params.sge.qset[q1 + t.qset_idx];
2287                t.rspq_size = q->rspq_size;
2288                t.txq_size[0] = q->txq_size[0];
2289                t.txq_size[1] = q->txq_size[1];
2290                t.txq_size[2] = q->txq_size[2];
2291                t.fl_size[0] = q->fl_size;
2292                t.fl_size[1] = q->jumbo_size;
2293                t.polling = q->polling;
2294                t.lro = !!(dev->features & NETIF_F_GRO);
2295                t.intr_lat = q->coalesce_usecs;
2296                t.cong_thres = q->cong_thres;
2297                t.qnum = q1;
2298
2299                if (adapter->flags & USING_MSIX)
2300                        t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2301                else
2302                        t.vector = adapter->pdev->irq;
2303
2304                if (copy_to_user(useraddr, &t, sizeof(t)))
2305                        return -EFAULT;
2306                break;
2307        }
2308        case CHELSIO_SET_QSET_NUM:{
2309                struct ch_reg edata;
2310                unsigned int i, first_qset = 0, other_qsets = 0;
2311
2312                if (!capable(CAP_NET_ADMIN))
2313                        return -EPERM;
2314                if (adapter->flags & FULL_INIT_DONE)
2315                        return -EBUSY;
2316                if (copy_from_user(&edata, useraddr, sizeof(edata)))
2317                        return -EFAULT;
2318                if (edata.cmd != CHELSIO_SET_QSET_NUM)
2319                        return -EINVAL;
2320                if (edata.val < 1 ||
2321                        (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2322                        return -EINVAL;
2323
2324                for_each_port(adapter, i)
2325                        if (adapter->port[i] && adapter->port[i] != dev)
2326                                other_qsets += adap2pinfo(adapter, i)->nqsets;
2327
2328                if (edata.val + other_qsets > SGE_QSETS)
2329                        return -EINVAL;
2330
2331                pi->nqsets = edata.val;
2332
2333                for_each_port(adapter, i)
2334                        if (adapter->port[i]) {
2335                                pi = adap2pinfo(adapter, i);
2336                                pi->first_qset = first_qset;
2337                                first_qset += pi->nqsets;
2338                        }
2339                break;
2340        }
2341        case CHELSIO_GET_QSET_NUM:{
2342                struct ch_reg edata;
2343
2344                memset(&edata, 0, sizeof(struct ch_reg));
2345
2346                edata.cmd = CHELSIO_GET_QSET_NUM;
2347                edata.val = pi->nqsets;
2348                if (copy_to_user(useraddr, &edata, sizeof(edata)))
2349                        return -EFAULT;
2350                break;
2351        }
2352        case CHELSIO_LOAD_FW:{
2353                u8 *fw_data;
2354                struct ch_mem_range t;
2355
2356                if (!capable(CAP_SYS_RAWIO))
2357                        return -EPERM;
2358                if (copy_from_user(&t, useraddr, sizeof(t)))
2359                        return -EFAULT;
2360                if (t.cmd != CHELSIO_LOAD_FW)
2361                        return -EINVAL;
2362                /* Check t.len sanity ? */
2363                fw_data = memdup_user(useraddr + sizeof(t), t.len);
2364                if (IS_ERR(fw_data))
2365                        return PTR_ERR(fw_data);
2366
2367                ret = t3_load_fw(adapter, fw_data, t.len);
2368                kfree(fw_data);
2369                if (ret)
2370                        return ret;
2371                break;
2372        }
2373        case CHELSIO_SETMTUTAB:{
2374                struct ch_mtus m;
2375                int i;
2376
2377                if (!is_offload(adapter))
2378                        return -EOPNOTSUPP;
2379                if (!capable(CAP_NET_ADMIN))
2380                        return -EPERM;
2381                if (offload_running(adapter))
2382                        return -EBUSY;
2383                if (copy_from_user(&m, useraddr, sizeof(m)))
2384                        return -EFAULT;
2385                if (m.cmd != CHELSIO_SETMTUTAB)
2386                        return -EINVAL;
2387                if (m.nmtus != NMTUS)
2388                        return -EINVAL;
2389                if (m.mtus[0] < 81)     /* accommodate SACK */
2390                        return -EINVAL;
2391
2392                /* MTUs must be in ascending order */
2393                for (i = 1; i < NMTUS; ++i)
2394                        if (m.mtus[i] < m.mtus[i - 1])
2395                                return -EINVAL;
2396
2397                memcpy(adapter->params.mtus, m.mtus,
2398                        sizeof(adapter->params.mtus));
2399                break;
2400        }
2401        case CHELSIO_GET_PM:{
2402                struct tp_params *p = &adapter->params.tp;
2403                struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2404
2405                if (!is_offload(adapter))
2406                        return -EOPNOTSUPP;
2407                m.tx_pg_sz = p->tx_pg_size;
2408                m.tx_num_pg = p->tx_num_pgs;
2409                m.rx_pg_sz = p->rx_pg_size;
2410                m.rx_num_pg = p->rx_num_pgs;
2411                m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2412                if (copy_to_user(useraddr, &m, sizeof(m)))
2413                        return -EFAULT;
2414                break;
2415        }
2416        case CHELSIO_SET_PM:{
2417                struct ch_pm m;
2418                struct tp_params *p = &adapter->params.tp;
2419
2420                if (!is_offload(adapter))
2421                        return -EOPNOTSUPP;
2422                if (!capable(CAP_NET_ADMIN))
2423                        return -EPERM;
2424                if (adapter->flags & FULL_INIT_DONE)
2425                        return -EBUSY;
2426                if (copy_from_user(&m, useraddr, sizeof(m)))
2427                        return -EFAULT;
2428                if (m.cmd != CHELSIO_SET_PM)
2429                        return -EINVAL;
2430                if (!is_power_of_2(m.rx_pg_sz) ||
2431                        !is_power_of_2(m.tx_pg_sz))
2432                        return -EINVAL; /* not power of 2 */
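                /* The HW only supports a few discrete page sizes: 16KB or
                 * 64KB for Rx, and powers of 4 from 16KB to 16MB for Tx;
                 * the bitmasks below encode exactly those sizes.
                 */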
2433                if (!(m.rx_pg_sz & 0x14000))
2434                        return -EINVAL; /* not 16KB or 64KB */
2435                if (!(m.tx_pg_sz & 0x1554000))
2436                        return -EINVAL;
2437                if (m.tx_num_pg == -1)
2438                        m.tx_num_pg = p->tx_num_pgs;
2439                if (m.rx_num_pg == -1)
2440                        m.rx_num_pg = p->rx_num_pgs;
2441                if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2442                        return -EINVAL;
2443                if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2444                        m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2445                        return -EINVAL;
2446                p->rx_pg_size = m.rx_pg_sz;
2447                p->tx_pg_size = m.tx_pg_sz;
2448                p->rx_num_pgs = m.rx_num_pg;
2449                p->tx_num_pgs = m.tx_num_pg;
2450                break;
2451        }
2452        case CHELSIO_GET_MEM:{
2453                struct ch_mem_range t;
2454                struct mc7 *mem;
2455                u64 buf[32];
2456
2457                if (!is_offload(adapter))
2458                        return -EOPNOTSUPP;
2459                if (!capable(CAP_NET_ADMIN))
2460                        return -EPERM;
2461                if (!(adapter->flags & FULL_INIT_DONE))
2462                        return -EIO;    /* need the memory controllers */
2463                if (copy_from_user(&t, useraddr, sizeof(t)))
2464                        return -EFAULT;
2465                if (t.cmd != CHELSIO_GET_MEM)
2466                        return -EINVAL;
2467                if ((t.addr & 7) || (t.len & 7))
2468                        return -EINVAL;
2469                if (t.mem_id == MEM_CM)
2470                        mem = &adapter->cm;
2471                else if (t.mem_id == MEM_PMRX)
2472                        mem = &adapter->pmrx;
2473                else if (t.mem_id == MEM_PMTX)
2474                        mem = &adapter->pmtx;
2475                else
2476                        return -EINVAL;
2477
2478                /*
2479                 * Version scheme:
2480                 * bits 0..9: chip version
2481                 * bits 10..15: chip revision
2482                 */
2483                t.version = 3 | (adapter->params.rev << 10);
2484                if (copy_to_user(useraddr, &t, sizeof(t)))
2485                        return -EFAULT;
2486
2487                /*
2488                 * Read 256 bytes at a time as len can be large and we don't
2489                 * want to use huge intermediate buffers.
2490                 */
2491                useraddr += sizeof(t);  /* advance to start of buffer */
2492                while (t.len) {
2493                        unsigned int chunk = min_t(unsigned int, t.len,
2494                                                   sizeof(buf));
2495
2496                        ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
2499                        if (ret)
2500                                return ret;
2501                        if (copy_to_user(useraddr, buf, chunk))
2502                                return -EFAULT;
2503                        useraddr += chunk;
2504                        t.addr += chunk;
2505                        t.len -= chunk;
2506                }
2507                break;
2508        }
2509        case CHELSIO_SET_TRACE_FILTER:{
2510                struct ch_trace t;
2511                const struct trace_params *tp;
2512
2513                if (!capable(CAP_NET_ADMIN))
2514                        return -EPERM;
2515                if (!offload_running(adapter))
2516                        return -EAGAIN;
2517                if (copy_from_user(&t, useraddr, sizeof(t)))
2518                        return -EFAULT;
2519                if (t.cmd != CHELSIO_SET_TRACE_FILTER)
2520                        return -EINVAL;
2521
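                /* The filter fields of ch_trace, from sip onwards, mirror
                 * struct trace_params, so they can be aliased directly.
                 */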
2522                tp = (const struct trace_params *)&t.sip;
2523                if (t.config_tx)
2524                        t3_config_trace_filter(adapter, tp, 0,
2525                                                t.invert_match,
2526                                                t.trace_tx);
2527                if (t.config_rx)
2528                        t3_config_trace_filter(adapter, tp, 1,
2529                                                t.invert_match,
2530                                                t.trace_rx);
2531                break;
2532        }
2533        default:
2534                return -EOPNOTSUPP;
2535        }
2536        return 0;
2537}
2538
2539static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2540{
2541        struct mii_ioctl_data *data = if_mii(req);
2542        struct port_info *pi = netdev_priv(dev);
2543        struct adapter *adapter = pi->adapter;
2544
2545        switch (cmd) {
2546        case SIOCGMIIREG:
2547        case SIOCSMIIREG:
2548                /* Convert phy_id from older PRTAD/DEVAD format */
2549                if (is_10G(adapter) &&
2550                    !mdio_phy_id_is_c45(data->phy_id) &&
2551                    (data->phy_id & 0x1f00) &&
2552                    !(data->phy_id & 0xe0e0))
2553                        data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2554                                                       data->phy_id & 0x1f);
2555                fallthrough;
2556        case SIOCGMIIPHY:
2557                return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2558        default:
2559                return -EOPNOTSUPP;
2560        }
2561}
2562
2563static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2564{
2565        struct port_info *pi = netdev_priv(dev);
2566        struct adapter *adapter = pi->adapter;
2567        int ret;
2568
2569        ret = t3_mac_set_mtu(&pi->mac, new_mtu);
2570        if (ret)
                return ret;
2571        dev->mtu = new_mtu;
2572        init_port_mtus(adapter);
2573        if (adapter->params.rev == 0 && offload_running(adapter))
2574                t3_load_mtus(adapter, adapter->params.mtus,
2575                             adapter->params.a_wnd, adapter->params.b_wnd,
2576                             adapter->port[0]->mtu);
2577        return 0;
2578}
2579
2580static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2581{
2582        struct port_info *pi = netdev_priv(dev);
2583        struct adapter *adapter = pi->adapter;
2584        struct sockaddr *addr = p;
2585
2586        if (!is_valid_ether_addr(addr->sa_data))
2587                return -EADDRNOTAVAIL;
2588
2589        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2590        t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2591        if (offload_running(adapter))
2592                write_smt_entry(adapter, pi->port_id);
2593        return 0;
2594}
2595
2596static netdev_features_t cxgb_fix_features(struct net_device *dev,
2597        netdev_features_t features)
2598{
2599        /*
2600         * Since there is no support for separate rx/tx vlan accel
2601         * enable/disable, make sure the tx flag always matches the rx flag.
2602         */
2603        if (features & NETIF_F_HW_VLAN_CTAG_RX)
2604                features |= NETIF_F_HW_VLAN_CTAG_TX;
2605        else
2606                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2607
2608        return features;
2609}
2610
2611static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2612{
2613        netdev_features_t changed = dev->features ^ features;
2614
2615        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2616                cxgb_vlan_mode(dev, features);
2617
2618        return 0;
2619}
2620
2621#ifdef CONFIG_NET_POLL_CONTROLLER
2622static void cxgb_netpoll(struct net_device *dev)
2623{
2624        struct port_info *pi = netdev_priv(dev);
2625        struct adapter *adapter = pi->adapter;
2626        int qidx;
2627
2628        for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2629                struct sge_qset *qs = &adapter->sge.qs[qidx];
2630                void *source;
2631
2632                if (adapter->flags & USING_MSIX)
2633                        source = qs;
2634                else
2635                        source = adapter;
2636
2637                t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2638        }
2639}
2640#endif
2641
2642/*
2643 * Periodic accumulation of MAC statistics.
2644 */
2645static void mac_stats_update(struct adapter *adapter)
2646{
2647        int i;
2648
2649        for_each_port(adapter, i) {
2650                struct net_device *dev = adapter->port[i];
2651                struct port_info *p = netdev_priv(dev);
2652
2653                if (netif_running(dev)) {
2654                        spin_lock(&adapter->stats_lock);
2655                        t3_mac_update_stats(&p->mac);
2656                        spin_unlock(&adapter->stats_lock);
2657                }
2658        }
2659}
2660
2661static void check_link_status(struct adapter *adapter)
2662{
2663        int i;
2664
2665        for_each_port(adapter, i) {
2666                struct net_device *dev = adapter->port[i];
2667                struct port_info *p = netdev_priv(dev);
2668                int link_fault;
2669
2670                spin_lock_irq(&adapter->work_lock);
2671                link_fault = p->link_fault;
2672                spin_unlock_irq(&adapter->work_lock);
2673
2674                if (link_fault) {
2675                        t3_link_fault(adapter, i);
2676                        continue;
2677                }
2678
2679                if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2680                        t3_xgm_intr_disable(adapter, i);
2681                        t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2682
2683                        t3_link_changed(adapter, i);
2684                        t3_xgm_intr_enable(adapter, i);
2685                }
2686        }
2687}
2688
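/*
 * Periodic watchdog for T3B2 MACs: a return of 1 from the watchdog means
 * the TX path was toggled to recover, 2 means the MAC was reset and must
 * be fully reprogrammed (MTU, address, rx mode, link) before re-enabling.
 */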
2689static void check_t3b2_mac(struct adapter *adapter)
2690{
2691        int i;
2692
2693        if (!rtnl_trylock())    /* synchronize with ifdown */
2694                return;
2695
2696        for_each_port(adapter, i) {
2697                struct net_device *dev = adapter->port[i];
2698                struct port_info *p = netdev_priv(dev);
2699                int status;
2700
2701                if (!netif_running(dev))
2702                        continue;
2703
2704                status = 0;
2705                if (netif_carrier_ok(dev))
2706                        status = t3b2_mac_watchdog_task(&p->mac);
2707                if (status == 1)
2708                        p->mac.stats.num_toggled++;
2709                else if (status == 2) {
2710                        struct cmac *mac = &p->mac;
2711
2712                        t3_mac_set_mtu(mac, dev->mtu);
2713                        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2714                        cxgb_set_rxmode(dev);
2715                        t3_link_start(&p->phy, mac, &p->link_config);
2716                        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2717                        t3_port_intr_enable(adapter, p->port_id);
2718                        p->mac.stats.num_resets++;
2719                }
2720        }
2721        rtnl_unlock();
2722}
2723
2725static void t3_adap_check_task(struct work_struct *work)
2726{
2727        struct adapter *adapter = container_of(work, struct adapter,
2728                                               adap_check_task.work);
2729        const struct adapter_params *p = &adapter->params;
2730        int port;
2731        unsigned int v, status, reset;
2732
2733        adapter->check_task_cnt++;
2734
2735        check_link_status(adapter);
2736
2737        /* Accumulate MAC stats if needed */
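        /* (linkpoll_period is in units of 0.1s and stats_update_period in
         * seconds, hence the divide by 10)
         */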
2738        if (!p->linkpoll_period ||
2739            (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2740            p->stats_update_period) {
2741                mac_stats_update(adapter);
2742                adapter->check_task_cnt = 0;
2743        }
2744
2745        if (p->rev == T3_REV_B2)
2746                check_t3b2_mac(adapter);
2747
2748        /*
2749         * Scan the XGMACs to check for various conditions which we want to
2750         * monitor in a periodic polling manner rather than via an interrupt
2751         * condition.  This is used for conditions which would otherwise flood
2752         * the system with interrupts and we only really need to know that the
2753         * conditions are "happening" ...  For each condition we count the
2754         * detection of the condition and reset it for the next polling loop.
2755         */
2756        for_each_port(adapter, port) {
2757                struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2758                u32 cause;
2759
2760                cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2761                reset = 0;
2762                if (cause & F_RXFIFO_OVERFLOW) {
2763                        mac->stats.rx_fifo_ovfl++;
2764                        reset |= F_RXFIFO_OVERFLOW;
2765                }
2766
2767                t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2768        }
2769
2770        /*
2771         * We do the same as above for FL_EMPTY interrupts.
2772         */
2773        status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2774        reset = 0;
2775
2776        if (status & F_FLEMPTY) {
2777                struct sge_qset *qs = &adapter->sge.qs[0];
2778                int i = 0;
2779
2780                reset |= F_FLEMPTY;
2781
2782                v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2783                    0xffff;
2784
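                /* One status bit per free list: even bits are FL0, odd
                 * bits FL1 of successive queue sets; walk them pairwise.
                 */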
2785                while (v) {
2786                        qs->fl[i].empty += (v & 1);
2787                        if (i)
2788                                qs++;
2789                        i ^= 1;
2790                        v >>= 1;
2791                }
2792        }
2793
2794        t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2795
2796        /* Schedule the next check update if any port is active. */
2797        spin_lock_irq(&adapter->work_lock);
2798        if (adapter->open_device_map & PORT_MASK)
2799                schedule_chk_task(adapter);
2800        spin_unlock_irq(&adapter->work_lock);
2801}
2802
2803static void db_full_task(struct work_struct *work)
2804{
2805        struct adapter *adapter = container_of(work, struct adapter,
2806                                               db_full_task);
2807
2808        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2809}
2810
2811static void db_empty_task(struct work_struct *work)
2812{
2813        struct adapter *adapter = container_of(work, struct adapter,
2814                                               db_empty_task);
2815
2816        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2817}
2818
2819static void db_drop_task(struct work_struct *work)
2820{
2821        struct adapter *adapter = container_of(work, struct adapter,
2822                                               db_drop_task);
2823        unsigned long delay = 1000;
2824        unsigned short r;
2825
2826        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2827
2828        /*
2829         * Sleep a while before ringing the driver qset dbs.
2830         * The delay is between 1000 and 2023 usecs.
2831         */
2832        get_random_bytes(&r, 2);
2833        delay += r & 1023;
2834        set_current_state(TASK_UNINTERRUPTIBLE);
2835        schedule_timeout(usecs_to_jiffies(delay));
2836        ring_dbs(adapter);
2837}
2838
2839/*
2840 * Processes external (PHY) interrupts in process context.
2841 */
2842static void ext_intr_task(struct work_struct *work)
2843{
2844        struct adapter *adapter = container_of(work, struct adapter,
2845                                               ext_intr_handler_task);
2846        int i;
2847
2848        /* Disable link fault interrupts */
2849        for_each_port(adapter, i) {
2850                struct net_device *dev = adapter->port[i];
2851                struct port_info *p = netdev_priv(dev);
2852
2853                t3_xgm_intr_disable(adapter, i);
2854                t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2855        }
2856
2857        /* Re-enable link fault interrupts */
2858        t3_phy_intr_handler(adapter);
2859
2860        for_each_port(adapter, i)
2861                t3_xgm_intr_enable(adapter, i);
2862
2863        /* Now reenable external interrupts */
2864        spin_lock_irq(&adapter->work_lock);
2865        if (adapter->slow_intr_mask) {
2866                adapter->slow_intr_mask |= F_T3DBG;
2867                t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2868                t3_write_reg(adapter, A_PL_INT_ENABLE0,
2869                             adapter->slow_intr_mask);
2870        }
2871        spin_unlock_irq(&adapter->work_lock);
2872}
2873
2874/*
2875 * Interrupt-context handler for external (PHY) interrupts.
2876 */
2877void t3_os_ext_intr_handler(struct adapter *adapter)
2878{
2879        /*
2880         * Schedule a task to handle external interrupts as they may be slow
2881         * and we use a mutex to protect MDIO registers.  We disable PHY
2882         * interrupts in the meantime and let the task reenable them when
2883         * it's done.
2884         */
2885        spin_lock(&adapter->work_lock);
2886        if (adapter->slow_intr_mask) {
2887                adapter->slow_intr_mask &= ~F_T3DBG;
2888                t3_write_reg(adapter, A_PL_INT_ENABLE0,
2889                             adapter->slow_intr_mask);
2890                queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2891        }
2892        spin_unlock(&adapter->work_lock);
2893}
2894
2895void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2896{
2897        struct net_device *netdev = adapter->port[port_id];
2898        struct port_info *pi = netdev_priv(netdev);
2899
2900        spin_lock(&adapter->work_lock);
2901        pi->link_fault = 1;
2902        spin_unlock(&adapter->work_lock);
2903}
2904
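/*
 * Quiesce the adapter after a fatal or PCI error: notify offload users,
 * close every running port, stop the SGE timers and, if requested, reset
 * the chip before disabling the PCI device.
 */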
2905static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2906{
2907        int i, ret = 0;
2908
2909        if (is_offload(adapter) &&
2910            test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2911                cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2912                offload_close(&adapter->tdev);
2913        }
2914
2915        /* Stop all ports */
2916        for_each_port(adapter, i) {
2917                struct net_device *netdev = adapter->port[i];
2918
2919                if (netif_running(netdev))
2920                        __cxgb_close(netdev, on_wq);
2921        }
2922
2923        /* Stop SGE timers */
2924        t3_stop_sge_timers(adapter);
2925
2926        adapter->flags &= ~FULL_INIT_DONE;
2927
2928        if (reset)
2929                ret = t3_reset_adapter(adapter);
2930
2931        pci_disable_device(adapter->pdev);
2932
2933        return ret;
2934}
2935
2936static int t3_reenable_adapter(struct adapter *adapter)
2937{
2938        if (pci_enable_device(adapter->pdev)) {
2939                dev_err(&adapter->pdev->dev,
2940                        "Cannot re-enable PCI device after reset.\n");
2941                goto err;
2942        }
2943        pci_set_master(adapter->pdev);
2944        pci_restore_state(adapter->pdev);
2945        pci_save_state(adapter->pdev);
2946
2947        /* Free sge resources */
2948        t3_free_sge_resources(adapter);
2949
2950        if (t3_replay_prep_adapter(adapter))
2951                goto err;
2952
2953        return 0;
2954err:
2955        return -1;
2956}
2957
2958static void t3_resume_ports(struct adapter *adapter)
2959{
2960        int i;
2961
2962        /* Restart the ports */
2963        for_each_port(adapter, i) {
2964                struct net_device *netdev = adapter->port[i];
2965
2966                if (netif_running(netdev)) {
2967                        if (cxgb_open(netdev)) {
2968                                dev_err(&adapter->pdev->dev,
2969                                        "can't bring device back up after reset\n");
2971                                continue;
2972                        }
2973                }
2974        }
2975
2976        if (is_offload(adapter) && !ofld_disable)
2977                cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2978}
2979
2980/*
2981 * Process a fatal error: bring the ports down, reset the chip, and
2982 * bring the ports back up.
2983 */
2984static void fatal_error_task(struct work_struct *work)
2985{
2986        struct adapter *adapter = container_of(work, struct adapter,
2987                                               fatal_error_handler_task);
2988        int err = 0;
2989
2990        rtnl_lock();
2991        err = t3_adapter_error(adapter, 1, 1);
2992        if (!err)
2993                err = t3_reenable_adapter(adapter);
2994        if (!err)
2995                t3_resume_ports(adapter);
2996
2997        CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2998        rtnl_unlock();
2999}
3000
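/*
 * Fatal error entry point: stop SGE DMA and both XGMAC TX/RX paths at
 * once, disable interrupts, and defer the full chip reset to
 * fatal_error_task() on the cxgb3 workqueue.
 */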
void t3_fatal_err(struct adapter *adapter)
{
        unsigned int fw_status[4];

        if (adapter->flags & FULL_INIT_DONE) {
                t3_sge_stop_dma(adapter);
                t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
                t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
                t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
                t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

                spin_lock(&adapter->work_lock);
                t3_intr_disable(adapter);
                queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
                spin_unlock(&adapter->work_lock);
        }
        CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
        if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
                CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
                         fw_status[0], fw_status[1],
                         fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
                                             pci_channel_state_t state)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        t3_adapter_error(adapter, 0, 0);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (!t3_reenable_adapter(adapter))
                return PCI_ERS_RESULT_RECOVERED;

        return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
                 t3_read_reg(adapter, A_PCIE_PEX_ERR));

        rtnl_lock();
        t3_resume_ports(adapter);
        rtnl_unlock();
}

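/*
 * PCI error recovery (AER) glue: error_detected() quiesces the adapter
 * without resetting it, slot_reset() brings it back after the bus reset,
 * and resume() reopens the ports once the core says recovery is complete.
 */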
static const struct pci_error_handlers t3_err_handler = {
        .error_detected = t3_io_error_detected,
        .slot_reset = t3_io_slot_reset,
        .resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
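/*
 * Worked example (figures assumed): a two-port, rev > 0 adapter granted 9
 * MSI-X vectors starts with nqsets = 8; 2 * 8 exceeds SGE_QSETS (8 on T3),
 * so nqsets drops to 4 per port.  With at least 4 default RSS queues this
 * stands, giving port 0 qsets 0-3 and port 1 qsets 4-7.
 */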
static void set_nqsets(struct adapter *adap)
{
        int i, j = 0;
        int num_cpus = netif_get_num_default_rss_queues();
        int hwports = adap->params.nports;
        int nqsets = adap->msix_nvectors - 1;

        if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
                if (hwports == 2 &&
                    (hwports * nqsets > SGE_QSETS ||
                     num_cpus >= nqsets / hwports))
                        nqsets /= hwports;
                if (nqsets > num_cpus)
                        nqsets = num_cpus;
                if (nqsets < 1 || hwports == 4)
                        nqsets = 1;
        } else {
                nqsets = 1;
        }

        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);

                pi->first_qset = j;
                pi->nqsets = nqsets;
                j = pi->first_qset + nqsets;

                dev_info(&adap->pdev->dev,
                         "Port %d using %d queue sets.\n", i, nqsets);
        }
}

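/*
 * MSI-X sizing sketch: one vector per SGE qset plus one for the slow path,
 * but accept as few as nports + 1; pci_enable_msix_range() reports how many
 * vectors were actually granted, and set_nqsets() above adapts to that.
 */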
static int cxgb_enable_msix(struct adapter *adap)
{
        struct msix_entry entries[SGE_QSETS + 1];
        int vectors;
        int i;

        vectors = ARRAY_SIZE(entries);
        for (i = 0; i < vectors; ++i)
                entries[i].entry = i;

        vectors = pci_enable_msix_range(adap->pdev, entries,
                                        adap->params.nports + 1, vectors);
        if (vectors < 0)
                return vectors;

        for (i = 0; i < vectors; ++i)
                adap->msix_info[i].vec = entries[i].vector;
        adap->msix_nvectors = vectors;

        return 0;
}

static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
{
        static const char * const pci_variant[] = {
                "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
        };

        int i;
        char buf[80];

        if (is_pcie(adap))
                snprintf(buf, sizeof(buf), "%s x%d",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.width);
        else
                snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.speed, adap->params.pci.width);

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                if (!test_bit(i, &adap->registered_device_map))
                        continue;
                netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
                            ai->desc, pi->phy.desc,
                            is_offload(adap) ? "R" : "", adap->params.rev, buf,
                            (adap->flags & USING_MSIX) ? " MSI-X" :
                            (adap->flags & USING_MSI) ? " MSI" : "");
                if (adap->name == dev->name && adap->params.vpd.mclk)
                        pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
                                adap->name, t3_mc7_size(&adap->cm) >> 20,
                                t3_mc7_size(&adap->pmtx) >> 20,
                                t3_mc7_size(&adap->pmrx) >> 20,
                                adap->params.vpd.sn);
        }
}

static const struct net_device_ops cxgb_netdev_ops = {
        .ndo_open               = cxgb_open,
        .ndo_stop               = cxgb_close,
        .ndo_start_xmit         = t3_eth_xmit,
        .ndo_get_stats          = cxgb_get_stats,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = cxgb_set_rxmode,
        .ndo_eth_ioctl          = cxgb_ioctl,
        .ndo_siocdevprivate     = cxgb_siocdevprivate,
        .ndo_change_mtu         = cxgb_change_mtu,
        .ndo_set_mac_address    = cxgb_set_mac_addr,
        .ndo_fix_features       = cxgb_fix_features,
        .ndo_set_features       = cxgb_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = cxgb_netpoll,
#endif
};

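/*
 * Derive the iSCSI MAC from the port's Ethernet address by setting a high
 * bit in octet 3, so the offloaded iSCSI traffic presents a distinct MAC.
 */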
static void cxgb3_init_iscsi_mac(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
        pi->iscsic.mac_addr[3] |= 0x80;
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
                        NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int i, err, pci_using_dac = 0;
        resource_size_t mmio_start, mmio_len;
        const struct adapter_info *ai;
        struct adapter *adapter = NULL;
        struct port_info *pi;

        if (!cxgb3_wq) {
                cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
                if (!cxgb3_wq) {
                        pr_err("cannot initialize work queue\n");
                        return -ENOMEM;
                }
        }

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "cannot enable PCI device\n");
                goto out;
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                /* Just info, some other driver may have claimed the device. */
                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
                goto out_disable_device;
        }

        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
                err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "no usable DMA configuration\n");
                        goto out_release_regions;
                }
        }

        pci_set_master(pdev);
        pci_save_state(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        ai = t3_get_adapter_info(ent->driver_data);

        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
                goto out_release_regions;
        }

        adapter->nofail_skb =
                alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
        if (!adapter->nofail_skb) {
                dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
                err = -ENOMEM;
                goto out_free_adapter;
        }

        adapter->regs = ioremap(mmio_start, mmio_len);
        if (!adapter->regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                err = -ENOMEM;
                goto out_free_adapter_nofail;
        }

        adapter->pdev = pdev;
        adapter->name = pci_name(pdev);
        adapter->msg_enable = dflt_msg_enable;
        adapter->mmio_len = mmio_len;

        mutex_init(&adapter->mdio_lock);
        spin_lock_init(&adapter->work_lock);
        spin_lock_init(&adapter->stats_lock);

        INIT_LIST_HEAD(&adapter->adapter_list);
        INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
        INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);

        INIT_WORK(&adapter->db_full_task, db_full_task);
        INIT_WORK(&adapter->db_empty_task, db_empty_task);
        INIT_WORK(&adapter->db_drop_task, db_drop_task);

        INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

        for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                adapter->port[i] = netdev;
                pi = netdev_priv(netdev);
                pi->adapter = adapter;
                pi->port_id = i;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
                netdev->features |= netdev->hw_features |
                                    NETIF_F_HW_VLAN_CTAG_TX;
                netdev->vlan_features |= netdev->features & VLAN_FEAT;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;

                netdev->netdev_ops = &cxgb_netdev_ops;
                netdev->ethtool_ops = &cxgb_ethtool_ops;
                netdev->min_mtu = 81;
                netdev->max_mtu = ETH_MAX_MTU;
                netdev->dev_port = pi->port_id;
        }

        pci_set_drvdata(pdev, adapter);
        if (t3_prep_adapter(adapter, ai, 1) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for_each_port(adapter, i) {
                err = register_netdev(adapter->port[i]);
                if (err) {
                        dev_warn(&pdev->dev,
                                 "cannot register net device %s, skipping\n",
                                 adapter->port[i]->name);
                } else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i]->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                dev_err(&pdev->dev, "could not register any net devices\n");
                err = -ENODEV;
                goto out_free_dev;
        }

        for_each_port(adapter, i)
                cxgb3_init_iscsi_mac(adapter->port[i]);

        /* Driver's ready. Reflect it on LEDs */
        t3_led_ready(adapter);

        if (is_offload(adapter)) {
                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
                cxgb3_adapter_ofld(adapter);
        }

        /* See what interrupts we'll be using */
        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
                adapter->flags |= USING_MSIX;
        else if (msi > 0 && pci_enable_msi(pdev) == 0)
                adapter->flags |= USING_MSI;

        set_nqsets(adapter);

        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                                 &cxgb3_attr_group);
        if (err) {
                dev_err(&pdev->dev, "cannot create sysfs group\n");
                goto out_close_led;
        }

        print_port_info(adapter, ai);
        return 0;

out_close_led:
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);

out_free_dev:
        iounmap(adapter->regs);
        for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
                if (adapter->port[i])
                        free_netdev(adapter->port[i]);

out_free_adapter_nofail:
        kfree_skb(adapter->nofail_skb);

out_free_adapter:
        kfree(adapter);

out_release_regions:
        pci_release_regions(pdev);
out_disable_device:
        pci_disable_device(pdev);
out:
        return err;
}

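/*
 * Teardown mirrors init_one() in reverse: stop the SGE, detach sysfs and
 * offload state, unregister the netdevs, then release SGE, interrupt,
 * MMIO and PCI resources.
 */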
static void remove_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (adapter) {
                int i;

                t3_sge_stop(adapter);
                sysfs_remove_group(&adapter->port[0]->dev.kobj,
                                   &cxgb3_attr_group);

                if (is_offload(adapter)) {
                        cxgb3_adapter_unofld(adapter);
                        if (test_bit(OFFLOAD_DEVMAP_BIT,
                                     &adapter->open_device_map))
                                offload_close(&adapter->tdev);
                }

                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);

                t3_stop_sge_timers(adapter);
                t3_free_sge_resources(adapter);
                cxgb_disable_msi(adapter);

                for_each_port(adapter, i)
                        if (adapter->port[i])
                                free_netdev(adapter->port[i]);

                iounmap(adapter->regs);
                kfree_skb(adapter->nofail_skb);
                kfree(adapter);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
        }
}

static struct pci_driver driver = {
        .name = DRV_NAME,
        .id_table = cxgb3_pci_tbl,
        .probe = init_one,
        .remove = remove_one,
        .err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
        cxgb3_offload_init();

        return pci_register_driver(&driver);
}

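/*
 * cxgb3_wq is created lazily on first probe in init_one(), so it is only
 * destroyed here if at least one device was ever probed.
 */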
static void __exit cxgb3_cleanup_module(void)
{
        pci_unregister_driver(&driver);
        if (cxgb3_wq)
                destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);
