linux/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, %s-duplex\n",
                            s, p->link_config.duplex == DUPLEX_FULL
                            ? "full" : "half");
        }
}

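/*
 * Put the port's Tx FIFO into drain mode while the link is faulted:
 * F_ENDROPPKT makes the XGMAC discard frames at the FIFO egress, and
 * toggling the Rx/Tx enables restarts the datapath so queued frames are
 * drained rather than backing up.
 */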
static void enable_tx_fifo_drain(struct adapter *adapter,
                                 struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
                         F_ENDROPPKT);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
        t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
        t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
                                  struct port_info *pi)
{
        t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
                         F_ENDROPPKT, 0);
}

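/**
 *      t3_os_link_fault - handle a link fault state change
 *      @adap: the adapter associated with the fault
 *      @port_id: the port index affected by the fault
 *      @state: nonzero if the fault has cleared, 0 if a fault is present
 *
 *      Updates the carrier state of the port and reprograms the MAC when a
 *      link fault is raised or cleared.  On recovery the local faults are
 *      cleared and the MAC Tx path is re-enabled; while faulted the Tx FIFO
 *      is put into drain mode so it does not back up.
 */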
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                disable_tx_fifo_drain(adap, pi);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else {
                netif_carrier_off(dev);

                /* Flush TX FIFO */
                enable_tx_fifo_drain(adap, pi);
        }
        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        disable_tx_fifo_drain(adapter, pi);

                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);

                        /* Flush TX FIFO */
                        enable_tx_fifo_drain(adapter, pi);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                netdev_info(dev, "PHY module unplugged\n");
        else
                netdev_info(dev, "%s PHY module inserted\n",
                            mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);

        t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        t3_mac_reset(mac);
        t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
        t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
        t3_mac_set_rx_mode(mac, dev);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

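/*
 * Release whichever of MSI-X or MSI the adapter is currently using and
 * clear the corresponding flag.
 */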
static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

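/*
 * Request the MSI-X data interrupts, one per queue set.  Vector 0 is
 * reserved for asynchronous events, so queue set i uses vector i + 1.
 * On failure any IRQs already requested are released before returning.
 */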
static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

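/*
 * Poll for @n management replies beyond @init_cnt on response queue 0,
 * giving up after about 100ms.  Returns 0 on success or -ETIMEDOUT.
 */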
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 10;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

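/*
 * Initialize TP parity state on T3C adapters by writing every SMT, L2T,
 * and routing-table entry, followed by a TCB field write, so the on-chip
 * memories start out with valid parity.  Each write goes out as a
 * management packet; when the preallocated nofail_skb has to be used, its
 * reply is awaited and the skb is replenished before continuing.
 */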
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->mtu_idx = NMTUS - 1;
                req->iff = i;
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = __skb_put_zero(skb, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                goto alloc_skb_fail;

        greq = __skb_put_zero(skb, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        if (skb == adap->nofail_skb) {
                i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        }

        t3_tp_set_offload_mode(adap, 0);
        return i;

alloc_skb_fail:
        t3_tp_set_offload_mode(adap, 0);
        return -ENOMEM;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE + 1];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }
        rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

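/*
 * Ring the doorbell of every egress context belonging to an active queue
 * set so the SGE revisits their Tx queues.
 */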
static void ring_dbs(struct adapter *adap)
{
        int i, j;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        for (j = 0; j < SGE_TXQ_PER_SET; j++)
                                t3_write_reg(adap, A_SG_KDOORBELL,
                                             F_SELEGRCNTX |
                                             V_EGRCNTX(qs->txq[j].cntxt_id));
        }
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

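/*
 * Common helpers for the sysfs attributes below.  Shows and stores are
 * serialized against device shutdown with the RTNL lock; stores also
 * require CAP_NET_ADMIN and a value within [min_val, max_val].
 */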
static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0444, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, 0644, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static const struct attribute_group cxgb3_attr_group = {
        .attrs = cxgb3_attrs,
};

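/*
 * Show the rate of one of the eight Tx traffic-management schedulers.
 * Each 32-bit TM PIO word holds the parameters of two schedulers; the
 * byte count per tick (bpt) and clocks per tick (cpt) are unpacked and
 * reported as a rate in Kbps.  A cpt of 0 means the scheduler's rate
 * limit is disabled.
 */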
static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtouint(buf, 0, &val);
        if (ret)
                return ret;
        if (val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, 0644, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static const struct attribute_group offload_attr_group = {
        .attrs = offload_attrs,
};

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

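/*
 * Program one entry of the source MAC table with the port's LAN and iSCSI
 * MAC addresses, using a CPL_SMT_WRITE_REQ sent through the offload queue.
 */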
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct port_info *pi = netdev_priv(adapter->port[idx]);
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = __skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
                write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

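/*
 * Send a firmware packet-scheduler management command binding queue @qidx
 * of scheduler @sched to @port, with min/max parameters @lo and @hi.
 * Falls back to the preallocated nofail_skb when allocation fails and
 * replenishes it afterwards.
 */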
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                return -ENOMEM;

        req = skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                             GFP_KERNEL);
                if (!adap->nofail_skb)
                        ret = -ENOMEM;
        }

        return ret;
}

static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_VERSION __stringify(FW_VERSION_MAJOR) "."                    \
        __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "."                \
        __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);

static inline const char *get_edc_fw_name(int edc_idx)
{
        const char *fw_name = NULL;

        switch (edc_idx) {
        case EDC_OPT_AEL2005:
                fw_name = AEL2005_OPT_EDC_NAME;
                break;
        case EDC_TWX_AEL2005:
                fw_name = AEL2005_TWX_EDC_NAME;
                break;
        case EDC_TWX_AEL2020:
                fw_name = AEL2020_TWX_EDC_NAME;
                break;
        }
        return fw_name;
}

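/**
 *      t3_get_edc_fw - load a PHY EDC firmware image into the PHY cache
 *      @phy: the PHY whose EDC firmware is wanted
 *      @edc_idx: selects which EDC image to load
 *      @size: expected payload size in bytes, excluding the trailing checksum
 *
 *      Requests the EDC firmware image for @edc_idx, validates its size and
 *      additive checksum, and stores the payload in the PHY cache as 16-bit
 *      words.  Returns 0 on success or a negative errno.
 */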
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
{
        struct adapter *adapter = phy->adapter;
        const struct firmware *fw;
        const char *fw_name;
        u32 csum;
        const __be32 *p;
        u16 *cache = phy->phy_cache;
        int i, ret = -EINVAL;

        fw_name = get_edc_fw_name(edc_idx);
        if (fw_name)
                ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
        if (ret < 0) {
                dev_err(&adapter->pdev->dev,
                        "could not upgrade firmware: unable to load %s\n",
                        fw_name);
                return ret;
        }

        /* check size, taking the trailing checksum into account */
        if (fw->size > size + 4) {
                CH_ERR(adapter, "firmware image too large %u, expected %d\n",
                       (unsigned int)fw->size, size + 4);
                ret = -EINVAL;
        }

        /* compute checksum */
        p = (const __be32 *)fw->data;
        for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
                csum += ntohl(p[i]);

        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                ret = -EINVAL;
        }

        for (i = 0; i < size / 4; i++) {
                *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
                *cache++ = be32_to_cpu(p[i]) & 0xffff;
        }

        release_firmware(fw);

        return ret;
}

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        ret = request_firmware(&fw, FW_FNAME, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        FW_FNAME);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

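/*
 * Map the chip revision to the character used in TP SRAM firmware file
 * names ('b' for T3B, 'c' for T3C); 0 means no TP SRAM image is needed.
 */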
static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch (adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

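/*
 * Load the protocol SRAM image matching the chip revision, verify it, and
 * write it to the adapter's protocol engine.
 */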
static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
        int i;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
                struct sge_rspq *q = &adap->sge.qs[i].rspq;

                spin_lock_irq(&q->lock);
                spin_unlock_irq(&q->lock);
        }
}

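/*
 * Configure hardware VLAN tag extraction for a port.  Rev 0 adapters have
 * a single VLAN acceleration control shared by all ports, so extraction
 * stays enabled as long as any port requests it.
 */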
static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (adapter->params.rev > 0) {
                t3_set_vlan_accel(adapter, 1 << pi->port_id,
                                  features & NETIF_F_HW_VLAN_CTAG_RX);
        } else {
                /* single control for all ports */
                unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;

                for_each_port(adapter, i)
                        have_vlans |=
                                adapter->port[i]->features &
                                NETIF_F_HW_VLAN_CTAG_RX;

                t3_set_vlan_accel(adapter, 1, have_vlans);
        }
        t3_synchronize_rx(adapter, pi);
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int i, err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                for_each_port(adap, i)
                        cxgb_vlan_mode(adap->port[i], adap->port[i]->features);

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else {
                err = request_irq(adap->pdev->irq,
                                  t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
                                  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
                                  adap->name, adap);
                if (err)
                        goto irq_err;
        }

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                int ret = bind_qsets(adap);

                if (ret < 0) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        err = ret;
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter, int on_wq)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        t3_sge_stop(adapter);
        if (!on_wq)
                flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
}

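/*
 * Schedule the periodic adapter check task on the driver's private
 * workqueue, using the link polling period if one is configured and the
 * statistics update period otherwise.
 */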
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

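/*
 * Bring up the adapter's offload capabilities the first time an
 * offload-capable port is opened: enable TP offload mode, activate the
 * offload device, program the MTU tables and SMT, expose the scheduler
 * sysfs attributes, and call back all registered clients.
 */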
static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);
        struct t3c_data *td = T3C_DATA(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_work(&td->tid_release_task);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter, 0);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        pr_warn("Could not initialize offload capabilities\n");
        }

        netif_set_real_num_tx_queues(dev, pi->nqsets);
        err = netif_set_real_num_rx_queues(dev, pi->nqsets);
        if (err)
                return err;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
        return 0;
}

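/*
 * Common close path for a port.  @on_wq is nonzero when called from the
 * driver's own workqueue, in which case cxgb_down() must not flush that
 * workqueue on itself.
 */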
static int __cxgb_close(struct net_device *dev, int on_wq)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map)
                return 0;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter, on_wq);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        return __cxgb_close(dev, 0);
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &dev->stats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

        "LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));
        if (fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
1643}
1644
1645static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1646{
1647        if (stringset == ETH_SS_STATS)
1648                memcpy(data, stats_strings, sizeof(stats_strings));
1649}
1650
1651static unsigned long collect_sge_port_stats(struct adapter *adapter,
1652                                            struct port_info *p, int idx)
1653{
1654        int i;
1655        unsigned long tot = 0;
1656
1657        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1658                tot += adapter->sge.qs[i].port_stats[idx];
1659        return tot;
1660}
1661
1662static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1663                      u64 *data)
1664{
1665        struct port_info *pi = netdev_priv(dev);
1666        struct adapter *adapter = pi->adapter;
1667        const struct mac_stats *s;
1668
1669        spin_lock(&adapter->stats_lock);
1670        s = t3_mac_update_stats(&pi->mac);
1671        spin_unlock(&adapter->stats_lock);
1672
1673        *data++ = s->tx_octets;
1674        *data++ = s->tx_frames;
1675        *data++ = s->tx_mcast_frames;
1676        *data++ = s->tx_bcast_frames;
1677        *data++ = s->tx_pause;
1678        *data++ = s->tx_underrun;
1679        *data++ = s->tx_fifo_urun;
1680
1681        *data++ = s->tx_frames_64;
1682        *data++ = s->tx_frames_65_127;
1683        *data++ = s->tx_frames_128_255;
1684        *data++ = s->tx_frames_256_511;
1685        *data++ = s->tx_frames_512_1023;
1686        *data++ = s->tx_frames_1024_1518;
1687        *data++ = s->tx_frames_1519_max;
1688
1689        *data++ = s->rx_octets;
1690        *data++ = s->rx_frames;
1691        *data++ = s->rx_mcast_frames;
1692        *data++ = s->rx_bcast_frames;
1693        *data++ = s->rx_pause;
1694        *data++ = s->rx_fcs_errs;
1695        *data++ = s->rx_symbol_errs;
1696        *data++ = s->rx_short;
1697        *data++ = s->rx_jabber;
1698        *data++ = s->rx_too_long;
1699        *data++ = s->rx_fifo_ovfl;
1700
1701        *data++ = s->rx_frames_64;
1702        *data++ = s->rx_frames_65_127;
1703        *data++ = s->rx_frames_128_255;
1704        *data++ = s->rx_frames_256_511;
1705        *data++ = s->rx_frames_512_1023;
1706        *data++ = s->rx_frames_1024_1518;
1707        *data++ = s->rx_frames_1519_max;
1708
1709        *data++ = pi->phy.fifo_errors;
1710
1711        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1712        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1713        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1714        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1715        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
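        /* LRO is no longer implemented (GRO replaced it); these three are
         * zero placeholders so the layout still matches stats_strings[]. */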
1716        *data++ = 0;
1717        *data++ = 0;
1718        *data++ = 0;
1719        *data++ = s->rx_cong_drops;
1720
1721        *data++ = s->num_toggled;
1722        *data++ = s->num_resets;
1723
1724        *data++ = s->link_faults;
1725}
1726
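    /*
     * Copy the registers in the byte range [start, end] into buf, placed at
     * offset start so the dump preserves the registers' addresses.
     */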
1727static inline void reg_block_dump(struct adapter *ap, void *buf,
1728                                  unsigned int start, unsigned int end)
1729{
1730        u32 *p = buf + start;
1731
1732        for (; start <= end; start += sizeof(u32))
1733                *p++ = t3_read_reg(ap, start);
1734}
1735
1736static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1737                     void *buf)
1738{
1739        struct port_info *pi = netdev_priv(dev);
1740        struct adapter *ap = pi->adapter;
1741
1742        /*
1743         * Version scheme:
1744         * bits 0..9: chip version
1745         * bits 10..15: chip revision
1746         * bit 31: set for PCIe cards
1747         */
1748        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1749
1750        /*
1751         * We skip the MAC statistics registers because they are clear-on-read.
1752         * Also reading multi-register stats would need to synchronize with the
1753         * periodic mac stats accumulation.  Hard to justify the complexity.
1754         */
1755        memset(buf, 0, T3_REGMAP_SIZE);
1756        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1757        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1758        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1759        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1760        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1761        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1762                       XGM_REG(A_XGM_SERDES_STAT3, 1));
1763        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1764                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1765}
1766
1767static int restart_autoneg(struct net_device *dev)
1768{
1769        struct port_info *p = netdev_priv(dev);
1770
1771        if (!netif_running(dev))
1772                return -EAGAIN;
1773        if (p->link_config.autoneg != AUTONEG_ENABLE)
1774                return -EINVAL;
1775        p->phy.ops->autoneg_restart(&p->phy);
1776        return 0;
1777}
1778
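    /* Identify the adapter by blinking the LED driven by GPIO0 (ethtool -p). */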
1779static int set_phys_id(struct net_device *dev,
1780                       enum ethtool_phys_id_state state)
1781{
1782        struct port_info *pi = netdev_priv(dev);
1783        struct adapter *adapter = pi->adapter;
1784
1785        switch (state) {
1786        case ETHTOOL_ID_ACTIVE:
1787                return 1;       /* cycle on/off once per second */
1788
1789        case ETHTOOL_ID_OFF:
1790                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
1791                break;
1792
1793        case ETHTOOL_ID_ON:
1794        case ETHTOOL_ID_INACTIVE:
1795                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1796                         F_GPIO0_OUT_VAL);
1797        }
1798
1799        return 0;
1800}
1801
1802static int get_link_ksettings(struct net_device *dev,
1803                              struct ethtool_link_ksettings *cmd)
1804{
1805        struct port_info *p = netdev_priv(dev);
1806        u32 supported;
1807
1808        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1809                                                p->link_config.supported);
1810        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1811                                                p->link_config.advertising);
1812
1813        if (netif_carrier_ok(dev)) {
1814                cmd->base.speed = p->link_config.speed;
1815                cmd->base.duplex = p->link_config.duplex;
1816        } else {
1817                cmd->base.speed = SPEED_UNKNOWN;
1818                cmd->base.duplex = DUPLEX_UNKNOWN;
1819        }
1820
1821        ethtool_convert_link_mode_to_legacy_u32(&supported,
1822                                                cmd->link_modes.supported);
1823
1824        cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1825        cmd->base.phy_address = p->phy.mdio.prtad;
1826        cmd->base.autoneg = p->link_config.autoneg;
1827        return 0;
1828}
1829
1830static int speed_duplex_to_caps(int speed, int duplex)
1831{
1832        int cap = 0;
1833
1834        switch (speed) {
1835        case SPEED_10:
1836                if (duplex == DUPLEX_FULL)
1837                        cap = SUPPORTED_10baseT_Full;
1838                else
1839                        cap = SUPPORTED_10baseT_Half;
1840                break;
1841        case SPEED_100:
1842                if (duplex == DUPLEX_FULL)
1843                        cap = SUPPORTED_100baseT_Full;
1844                else
1845                        cap = SUPPORTED_100baseT_Half;
1846                break;
1847        case SPEED_1000:
1848                if (duplex == DUPLEX_FULL)
1849                        cap = SUPPORTED_1000baseT_Full;
1850                else
1851                        cap = SUPPORTED_1000baseT_Half;
1852                break;
1853        case SPEED_10000:
1854                if (duplex == DUPLEX_FULL)
1855                        cap = SUPPORTED_10000baseT_Full;
1856        }
1857        return cap;
1858}
1859
1860#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1861                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1862                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1863                      ADVERTISED_10000baseT_Full)
1864
1865static int set_link_ksettings(struct net_device *dev,
1866                              const struct ethtool_link_ksettings *cmd)
1867{
1868        struct port_info *p = netdev_priv(dev);
1869        struct link_config *lc = &p->link_config;
1870        u32 advertising;
1871
1872        ethtool_convert_link_mode_to_legacy_u32(&advertising,
1873                                                cmd->link_modes.advertising);
1874
1875        if (!(lc->supported & SUPPORTED_Autoneg)) {
1876                /*
1877                 * PHY offers a single speed/duplex.  See if that's what's
1878                 * being requested.
1879                 */
1880                if (cmd->base.autoneg == AUTONEG_DISABLE) {
1881                        u32 speed = cmd->base.speed;
1882                        int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1883                        if (lc->supported & cap)
1884                                return 0;
1885                }
1886                return -EINVAL;
1887        }
1888
1889        if (cmd->base.autoneg == AUTONEG_DISABLE) {
1890                u32 speed = cmd->base.speed;
1891                int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
1892
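                /* A forced 1Gb/s setting is rejected: 1000BASE-T needs
                 * autonegotiation for master/slave resolution. */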
1893                if (!(lc->supported & cap) || (speed == SPEED_1000))
1894                        return -EINVAL;
1895                lc->requested_speed = speed;
1896                lc->requested_duplex = cmd->base.duplex;
1897                lc->advertising = 0;
1898        } else {
1899                advertising &= ADVERTISED_MASK;
1900                advertising &= lc->supported;
1901                if (!advertising)
1902                        return -EINVAL;
1903                lc->requested_speed = SPEED_INVALID;
1904                lc->requested_duplex = DUPLEX_INVALID;
1905                lc->advertising = advertising | ADVERTISED_Autoneg;
1906        }
1907        lc->autoneg = cmd->base.autoneg;
1908        if (netif_running(dev))
1909                t3_link_start(&p->phy, &p->mac, lc);
1910        return 0;
1911}
1912
1913static void get_pauseparam(struct net_device *dev,
1914                           struct ethtool_pauseparam *epause)
1915{
1916        struct port_info *p = netdev_priv(dev);
1917
1918        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1919        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1920        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1921}
1922
1923static int set_pauseparam(struct net_device *dev,
1924                          struct ethtool_pauseparam *epause)
1925{
1926        struct port_info *p = netdev_priv(dev);
1927        struct link_config *lc = &p->link_config;
1928
1929        if (epause->autoneg == AUTONEG_DISABLE)
1930                lc->requested_fc = 0;
1931        else if (lc->supported & SUPPORTED_Autoneg)
1932                lc->requested_fc = PAUSE_AUTONEG;
1933        else
1934                return -EINVAL;
1935
1936        if (epause->rx_pause)
1937                lc->requested_fc |= PAUSE_RX;
1938        if (epause->tx_pause)
1939                lc->requested_fc |= PAUSE_TX;
1940        if (lc->autoneg == AUTONEG_ENABLE) {
1941                if (netif_running(dev))
1942                        t3_link_start(&p->phy, &p->mac, lc);
1943        } else {
1944                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1945                if (netif_running(dev))
1946                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1947        }
1948        return 0;
1949}
1950
1951static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
1952                          struct kernel_ethtool_ringparam *kernel_e,
1953                          struct netlink_ext_ack *extack)
1954{
1955        struct port_info *pi = netdev_priv(dev);
1956        struct adapter *adapter = pi->adapter;
1957        const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1958
1959        e->rx_max_pending = MAX_RX_BUFFERS;
1960        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1961        e->tx_max_pending = MAX_TXQ_ENTRIES;
1962
1963        e->rx_pending = q->fl_size;
1964        e->rx_mini_pending = q->rspq_size;
1965        e->rx_jumbo_pending = q->jumbo_size;
1966        e->tx_pending = q->txq_size[0];
1967}
1968
1969static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
1970                         struct kernel_ethtool_ringparam *kernel_e,
1971                         struct netlink_ext_ack *extack)
1972{
1973        struct port_info *pi = netdev_priv(dev);
1974        struct adapter *adapter = pi->adapter;
1975        struct qset_params *q;
1976        int i;
1977
1978        if (e->rx_pending > MAX_RX_BUFFERS ||
1979            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1980            e->tx_pending > MAX_TXQ_ENTRIES ||
1981            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1982            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1983            e->rx_pending < MIN_FL_ENTRIES ||
1984            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1985            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1986                return -EINVAL;
1987
1988        if (adapter->flags & FULL_INIT_DONE)
1989                return -EBUSY;
1990
1991        q = &adapter->params.sge.qset[pi->first_qset];
1992        for (i = 0; i < pi->nqsets; ++i, ++q) {
1993                q->rspq_size = e->rx_mini_pending;
1994                q->fl_size = e->rx_pending;
1995                q->jumbo_size = e->rx_jumbo_pending;
1996                q->txq_size[0] = e->tx_pending;
1997                q->txq_size[1] = e->tx_pending;
1998                q->txq_size[2] = e->tx_pending;
1999        }
2000        return 0;
2001}
2002
2003static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
2004                        struct kernel_ethtool_coalesce *kernel_coal,
2005                        struct netlink_ext_ack *extack)
2006{
2007        struct port_info *pi = netdev_priv(dev);
2008        struct adapter *adapter = pi->adapter;
2009        struct qset_params *qsp;
2010        struct sge_qset *qs;
2011        int i;
2012
2013        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
2014                return -EINVAL;
2015
2016        for (i = 0; i < pi->nqsets; i++) {
2017                qsp = &adapter->params.sge.qset[i];
2018                qs = &adapter->sge.qs[i];
2019                qsp->coalesce_usecs = c->rx_coalesce_usecs;
2020                t3_update_qset_coalesce(qs, qsp);
2021        }
2022
2023        return 0;
2024}
2025
2026static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
2027                        struct kernel_ethtool_coalesce *kernel_coal,
2028                        struct netlink_ext_ack *extack)
2029{
2030        struct port_info *pi = netdev_priv(dev);
2031        struct adapter *adapter = pi->adapter;
2032        struct qset_params *q = adapter->params.sge.qset;
2033
2034        c->rx_coalesce_usecs = q->coalesce_usecs;
2035        return 0;
2036}
2037
2038static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2039                      u8 *data)
2040{
2041        struct port_info *pi = netdev_priv(dev);
2042        struct adapter *adapter = pi->adapter;
2043        int cnt;
2044
2045        e->magic = EEPROM_MAGIC;
2046        cnt = pci_read_vpd(adapter->pdev, e->offset, e->len, data);
2047        if (cnt < 0)
2048                return cnt;
2049
2050        e->len = cnt;
2051
2052        return 0;
2053}
2054
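    /*
     * VPD accesses are 32-bit aligned, so unaligned or partial updates are
     * done as a read-modify-write through a bounce buffer.
     */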
2055static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2056                      u8 *data)
2057{
2058        struct port_info *pi = netdev_priv(dev);
2059        struct adapter *adapter = pi->adapter;
2060        u32 aligned_offset, aligned_len;
2061        u8 *buf;
2062        int err;
2063
2064        if (eeprom->magic != EEPROM_MAGIC)
2065                return -EINVAL;
2066
2067        aligned_offset = eeprom->offset & ~3;
2068        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2069
2070        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2071                buf = kmalloc(aligned_len, GFP_KERNEL);
2072                if (!buf)
2073                        return -ENOMEM;
2074                err = pci_read_vpd(adapter->pdev, aligned_offset, aligned_len,
2075                                   buf);
2076                if (err < 0)
2077                        goto out;
2078                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2079        } else
2080                buf = data;
2081
2082        err = t3_seeprom_wp(adapter, 0);
2083        if (err)
2084                goto out;
2085
2086        err = pci_write_vpd(adapter->pdev, aligned_offset, aligned_len, buf);
2087        if (err >= 0)
2088                err = t3_seeprom_wp(adapter, 1);
2089out:
2090        if (buf != data)
2091                kfree(buf);
2092        return err < 0 ? err : 0;
2093}
2094
2095static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2096{
2097        wol->supported = 0;
2098        wol->wolopts = 0;
2099        memset(&wol->sopass, 0, sizeof(wol->sopass));
2100}
2101
2102static const struct ethtool_ops cxgb_ethtool_ops = {
2103        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
2104        .get_drvinfo = get_drvinfo,
2105        .get_msglevel = get_msglevel,
2106        .set_msglevel = set_msglevel,
2107        .get_ringparam = get_sge_param,
2108        .set_ringparam = set_sge_param,
2109        .get_coalesce = get_coalesce,
2110        .set_coalesce = set_coalesce,
2111        .get_eeprom_len = get_eeprom_len,
2112        .get_eeprom = get_eeprom,
2113        .set_eeprom = set_eeprom,
2114        .get_pauseparam = get_pauseparam,
2115        .set_pauseparam = set_pauseparam,
2116        .get_link = ethtool_op_get_link,
2117        .get_strings = get_strings,
2118        .set_phys_id = set_phys_id,
2119        .nway_reset = restart_autoneg,
2120        .get_sset_count = get_sset_count,
2121        .get_ethtool_stats = get_stats,
2122        .get_regs_len = get_regs_len,
2123        .get_regs = get_regs,
2124        .get_wol = get_wol,
2125        .get_link_ksettings = get_link_ksettings,
2126        .set_link_ksettings = set_link_ksettings,
2127};
2128
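    /* A negative value means "not specified" and passes the range check;
     * the qset ioctls below treat negative fields as "leave unchanged". */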
2129static int in_range(int val, int lo, int hi)
2130{
2131        return val < 0 || (val <= hi && val >= lo);
2132}
2133
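    /*
     * Chelsio-private ioctls arrive as SIOCCHIOCTL; the actual sub-command
     * is the first 32-bit word of the user buffer.  Userspace typically
     * invokes this via ioctl(sock, SIOCCHIOCTL, &ifr) with ifr_data
     * pointing at a command struct from cxgb3_ioctl.h.
     */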
2134static int cxgb_siocdevprivate(struct net_device *dev,
2135                               struct ifreq *ifreq,
2136                               void __user *useraddr,
2137                               int cmd)
2138{
2139        struct port_info *pi = netdev_priv(dev);
2140        struct adapter *adapter = pi->adapter;
2141        int ret;
2142
2143        if (cmd != SIOCCHIOCTL)
2144                return -EOPNOTSUPP;
2145
2146        if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
2147                return -EFAULT;
2148
2149        switch (cmd) {
2150        case CHELSIO_SET_QSET_PARAMS:{
2151                int i;
2152                struct qset_params *q;
2153                struct ch_qset_params t;
2154                int q1 = pi->first_qset;
2155                int nqsets = pi->nqsets;
2156
2157                if (!capable(CAP_NET_ADMIN))
2158                        return -EPERM;
2159                if (copy_from_user(&t, useraddr, sizeof(t)))
2160                        return -EFAULT;
2161                if (t.cmd != CHELSIO_SET_QSET_PARAMS)
2162                        return -EINVAL;
2163                if (t.qset_idx >= SGE_QSETS)
2164                        return -EINVAL;
2165                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2166                    !in_range(t.cong_thres, 0, 255) ||
2167                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2168                              MAX_TXQ_ENTRIES) ||
2169                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2170                              MAX_TXQ_ENTRIES) ||
2171                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2172                              MAX_CTRL_TXQ_ENTRIES) ||
2173                    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2174                              MAX_RX_BUFFERS) ||
2175                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2176                              MAX_RX_JUMBO_BUFFERS) ||
2177                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2178                              MAX_RSPQ_ENTRIES))
2179                        return -EINVAL;
2180
2181                if ((adapter->flags & FULL_INIT_DONE) &&
2182                        (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2183                        t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2184                        t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2185                        t.polling >= 0 || t.cong_thres >= 0))
2186                        return -EBUSY;
2187
2188                /* Allow setting of any available qset when offload enabled */
2189                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2190                        q1 = 0;
2191                        for_each_port(adapter, i) {
2192                                pi = adap2pinfo(adapter, i);
2193                                nqsets += pi->first_qset + pi->nqsets;
2194                        }
2195                }
2196
2197                if (t.qset_idx < q1)
2198                        return -EINVAL;
2199                if (t.qset_idx > q1 + nqsets - 1)
2200                        return -EINVAL;
2201
2202                q = &adapter->params.sge.qset[t.qset_idx];
2203
2204                if (t.rspq_size >= 0)
2205                        q->rspq_size = t.rspq_size;
2206                if (t.fl_size[0] >= 0)
2207                        q->fl_size = t.fl_size[0];
2208                if (t.fl_size[1] >= 0)
2209                        q->jumbo_size = t.fl_size[1];
2210                if (t.txq_size[0] >= 0)
2211                        q->txq_size[0] = t.txq_size[0];
2212                if (t.txq_size[1] >= 0)
2213                        q->txq_size[1] = t.txq_size[1];
2214                if (t.txq_size[2] >= 0)
2215                        q->txq_size[2] = t.txq_size[2];
2216                if (t.cong_thres >= 0)
2217                        q->cong_thres = t.cong_thres;
2218                if (t.intr_lat >= 0) {
2219                        struct sge_qset *qs =
2220                                &adapter->sge.qs[t.qset_idx];
2221
2222                        q->coalesce_usecs = t.intr_lat;
2223                        t3_update_qset_coalesce(qs, q);
2224                }
2225                if (t.polling >= 0) {
2226                        if (adapter->flags & USING_MSIX)
2227                                q->polling = t.polling;
2228                        else {
2229                                /* No polling with INTx for T3A */
2230                                if (adapter->params.rev == 0 &&
2231                                        !(adapter->flags & USING_MSI))
2232                                        t.polling = 0;
2233
2234                                for (i = 0; i < SGE_QSETS; i++) {
2235                                        q = &adapter->params.sge.qset[i];
2237                                        q->polling = t.polling;
2238                                }
2239                        }
2240                }
2241
2242                if (t.lro >= 0) {
2243                        if (t.lro)
2244                                dev->wanted_features |= NETIF_F_GRO;
2245                        else
2246                                dev->wanted_features &= ~NETIF_F_GRO;
2247                        netdev_update_features(dev);
2248                }
2249
2250                break;
2251        }
2252        case CHELSIO_GET_QSET_PARAMS:{
2253                struct qset_params *q;
2254                struct ch_qset_params t;
2255                int q1 = pi->first_qset;
2256                int nqsets = pi->nqsets;
2257                int i;
2258
2259                if (copy_from_user(&t, useraddr, sizeof(t)))
2260                        return -EFAULT;
2261
2262                if (t.cmd != CHELSIO_GET_QSET_PARAMS)
2263                        return -EINVAL;
2264
2265                /* Display qsets for all ports when offload enabled */
2266                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2267                        q1 = 0;
2268                        for_each_port(adapter, i) {
2269                                pi = adap2pinfo(adapter, i);
2270                                nqsets = pi->first_qset + pi->nqsets;
2271                        }
2272                }
2273
2274                if (t.qset_idx >= nqsets)
2275                        return -EINVAL;
2276                t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2277
2278                q = &adapter->params.sge.qset[q1 + t.qset_idx];
2279                t.rspq_size = q->rspq_size;
2280                t.txq_size[0] = q->txq_size[0];
2281                t.txq_size[1] = q->txq_size[1];
2282                t.txq_size[2] = q->txq_size[2];
2283                t.fl_size[0] = q->fl_size;
2284                t.fl_size[1] = q->jumbo_size;
2285                t.polling = q->polling;
2286                t.lro = !!(dev->features & NETIF_F_GRO);
2287                t.intr_lat = q->coalesce_usecs;
2288                t.cong_thres = q->cong_thres;
2289                t.qnum = q1;
2290
2291                if (adapter->flags & USING_MSIX)
2292                        t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2293                else
2294                        t.vector = adapter->pdev->irq;
2295
2296                if (copy_to_user(useraddr, &t, sizeof(t)))
2297                        return -EFAULT;
2298                break;
2299        }
2300        case CHELSIO_SET_QSET_NUM:{
2301                struct ch_reg edata;
2302                unsigned int i, first_qset = 0, other_qsets = 0;
2303
2304                if (!capable(CAP_NET_ADMIN))
2305                        return -EPERM;
2306                if (adapter->flags & FULL_INIT_DONE)
2307                        return -EBUSY;
2308                if (copy_from_user(&edata, useraddr, sizeof(edata)))
2309                        return -EFAULT;
2310                if (edata.cmd != CHELSIO_SET_QSET_NUM)
2311                        return -EINVAL;
2312                if (edata.val < 1 ||
2313                        (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2314                        return -EINVAL;
2315
2316                for_each_port(adapter, i)
2317                        if (adapter->port[i] && adapter->port[i] != dev)
2318                                other_qsets += adap2pinfo(adapter, i)->nqsets;
2319
2320                if (edata.val + other_qsets > SGE_QSETS)
2321                        return -EINVAL;
2322
2323                pi->nqsets = edata.val;
2324
2325                for_each_port(adapter, i)
2326                        if (adapter->port[i]) {
2327                                pi = adap2pinfo(adapter, i);
2328                                pi->first_qset = first_qset;
2329                                first_qset += pi->nqsets;
2330                        }
2331                break;
2332        }
2333        case CHELSIO_GET_QSET_NUM:{
2334                struct ch_reg edata;
2335
2336                memset(&edata, 0, sizeof(struct ch_reg));
2337
2338                edata.cmd = CHELSIO_GET_QSET_NUM;
2339                edata.val = pi->nqsets;
2340                if (copy_to_user(useraddr, &edata, sizeof(edata)))
2341                        return -EFAULT;
2342                break;
2343        }
2344        case CHELSIO_LOAD_FW:{
2345                u8 *fw_data;
2346                struct ch_mem_range t;
2347
2348                if (!capable(CAP_SYS_RAWIO))
2349                        return -EPERM;
2350                if (copy_from_user(&t, useraddr, sizeof(t)))
2351                        return -EFAULT;
2352                if (t.cmd != CHELSIO_LOAD_FW)
2353                        return -EINVAL;
2354                /* Check t.len sanity ? */
2355                fw_data = memdup_user(useraddr + sizeof(t), t.len);
2356                if (IS_ERR(fw_data))
2357                        return PTR_ERR(fw_data);
2358
2359                ret = t3_load_fw(adapter, fw_data, t.len);
2360                kfree(fw_data);
2361                if (ret)
2362                        return ret;
2363                break;
2364        }
2365        case CHELSIO_SETMTUTAB:{
2366                struct ch_mtus m;
2367                int i;
2368
2369                if (!is_offload(adapter))
2370                        return -EOPNOTSUPP;
2371                if (!capable(CAP_NET_ADMIN))
2372                        return -EPERM;
2373                if (offload_running(adapter))
2374                        return -EBUSY;
2375                if (copy_from_user(&m, useraddr, sizeof(m)))
2376                        return -EFAULT;
2377                if (m.cmd != CHELSIO_SETMTUTAB)
2378                        return -EINVAL;
2379                if (m.nmtus != NMTUS)
2380                        return -EINVAL;
2381                if (m.mtus[0] < 81)     /* accommodate SACK */
2382                        return -EINVAL;
2383
2384                /* MTUs must be in ascending order */
2385                for (i = 1; i < NMTUS; ++i)
2386                        if (m.mtus[i] < m.mtus[i - 1])
2387                                return -EINVAL;
2388
2389                memcpy(adapter->params.mtus, m.mtus,
2390                        sizeof(adapter->params.mtus));
2391                break;
2392        }
2393        case CHELSIO_GET_PM:{
2394                struct tp_params *p = &adapter->params.tp;
2395                struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2396
2397                if (!is_offload(adapter))
2398                        return -EOPNOTSUPP;
2399                m.tx_pg_sz = p->tx_pg_size;
2400                m.tx_num_pg = p->tx_num_pgs;
2401                m.rx_pg_sz = p->rx_pg_size;
2402                m.rx_num_pg = p->rx_num_pgs;
2403                m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2404                if (copy_to_user(useraddr, &m, sizeof(m)))
2405                        return -EFAULT;
2406                break;
2407        }
2408        case CHELSIO_SET_PM:{
2409                struct ch_pm m;
2410                struct tp_params *p = &adapter->params.tp;
2411
2412                if (!is_offload(adapter))
2413                        return -EOPNOTSUPP;
2414                if (!capable(CAP_NET_ADMIN))
2415                        return -EPERM;
2416                if (adapter->flags & FULL_INIT_DONE)
2417                        return -EBUSY;
2418                if (copy_from_user(&m, useraddr, sizeof(m)))
2419                        return -EFAULT;
2420                if (m.cmd != CHELSIO_SET_PM)
2421                        return -EINVAL;
2422                if (!is_power_of_2(m.rx_pg_sz) ||
2423                        !is_power_of_2(m.tx_pg_sz))
2424                        return -EINVAL; /* not power of 2 */
2425                if (!(m.rx_pg_sz & 0x14000))
2426                        return -EINVAL; /* not 16KB or 64KB */
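                /* 0x1554000: 16KB, 64KB, 256KB, 1MB, 4MB or 16MB */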
2427                if (!(m.tx_pg_sz & 0x1554000))
2428                        return -EINVAL;
2429                if (m.tx_num_pg == -1)
2430                        m.tx_num_pg = p->tx_num_pgs;
2431                if (m.rx_num_pg == -1)
2432                        m.rx_num_pg = p->rx_num_pgs;
2433                if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2434                        return -EINVAL;
2435                if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2436                        m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2437                        return -EINVAL;
2438                p->rx_pg_size = m.rx_pg_sz;
2439                p->tx_pg_size = m.tx_pg_sz;
2440                p->rx_num_pgs = m.rx_num_pg;
2441                p->tx_num_pgs = m.tx_num_pg;
2442                break;
2443        }
2444        case CHELSIO_GET_MEM:{
2445                struct ch_mem_range t;
2446                struct mc7 *mem;
2447                u64 buf[32];
2448
2449                if (!is_offload(adapter))
2450                        return -EOPNOTSUPP;
2451                if (!capable(CAP_NET_ADMIN))
2452                        return -EPERM;
2453                if (!(adapter->flags & FULL_INIT_DONE))
2454                        return -EIO;    /* need the memory controllers */
2455                if (copy_from_user(&t, useraddr, sizeof(t)))
2456                        return -EFAULT;
2457                if (t.cmd != CHELSIO_GET_MEM)
2458                        return -EINVAL;
2459                if ((t.addr & 7) || (t.len & 7))
2460                        return -EINVAL;
2461                if (t.mem_id == MEM_CM)
2462                        mem = &adapter->cm;
2463                else if (t.mem_id == MEM_PMRX)
2464                        mem = &adapter->pmrx;
2465                else if (t.mem_id == MEM_PMTX)
2466                        mem = &adapter->pmtx;
2467                else
2468                        return -EINVAL;
2469
2470                /*
2471                 * Version scheme:
2472                 * bits 0..9: chip version
2473                 * bits 10..15: chip revision
2474                 */
2475                t.version = 3 | (adapter->params.rev << 10);
2476                if (copy_to_user(useraddr, &t, sizeof(t)))
2477                        return -EFAULT;
2478
2479                /*
2480                 * Read 256 bytes at a time as len can be large and we don't
2481                 * want to use huge intermediate buffers.
2482                 */
2483                useraddr += sizeof(t);  /* advance to start of buffer */
2484                while (t.len) {
2485                        unsigned int chunk =
2486                                min_t(unsigned int, t.len, sizeof(buf));
2487
2488                        ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2489                                             buf);
2491                        if (ret)
2492                                return ret;
2493                        if (copy_to_user(useraddr, buf, chunk))
2494                                return -EFAULT;
2495                        useraddr += chunk;
2496                        t.addr += chunk;
2497                        t.len -= chunk;
2498                }
2499                break;
2500        }
2501        case CHELSIO_SET_TRACE_FILTER:{
2502                struct ch_trace t;
2503                const struct trace_params *tp;
2504
2505                if (!capable(CAP_NET_ADMIN))
2506                        return -EPERM;
2507                if (!offload_running(adapter))
2508                        return -EAGAIN;
2509                if (copy_from_user(&t, useraddr, sizeof(t)))
2510                        return -EFAULT;
2511                if (t.cmd != CHELSIO_SET_TRACE_FILTER)
2512                        return -EINVAL;
2513
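                /* The cast relies on ch_trace's filter fields, starting at
                 * sip, having the same layout as struct trace_params. */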
2514                tp = (const struct trace_params *)&t.sip;
2515                if (t.config_tx)
2516                        t3_config_trace_filter(adapter, tp, 0,
2517                                                t.invert_match,
2518                                                t.trace_tx);
2519                if (t.config_rx)
2520                        t3_config_trace_filter(adapter, tp, 1,
2521                                                t.invert_match,
2522                                                t.trace_rx);
2523                break;
2524        }
2525        default:
2526                return -EOPNOTSUPP;
2527        }
2528        return 0;
2529}
2530
2531static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2532{
2533        struct mii_ioctl_data *data = if_mii(req);
2534        struct port_info *pi = netdev_priv(dev);
2535        struct adapter *adapter = pi->adapter;
2536
2537        switch (cmd) {
2538        case SIOCGMIIREG:
2539        case SIOCSMIIREG:
2540                /* Convert phy_id from older PRTAD/DEVAD format */
2541                if (is_10G(adapter) &&
2542                    !mdio_phy_id_is_c45(data->phy_id) &&
2543                    (data->phy_id & 0x1f00) &&
2544                    !(data->phy_id & 0xe0e0))
2545                        data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2546                                                       data->phy_id & 0x1f);
2547                fallthrough;
2548        case SIOCGMIIPHY:
2549                return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2550        default:
2551                return -EOPNOTSUPP;
2552        }
2553}
2554
2555static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2556{
2557        struct port_info *pi = netdev_priv(dev);
2558        struct adapter *adapter = pi->adapter;
2559        int ret;
2560
2561        ret = t3_mac_set_mtu(&pi->mac, new_mtu);
            if (ret)
2562                return ret;
2563        dev->mtu = new_mtu;
2564        init_port_mtus(adapter);
2565        if (adapter->params.rev == 0 && offload_running(adapter))
2566                t3_load_mtus(adapter, adapter->params.mtus,
2567                             adapter->params.a_wnd, adapter->params.b_wnd,
2568                             adapter->port[0]->mtu);
2569        return 0;
2570}
2571
2572static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2573{
2574        struct port_info *pi = netdev_priv(dev);
2575        struct adapter *adapter = pi->adapter;
2576        struct sockaddr *addr = p;
2577
2578        if (!is_valid_ether_addr(addr->sa_data))
2579                return -EADDRNOTAVAIL;
2580
2581        eth_hw_addr_set(dev, addr->sa_data);
2582        t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2583        if (offload_running(adapter))
2584                write_smt_entry(adapter, pi->port_id);
2585        return 0;
2586}
2587
2588static netdev_features_t cxgb_fix_features(struct net_device *dev,
2589        netdev_features_t features)
2590{
2591        /*
2592         * Since there is no support for separate rx/tx vlan accel
2593         * enable/disable, make sure the tx flag is always in the same state as rx.
2594         */
2595        if (features & NETIF_F_HW_VLAN_CTAG_RX)
2596                features |= NETIF_F_HW_VLAN_CTAG_TX;
2597        else
2598                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
2599
2600        return features;
2601}
2602
2603static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2604{
2605        netdev_features_t changed = dev->features ^ features;
2606
2607        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
2608                cxgb_vlan_mode(dev, features);
2609
2610        return 0;
2611}
2612
2613#ifdef CONFIG_NET_POLL_CONTROLLER
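    /*
     * Poll the port's qsets without interrupts (netconsole etc.).  With
     * MSI-X each qset has its own handler and takes the qset as context;
     * otherwise the adapter-wide handler and context are used.
     */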
2614static void cxgb_netpoll(struct net_device *dev)
2615{
2616        struct port_info *pi = netdev_priv(dev);
2617        struct adapter *adapter = pi->adapter;
2618        int qidx;
2619
2620        for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2621                struct sge_qset *qs = &adapter->sge.qs[qidx];
2622                void *source;
2623
2624                if (adapter->flags & USING_MSIX)
2625                        source = qs;
2626                else
2627                        source = adapter;
2628
2629                t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2630        }
2631}
2632#endif
2633
2634/*
2635 * Periodic accumulation of MAC statistics.  The MAC stats registers are
2636 * clear-on-read, so they are folded into 64-bit software counters here.
     */
2637static void mac_stats_update(struct adapter *adapter)
2638{
2639        int i;
2640
2641        for_each_port(adapter, i) {
2642                struct net_device *dev = adapter->port[i];
2643                struct port_info *p = netdev_priv(dev);
2644
2645                if (netif_running(dev)) {
2646                        spin_lock(&adapter->stats_lock);
2647                        t3_mac_update_stats(&p->mac);
2648                        spin_unlock(&adapter->stats_lock);
2649                }
2650        }
2651}
2652
2653static void check_link_status(struct adapter *adapter)
2654{
2655        int i;
2656
2657        for_each_port(adapter, i) {
2658                struct net_device *dev = adapter->port[i];
2659                struct port_info *p = netdev_priv(dev);
2660                int link_fault;
2661
2662                spin_lock_irq(&adapter->work_lock);
2663                link_fault = p->link_fault;
2664                spin_unlock_irq(&adapter->work_lock);
2665
2666                if (link_fault) {
2667                        t3_link_fault(adapter, i);
2668                        continue;
2669                }
2670
2671                if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2672                        t3_xgm_intr_disable(adapter, i);
2673                        t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2674
2675                        t3_link_changed(adapter, i);
2676                        t3_xgm_intr_enable(adapter, i);
2677                }
2678        }
2679}
2680
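    /*
     * T3B2 MAC watchdog: t3b2_mac_watchdog_task() returns 1 if it recovered
     * the MAC by toggling TX, and 2 if the MAC needs the full
     * reinitialization done below.
     */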
2681static void check_t3b2_mac(struct adapter *adapter)
2682{
2683        int i;
2684
2685        if (!rtnl_trylock())    /* synchronize with ifdown */
2686                return;
2687
2688        for_each_port(adapter, i) {
2689                struct net_device *dev = adapter->port[i];
2690                struct port_info *p = netdev_priv(dev);
2691                int status;
2692
2693                if (!netif_running(dev))
2694                        continue;
2695
2696                status = 0;
2697                if (netif_carrier_ok(dev))      /* netif_running() checked above */
2698                        status = t3b2_mac_watchdog_task(&p->mac);
2699                if (status == 1)
2700                        p->mac.stats.num_toggled++;
2701                else if (status == 2) {
2702                        struct cmac *mac = &p->mac;
2703
2704                        t3_mac_set_mtu(mac, dev->mtu);
2705                        t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2706                        cxgb_set_rxmode(dev);
2707                        t3_link_start(&p->phy, mac, &p->link_config);
2708                        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2709                        t3_port_intr_enable(adapter, p->port_id);
2710                        p->mac.stats.num_resets++;
2711                }
2712        }
2713        rtnl_unlock();
2714}
2715
2717static void t3_adap_check_task(struct work_struct *work)
2718{
2719        struct adapter *adapter = container_of(work, struct adapter,
2720                                               adap_check_task.work);
2721        const struct adapter_params *p = &adapter->params;
2722        int port;
2723        unsigned int v, status, reset;
2724
2725        adapter->check_task_cnt++;
2726
2727        check_link_status(adapter);
2728
2729        /* Accumulate MAC stats if needed */
2730        if (!p->linkpoll_period ||
2731            (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2732            p->stats_update_period) {
2733                mac_stats_update(adapter);
2734                adapter->check_task_cnt = 0;
2735        }
2736
2737        if (p->rev == T3_REV_B2)
2738                check_t3b2_mac(adapter);
2739
2740        /*
2741         * Scan the XGMACs to check for various conditions which we want to
2742         * monitor in a periodic polling manner rather than via an interrupt
2743         * condition.  This is used for conditions which would otherwise flood
2744         * the system with interrupts and we only really need to know that the
2745         * conditions are "happening" ...  For each condition we count the
2746         * detection of the condition and reset it for the next polling loop.
2747         */
2748        for_each_port(adapter, port) {
2749                struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2750                u32 cause;
2751
2752                cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2753                reset = 0;
2754                if (cause & F_RXFIFO_OVERFLOW) {
2755                        mac->stats.rx_fifo_ovfl++;
2756                        reset |= F_RXFIFO_OVERFLOW;
2757                }
2758
2759                t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2760        }
2761
2762        /*
2763         * We do the same as above for FL_EMPTY interrupts.
2764         */
2765        status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2766        reset = 0;
2767
2768        if (status & F_FLEMPTY) {
2769                struct sge_qset *qs = &adapter->sge.qs[0];
2770                int i = 0;
2771
2772                reset |= F_FLEMPTY;
2773
2774                v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2775                    0xffff;
2776
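                /* The FL-empty status bits alternate between fl[0] and
                 * fl[1] of consecutive qsets. */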
2777                while (v) {
2778                        qs->fl[i].empty += (v & 1);
2779                        if (i)
2780                                qs++;
2781                        i ^= 1;
2782                        v >>= 1;
2783                }
2784        }
2785
2786        t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2787
2788        /* Schedule the next check update if any port is active. */
2789        spin_lock_irq(&adapter->work_lock);
2790        if (adapter->open_device_map & PORT_MASK)
2791                schedule_chk_task(adapter);
2792        spin_unlock_irq(&adapter->work_lock);
2793}
2794
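    /*
     * Doorbell FIFO state changes are forwarded to any registered offload
     * driver (such as iw_cxgb3) so it can react, e.g. by holding off
     * doorbell rings until the FIFO drains.
     */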
2795static void db_full_task(struct work_struct *work)
2796{
2797        struct adapter *adapter = container_of(work, struct adapter,
2798                                               db_full_task);
2799
2800        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
2801}
2802
2803static void db_empty_task(struct work_struct *work)
2804{
2805        struct adapter *adapter = container_of(work, struct adapter,
2806                                               db_empty_task);
2807
2808        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
2809}
2810
2811static void db_drop_task(struct work_struct *work)
2812{
2813        struct adapter *adapter = container_of(work, struct adapter,
2814                                               db_drop_task);
2815        unsigned long delay = 1000;
2816        unsigned short r;
2817
2818        cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
2819
2820        /*
2821         * Sleep a while before ringing the driver qset dbs.
2822         * The delay is between 1000 and 2023 usecs.
2823         */
2824        get_random_bytes(&r, sizeof(r));
2825        delay += r & 1023;
2826        set_current_state(TASK_UNINTERRUPTIBLE);
2827        schedule_timeout(usecs_to_jiffies(delay));
2828        ring_dbs(adapter);
2829}
2830
2831/*
2832 * Processes external (PHY) interrupts in process context.
2833 */
2834static void ext_intr_task(struct work_struct *work)
2835{
2836        struct adapter *adapter = container_of(work, struct adapter,
2837                                               ext_intr_handler_task);
2838        int i;
2839
2840        /* Disable link fault interrupts */
2841        for_each_port(adapter, i) {
2842                struct net_device *dev = adapter->port[i];
2843                struct port_info *p = netdev_priv(dev);
2844
2845                t3_xgm_intr_disable(adapter, i);
2846                t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2847        }
2848
2849        /* Re-enable link fault interrupts */
2850        t3_phy_intr_handler(adapter);
2851
2852        for_each_port(adapter, i)
2853                t3_xgm_intr_enable(adapter, i);
2854
2855        /* Now reenable external interrupts */
2856        spin_lock_irq(&adapter->work_lock);
2857        if (adapter->slow_intr_mask) {
2858                adapter->slow_intr_mask |= F_T3DBG;
2859                t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2860                t3_write_reg(adapter, A_PL_INT_ENABLE0,
2861                             adapter->slow_intr_mask);
2862        }
2863        spin_unlock_irq(&adapter->work_lock);
2864}
2865
2866/*
2867 * Interrupt-context handler for external (PHY) interrupts.
2868 */
2869void t3_os_ext_intr_handler(struct adapter *adapter)
2870{
2871        /*
2872         * Schedule a task to handle external interrupts as they may be slow
2873         * and we use a mutex to protect MDIO registers.  We disable PHY
2874         * interrupts in the meantime and let the task reenable them when
2875         * it's done.
2876         */
2877        spin_lock(&adapter->work_lock);
2878        if (adapter->slow_intr_mask) {
2879                adapter->slow_intr_mask &= ~F_T3DBG;
2880                t3_write_reg(adapter, A_PL_INT_ENABLE0,
2881                             adapter->slow_intr_mask);
2882                queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2883        }
2884        spin_unlock(&adapter->work_lock);
2885}
2886
2887void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2888{
2889        struct net_device *netdev = adapter->port[port_id];
2890        struct port_info *pi = netdev_priv(netdev);
2891
2892        spin_lock(&adapter->work_lock);
2893        pi->link_fault = 1;
2894        spin_unlock(&adapter->work_lock);
2895}
2896
2897static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
2898{
2899        int i, ret = 0;
2900
2901        if (is_offload(adapter) &&
2902            test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2903                cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2904                offload_close(&adapter->tdev);
2905        }
2906
2907        /* Stop all ports */
2908        for_each_port(adapter, i) {
2909                struct net_device *netdev = adapter->port[i];
2910
2911                if (netif_running(netdev))
2912                        __cxgb_close(netdev, on_wq);
2913        }
2914
2915        /* Stop SGE timers */
2916        t3_stop_sge_timers(adapter);
2917
2918        adapter->flags &= ~FULL_INIT_DONE;
2919
2920        if (reset)
2921                ret = t3_reset_adapter(adapter);
2922
2923        pci_disable_device(adapter->pdev);
2924
2925        return ret;
2926}
2927
2928static int t3_reenable_adapter(struct adapter *adapter)
2929{
2930        if (pci_enable_device(adapter->pdev)) {
2931                dev_err(&adapter->pdev->dev,
2932                        "Cannot re-enable PCI device after reset.\n");
2933                goto err;
2934        }
2935        pci_set_master(adapter->pdev);
2936        pci_restore_state(adapter->pdev);
2937        pci_save_state(adapter->pdev);
2938
2939        /* Free sge resources */
2940        t3_free_sge_resources(adapter);
2941
2942        if (t3_replay_prep_adapter(adapter))
2943                goto err;
2944
2945        return 0;
2946err:
2947        return -1;
2948}
2949
2950static void t3_resume_ports(struct adapter *adapter)
2951{
2952        int i;
2953
2954        /* Restart the ports */
2955        for_each_port(adapter, i) {
2956                struct net_device *netdev = adapter->port[i];
2957
2958                if (netif_running(netdev)) {
2959                        if (cxgb_open(netdev)) {
2960                                dev_err(&adapter->pdev->dev,
2961                                        "can't bring device back up after reset\n");
2963                                continue;
2964                        }
2965                }
2966        }
2967
2968        if (is_offload(adapter) && !ofld_disable)
2969                cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2970}
2971
2972/*
2973 * Process a fatal error: bring the ports down, reset the chip, then bring
2974 * the ports back up.
2975 */
2976static void fatal_error_task(struct work_struct *work)
2977{
2978        struct adapter *adapter = container_of(work, struct adapter,
2979                                               fatal_error_handler_task);
2980        int err = 0;
2981
2982        rtnl_lock();
2983        err = t3_adapter_error(adapter, 1, 1);
2984        if (!err)
2985                err = t3_reenable_adapter(adapter);
2986        if (!err)
2987                t3_resume_ports(adapter);
2988
2989        CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2990        rtnl_unlock();
2991}
2992
2993void t3_fatal_err(struct adapter *adapter)
2994{
2995        unsigned int fw_status[4];
2996
2997        if (adapter->flags & FULL_INIT_DONE) {
2998                t3_sge_stop_dma(adapter);
2999                t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
3000                t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
3001                t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
3002                t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
3003
3004                spin_lock(&adapter->work_lock);
3005                t3_intr_disable(adapter);
3006                queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
3007                spin_unlock(&adapter->work_lock);
3008        }
3009        CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
3010        if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
3011                CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
3012                         fw_status[0], fw_status[1],
3013                         fw_status[2], fw_status[3]);
3014}
3015
3016/**
3017 * t3_io_error_detected - called when PCI error is detected
3018 * @pdev: Pointer to PCI device
3019 * @state: The current PCI channel state
3020 *
3021 * This function is called after a PCI bus error affecting
3022 * this device has been detected.
3023 */
3024static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3025                                             pci_channel_state_t state)
3026{
3027        struct adapter *adapter = pci_get_drvdata(pdev);
3028
3029        if (state == pci_channel_io_perm_failure)
3030                return PCI_ERS_RESULT_DISCONNECT;
3031
3032        t3_adapter_error(adapter, 0, 0);
3033
3034        /* Request a slot reset. */
3035        return PCI_ERS_RESULT_NEED_RESET;
3036}
3037
3038/**
3039 * t3_io_slot_reset - called after the PCI bus has been reset.
3040 * @pdev: Pointer to PCI device
3041 *
3042 * Restart the card from scratch, as if from a cold boot.
3043 */
3044static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
3045{
3046        struct adapter *adapter = pci_get_drvdata(pdev);
3047
3048        if (!t3_reenable_adapter(adapter))
3049                return PCI_ERS_RESULT_RECOVERED;
3050
3051        return PCI_ERS_RESULT_DISCONNECT;
3052}
3053
3054/**
3055 * t3_io_resume - called when traffic can start flowing again.
3056 * @pdev: Pointer to PCI device
3057 *
3058 * This callback is called when the error recovery driver tells us that
3059 * it's OK to resume normal operation.
3060 */
3061static void t3_io_resume(struct pci_dev *pdev)
3062{
3063        struct adapter *adapter = pci_get_drvdata(pdev);
3064
3065        CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
3066                 t3_read_reg(adapter, A_PCIE_PEX_ERR));
3067
3068        rtnl_lock();
3069        t3_resume_ports(adapter);
3070        rtnl_unlock();
3071}
3072
3073static const struct pci_error_handlers t3_err_handler = {
3074        .error_detected = t3_io_error_detected,
3075        .slot_reset = t3_io_slot_reset,
3076        .resume = t3_io_resume,
3077};
3078
3079/*
3080 * Set the number of qsets based on the number of CPUs and the number of ports,
3081 * not to exceed the number of available qsets, assuming there are enough qsets
3082 * per port in HW.
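 *
 * Illustrative example, assuming SGE_QSETS is 8: on a rev > 0 two-port
 * adapter using MSI-X with 9 vectors, nqsets starts at 8 (one vector is
 * reserved for the slow path); 2 * 8 exceeds SGE_QSETS, so it is halved to
 * 4 per port, then clamped to the default RSS queue count, and forced to 1
 * on four-port cards.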
3083 */
3084static void set_nqsets(struct adapter *adap)
3085{
3086        int i, j = 0;
3087        int num_cpus = netif_get_num_default_rss_queues();
3088        int hwports = adap->params.nports;
3089        int nqsets = adap->msix_nvectors - 1;
3090
3091        if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
3092                if (hwports == 2 &&
3093                    (hwports * nqsets > SGE_QSETS ||
3094                     num_cpus >= nqsets / hwports))
3095                        nqsets /= hwports;
3096                if (nqsets > num_cpus)
3097                        nqsets = num_cpus;
3098                if (nqsets < 1 || hwports == 4)
3099                        nqsets = 1;
3100        } else {
3101                nqsets = 1;
3102        }
3103
3104        for_each_port(adap, i) {
3105                struct port_info *pi = adap2pinfo(adap, i);
3106
3107                pi->first_qset = j;
3108                pi->nqsets = nqsets;
3109                j = pi->first_qset + nqsets;
3110
3111                dev_info(&adap->pdev->dev,
3112                         "Port %d using %d queue sets.\n", i, nqsets);
3113        }
3114}
3115
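/*
 * Try to allocate MSI-X vectors: ideally one per queue set plus one for
 * the slow path (SGE_QSETS + 1 total), accepting as few as nports + 1.
 * On success the vectors are recorded in adap->msix_info and 0 is
 * returned.
 */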
3116static int cxgb_enable_msix(struct adapter *adap)
3117{
3118        struct msix_entry entries[SGE_QSETS + 1];
3119        int vectors;
3120        int i;
3121
3122        vectors = ARRAY_SIZE(entries);
3123        for (i = 0; i < vectors; ++i)
3124                entries[i].entry = i;
3125
3126        vectors = pci_enable_msix_range(adap->pdev, entries,
3127                                        adap->params.nports + 1, vectors);
3128        if (vectors < 0)
3129                return vectors;
3130
3131        for (i = 0; i < vectors; ++i)
3132                adap->msix_info[i].vec = entries[i].vector;
3133        adap->msix_nvectors = vectors;
3134
3135        return 0;
3136}
3137
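/*
 * Log a one-line summary for each registered port (adapter type, PHY, bus
 * mode, interrupt mode) and, for the first registered port, the on-card
 * memory sizes and serial number.
 */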
3138static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
3139{
3140        static const char *pci_variant[] = {
3141                "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
3142        };
3143
3144        int i;
3145        char buf[80];
3146
3147        if (is_pcie(adap))
3148                snprintf(buf, sizeof(buf), "%s x%d",
3149                         pci_variant[adap->params.pci.variant],
3150                         adap->params.pci.width);
3151        else
3152                snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
3153                         pci_variant[adap->params.pci.variant],
3154                         adap->params.pci.speed, adap->params.pci.width);
3155
3156        for_each_port(adap, i) {
3157                struct net_device *dev = adap->port[i];
3158                const struct port_info *pi = netdev_priv(dev);
3159
3160                if (!test_bit(i, &adap->registered_device_map))
3161                        continue;
3162                netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
3163                            ai->desc, pi->phy.desc,
3164                            is_offload(adap) ? "R" : "", adap->params.rev, buf,
3165                            (adap->flags & USING_MSIX) ? " MSI-X" :
3166                            (adap->flags & USING_MSI) ? " MSI" : "");
3167                if (adap->name == dev->name && adap->params.vpd.mclk)
3168                        pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
3169                               adap->name, t3_mc7_size(&adap->cm) >> 20,
3170                               t3_mc7_size(&adap->pmtx) >> 20,
3171                               t3_mc7_size(&adap->pmrx) >> 20,
3172                               adap->params.vpd.sn);
3173        }
3174}
3175
3176static const struct net_device_ops cxgb_netdev_ops = {
3177        .ndo_open               = cxgb_open,
3178        .ndo_stop               = cxgb_close,
3179        .ndo_start_xmit         = t3_eth_xmit,
3180        .ndo_get_stats          = cxgb_get_stats,
3181        .ndo_validate_addr      = eth_validate_addr,
3182        .ndo_set_rx_mode        = cxgb_set_rxmode,
3183        .ndo_eth_ioctl          = cxgb_ioctl,
3184        .ndo_siocdevprivate     = cxgb_siocdevprivate,
3185        .ndo_change_mtu         = cxgb_change_mtu,
3186        .ndo_set_mac_address    = cxgb_set_mac_addr,
3187        .ndo_fix_features       = cxgb_fix_features,
3188        .ndo_set_features       = cxgb_set_features,
3189#ifdef CONFIG_NET_POLL_CONTROLLER
3190        .ndo_poll_controller    = cxgb_netpoll,
3191#endif
3192};
3193
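/*
 * Derive the port's iSCSI MAC address from its Ethernet address by setting
 * the top bit of the fourth byte, keeping the two addresses distinct.
 */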
3194static void cxgb3_init_iscsi_mac(struct net_device *dev)
3195{
3196        struct port_info *pi = netdev_priv(dev);
3197
3198        memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3199        pi->iscsic.mac_addr[3] |= 0x80;
3200}
3201
3202#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3203#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3204                        NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
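/*
 * init_one - PCI probe routine.  Enables and maps the device, allocates
 * the adapter and one net device per port, prepares the hardware,
 * registers the net devices, picks the interrupt mode (MSI-X, MSI, or
 * INTx), and creates the sysfs attribute group.  Ports that fail to
 * register are skipped as long as at least one registers successfully.
 */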
3205static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3206{
3207        int i, err;
3208        resource_size_t mmio_start, mmio_len;
3209        const struct adapter_info *ai;
3210        struct adapter *adapter = NULL;
3211        struct port_info *pi;
3212
3213        if (!cxgb3_wq) {
3214                cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3215                if (!cxgb3_wq) {
3216                        pr_err("cannot initialize work queue\n");
3217                        return -ENOMEM;
3218                }
3219        }
3220
3221        err = pci_enable_device(pdev);
3222        if (err) {
3223                dev_err(&pdev->dev, "cannot enable PCI device\n");
3224                goto out;
3225        }
3226
3227        err = pci_request_regions(pdev, DRV_NAME);
3228        if (err) {
3229                /* Just info, some other driver may have claimed the device. */
3230                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3231                goto out_disable_device;
3232        }
3233
3234        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3235        if (err) {
3236                dev_err(&pdev->dev, "no usable DMA configuration\n");
3237                goto out_release_regions;
3238        }
3239
3240        pci_set_master(pdev);
3241        pci_save_state(pdev);
3242
3243        mmio_start = pci_resource_start(pdev, 0);
3244        mmio_len = pci_resource_len(pdev, 0);
3245        ai = t3_get_adapter_info(ent->driver_data);
3246
3247        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3248        if (!adapter) {
3249                err = -ENOMEM;
3250                goto out_release_regions;
3251        }
3252
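        /*
         * Preallocated skb sized for a CPL set-TCB-field message, kept so
         * that a critical control message can still be sent later should a
         * normal allocation fail at that point.
         */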
3253        adapter->nofail_skb =
3254                alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3255        if (!adapter->nofail_skb) {
3256                dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3257                err = -ENOMEM;
3258                goto out_free_adapter;
3259        }
3260
3261        adapter->regs = ioremap(mmio_start, mmio_len);
3262        if (!adapter->regs) {
3263                dev_err(&pdev->dev, "cannot map device registers\n");
3264                err = -ENOMEM;
3265                goto out_free_adapter_nofail;
3266        }
3267
3268        adapter->pdev = pdev;
3269        adapter->name = pci_name(pdev);
3270        adapter->msg_enable = dflt_msg_enable;
3271        adapter->mmio_len = mmio_len;
3272
3273        mutex_init(&adapter->mdio_lock);
3274        spin_lock_init(&adapter->work_lock);
3275        spin_lock_init(&adapter->stats_lock);
3276
3277        INIT_LIST_HEAD(&adapter->adapter_list);
3278        INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3279        INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3280
3281        INIT_WORK(&adapter->db_full_task, db_full_task);
3282        INIT_WORK(&adapter->db_empty_task, db_empty_task);
3283        INIT_WORK(&adapter->db_drop_task, db_drop_task);
3284
3285        INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3286
3287        for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3288                struct net_device *netdev;
3289
3290                netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3291                if (!netdev) {
3292                        err = -ENOMEM;
3293                        goto out_free_dev;
3294                }
3295
3296                SET_NETDEV_DEV(netdev, &pdev->dev);
3297
3298                adapter->port[i] = netdev;
3299                pi = netdev_priv(netdev);
3300                pi->adapter = adapter;
3301                pi->port_id = i;
3302                netif_carrier_off(netdev);
3303                netdev->irq = pdev->irq;
3304                netdev->mem_start = mmio_start;
3305                netdev->mem_end = mmio_start + mmio_len - 1;
3306                netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
3307                        NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
3308                netdev->features |= netdev->hw_features |
3309                                    NETIF_F_HW_VLAN_CTAG_TX;
3310                netdev->vlan_features |= netdev->features & VLAN_FEAT;
3311
3312                netdev->features |= NETIF_F_HIGHDMA;
3313
3314                netdev->netdev_ops = &cxgb_netdev_ops;
3315                netdev->ethtool_ops = &cxgb_ethtool_ops;
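                /* MTU range: 81 - 65535 */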
3316                netdev->min_mtu = 81;
3317                netdev->max_mtu = ETH_MAX_MTU;
3318                netdev->dev_port = pi->port_id;
3319        }
3320
3321        pci_set_drvdata(pdev, adapter);
3322        if (t3_prep_adapter(adapter, ai, 1) < 0) {
3323                err = -ENODEV;
3324                goto out_free_dev;
3325        }
3326
3327        /*
3328         * The card is now ready to go.  If any errors occur during device
3329         * registration we do not fail the whole card but rather proceed only
3330         * with the ports we manage to register successfully.  However, we must
3331         * register at least one net device.
3332         */
3333        for_each_port(adapter, i) {
3334                err = register_netdev(adapter->port[i]);
3335                if (err) {
3336                        dev_warn(&pdev->dev,
3337                                 "cannot register net device %s, skipping\n",
3338                                 adapter->port[i]->name);
3339                } else {
3340                        /*
3341                         * Change the name we use for messages to the name of
3342                         * the first successfully registered interface.
3343                         */
3344                        if (!adapter->registered_device_map)
3345                                adapter->name = adapter->port[i]->name;
3346
3347                        __set_bit(i, &adapter->registered_device_map);
3348                }
3349        }
3350        if (!adapter->registered_device_map) {
3351                dev_err(&pdev->dev, "could not register any net devices\n");
3352                err = -ENODEV;
3353                goto out_free_dev;
3354        }
3355
3356        for_each_port(adapter, i)
3357                cxgb3_init_iscsi_mac(adapter->port[i]);
3358
3359        /* Driver's ready. Reflect it on LEDs */
3360        t3_led_ready(adapter);
3361
3362        if (is_offload(adapter)) {
3363                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3364                cxgb3_adapter_ofld(adapter);
3365        }
3366
3367        /* See what interrupts we'll be using */
3368        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3369                adapter->flags |= USING_MSIX;
3370        else if (msi > 0 && pci_enable_msi(pdev) == 0)
3371                adapter->flags |= USING_MSI;
3372
3373        set_nqsets(adapter);
3374
3375        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3376                                 &cxgb3_attr_group);
3377        if (err) {
3378                dev_err(&pdev->dev, "cannot create sysfs group\n");
3379                goto out_close_led;
3380        }
3381
3382        print_port_info(adapter, ai);
3383        return 0;
3384
3385out_close_led:
3386        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
3387
3388out_free_dev:
3389        iounmap(adapter->regs);
3390        for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3391                if (adapter->port[i])
3392                        free_netdev(adapter->port[i]);
3393
3394out_free_adapter_nofail:
3395        kfree_skb(adapter->nofail_skb);
3396
3397out_free_adapter:
3398        kfree(adapter);
3399
3400out_release_regions:
3401        pci_release_regions(pdev);
3402out_disable_device:
3403        pci_disable_device(pdev);
3404out:
3405        return err;
3406}
3407
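/*
 * remove_one - PCI remove routine.  Tears down everything init_one() set
 * up: stops the SGE, removes the sysfs group, shuts down offload,
 * unregisters and frees the net devices, releases interrupts and SGE
 * resources, and disables the PCI device.
 */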
3408static void remove_one(struct pci_dev *pdev)
3409{
3410        struct adapter *adapter = pci_get_drvdata(pdev);
3411
3412        if (adapter) {
3413                int i;
3414
3415                t3_sge_stop(adapter);
3416                sysfs_remove_group(&adapter->port[0]->dev.kobj,
3417                                   &cxgb3_attr_group);
3418
3419                if (is_offload(adapter)) {
3420                        cxgb3_adapter_unofld(adapter);
3421                        if (test_bit(OFFLOAD_DEVMAP_BIT,
3422                                     &adapter->open_device_map))
3423                                offload_close(&adapter->tdev);
3424                }
3425
3426                for_each_port(adapter, i)
3427                        if (test_bit(i, &adapter->registered_device_map))
3428                                unregister_netdev(adapter->port[i]);
3429
3430                t3_stop_sge_timers(adapter);
3431                t3_free_sge_resources(adapter);
3432                cxgb_disable_msi(adapter);
3433
3434                for_each_port(adapter, i)
3435                        if (adapter->port[i])
3436                                free_netdev(adapter->port[i]);
3437
3438                iounmap(adapter->regs);
3439                kfree_skb(adapter->nofail_skb);
3440                kfree(adapter);
3441                pci_release_regions(pdev);
3442                pci_disable_device(pdev);
3443        }
3444}
3445
3446static struct pci_driver driver = {
3447        .name = DRV_NAME,
3448        .id_table = cxgb3_pci_tbl,
3449        .probe = init_one,
3450        .remove = remove_one,
3451        .err_handler = &t3_err_handler,
3452};
3453
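/* Module init: initialise the offload layer, then register the PCI driver. */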
3454static int __init cxgb3_init_module(void)
3455{
3456        cxgb3_offload_init();
3457
3458        return pci_register_driver(&driver);
3462}
3463
3464static void __exit cxgb3_cleanup_module(void)
3465{
3466        pci_unregister_driver(&driver);
3467        if (cxgb3_wq)
3468                destroy_workqueue(cxgb3_wq);
3469}
3470
3471module_init(cxgb3_init_module);
3472module_exit(cxgb3_cleanup_module);
3473