linux/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#include <../drivers/net/bonding/bonding.h>

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
        /*
         * Physical Function provisioning constants.
         */
        PFRES_NVI = 4,                  /* # of Virtual Interfaces */
        PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
        PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr */
        PFRES_NEQ = 256,                /* # of egress queues */
        PFRES_NIQ = 0,                  /* # of ingress queues */
        PFRES_TC = 0,                   /* PCI-E traffic class */
        PFRES_NEXACTF = 128,            /* # of exact MPS filters */

        PFRES_R_CAPS = FW_CMD_CAP_PF,
        PFRES_WX_CAPS = FW_CMD_CAP_PF,

#ifdef CONFIG_PCI_IOV
        /*
         * Virtual Function provisioning constants.  We need two extra Ingress
         * Queues with Interrupt capability to serve as the VF's Firmware
         * Event Queue and Forwarded Interrupt Queue (when using MSI mode);
         * neither will have Free Lists associated with it.  For each
         * Ethernet/Control Egress Queue and for each Free List, we need an
         * Egress Context.
         */
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */

        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
        VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */

        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
#endif
};

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
                                  unsigned int pf, unsigned int vf)
{
        unsigned int portn, portvec;

        /*
         * Give PFs access to all of the ports.
         */
        if (vf == 0)
                return FW_PFVF_CMD_PMASK_MASK;

        /*
         * For VFs, we'll assign them access to the ports based purely on the
         * PF.  We assign active ports in order, wrapping around if there are
         * fewer active ports than PFs: e.g. active port[pf % nports].
         * Unfortunately the adapter's port_info structs haven't been
         * initialized yet so we have to compute this.
         */
        if (adapter->params.nports == 0)
                return 0;

        portn = pf % adapter->params.nports;
        portvec = adapter->params.portvec;
        for (;;) {
                /*
                 * Isolate the lowest set bit in the port vector.  If we're at
                 * the port number that we want, return that as the pmask;
                 * otherwise mask that bit out of the port vector and
                 * decrement our port number ...
                 */
                unsigned int pmask = portvec ^ (portvec & (portvec-1));
                if (portn == 0)
                        return pmask;
                portn--;
                portvec &= ~pmask;
        }
        /*NOTREACHED*/
}
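
/* A worked example of the bit trick used in the loop above (illustrative
 * only): with portvec = 0b0110, portvec - 1 = 0b0101, so
 * portvec & (portvec - 1) = 0b0100 and the XOR leaves 0b0010, the lowest
 * set bit.  This is equivalent to the common idiom portvec & -portvec.
 */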

enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES     = 16384,
        MAX_RX_BUFFERS       = 16384,
        MIN_TXQ_ENTRIES      = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES     = 128,
        MIN_FL_ENTRIES       = 16
};

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
        /* Administrative fields for filter.
         */
        u32 valid:1;            /* filter allocated and valid */
        u32 locked:1;           /* filter is administratively locked */

        u32 pending:1;          /* filter action is pending firmware reply */
        u32 smtidx:8;           /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        /* The filter itself.  Most of this is a straight copy of information
         * provided by the extended ioctl().  Some fields are translated to
         * internal forms -- for instance the Ingress Queue ID passed in from
         * the ioctl() is translated into the Absolute Ingress Queue ID.
         */
        struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0xa000, 0),  /* PE10K */
        CH_DEVICE(0x4001, -1),
        CH_DEVICE(0x4002, -1),
        CH_DEVICE(0x4003, -1),
        CH_DEVICE(0x4004, -1),
        CH_DEVICE(0x4005, -1),
        CH_DEVICE(0x4006, -1),
        CH_DEVICE(0x4007, -1),
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
        CH_DEVICE(0x4404, 4),
        CH_DEVICE(0x4405, 4),
        CH_DEVICE(0x4406, 4),
        CH_DEVICE(0x4407, 4),
        CH_DEVICE(0x4408, 4),
        CH_DEVICE(0x4409, 4),
        CH_DEVICE(0x440a, 4),
        CH_DEVICE(0x440d, 4),
        CH_DEVICE(0x440e, 4),
        CH_DEVICE(0x5001, 4),
        CH_DEVICE(0x5002, 4),
        CH_DEVICE(0x5003, 4),
        CH_DEVICE(0x5004, 4),
        CH_DEVICE(0x5005, 4),
        CH_DEVICE(0x5006, 4),
        CH_DEVICE(0x5007, 4),
        CH_DEVICE(0x5008, 4),
        CH_DEVICE(0x5009, 4),
        CH_DEVICE(0x500A, 4),
        CH_DEVICE(0x500B, 4),
        CH_DEVICE(0x500C, 4),
        CH_DEVICE(0x500D, 4),
        CH_DEVICE(0x500E, 4),
        CH_DEVICE(0x500F, 4),
        CH_DEVICE(0x5010, 4),
        CH_DEVICE(0x5011, 4),
        CH_DEVICE(0x5012, 4),
        CH_DEVICE(0x5013, 4),
        CH_DEVICE(0x5401, 4),
        CH_DEVICE(0x5402, 4),
        CH_DEVICE(0x5403, 4),
        CH_DEVICE(0x5404, 4),
        CH_DEVICE(0x5405, 4),
        CH_DEVICE(0x5406, 4),
        CH_DEVICE(0x5407, 4),
        CH_DEVICE(0x5408, 4),
        CH_DEVICE(0x5409, 4),
        CH_DEVICE(0x540A, 4),
        CH_DEVICE(0x540B, 4),
        CH_DEVICE(0x540C, 4),
        CH_DEVICE(0x540D, 4),
        CH_DEVICE(0x540E, 4),
        CH_DEVICE(0x540F, 4),
        CH_DEVICE(0x5410, 4),
        CH_DEVICE(0x5411, 4),
        CH_DEVICE(0x5412, 4),
        CH_DEVICE(0x5413, 4),
        { 0, }
};

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");
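
/* For illustration only: module_param_array() parameters are given as
 * comma-separated lists on the module command line, so a load-time override
 * of the two arrays above might look like (hypothetical values):
 *
 *      modprobe cxgb4 intr_holdoff=5,10,20,50,100 intr_cnt=4,8,16
 */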

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
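
/* The arithmetic behind the default of 2: a standard Ethernet header is
 * 14 bytes, so starting the frame at a 2-byte offset ends the Ethernet
 * header on a 16-byte boundary and leaves the 4-byte IP header fields
 * naturally aligned.
 */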

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *     [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
        TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
        TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
        TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};
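
/* Adding up the default selection above: 1 (IP Fragment) + 3 (MPS Match
 * Type) + 8 (IP Protocol) + 17 ([Inner] VLAN) + 3 (Port) + 1 (FCoE) =
 * 33 bits, which fits in the 36-bit compressed-header budget with three
 * bits to spare.
 */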

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

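        /* Addresses are pushed to the firmware in batches of up to
         * ARRAY_SIZE(addr) (7) entries; "free" is true only for the first
         * batch so that previously programmed filters are released exactly
         * once.
         */
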
        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}

static struct workqueue_struct *workq;

/**
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0)
                ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
        return ret;
}

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
        /* If the new or old filter has loopback rewriting rules then we'll
         * need to free any existing Layer Two Table (L2T) entries of the old
         * filter rule.  The firmware will handle freeing up any Source MAC
         * Table (SMT) entries used for rewriting Source MAC Addresses in
         * loopback rules.
         */
        if (f->l2t)
                cxgb4_l2t_release(f->l2t);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags, l2t pointer, etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        unsigned int idx = GET_TID(rpl);
        unsigned int nidx = idx - adap->tids.ftid_base;
        unsigned int ret;
        struct filter_entry *f;

        if (idx >= adap->tids.ftid_base &&
            nidx < (adap->tids.nftids + adap->tids.nsftids)) {
                idx = nidx;
                ret = GET_TCB_COOKIE(rpl->cookie);
                f = &adap->tids.ftid_tab[idx];

                if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /* Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
                        dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
                                idx);
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        /* Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
                                idx, ret);
                        clear_filter(adap, f);
                }
        }
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adap->pdev_dev,
                                "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
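                /* This address comparison relies on struct sge laying out
                 * the Ethernet TX queues (ethtxq[]) before the offload TX
                 * queues (ofldtxq[]), so anything below ofldtxq must be an
                 * Ethernet queue.
                 */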
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

                if (p->type == 0)
                        t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
out:
        return 0;
}

/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
         */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
        if (v & PFSW) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}

/**
 *      write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;

        err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
}
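
/* Note that the @queues entries above are indices relative to the port's
 * first queue set; write_rss() translates each to the absolute response
 * queue ID (rspq.abs_id) that the hardware RSS table actually stores.
 */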

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                err = write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}

/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
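                /* No MSI-X: encode the interrupt queue's absolute ID as
                 * -(abs_id + 1).  Queues allocated below see the negative
                 * value and are set up to forward their interrupts to this
                 * queue rather than take a dedicated vector.
                 */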
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
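
/* A minimal usage sketch for the two helpers above (hypothetical caller,
 * not code from this driver):
 *
 *      u16 *tbl = t4_alloc_mem(nentries * sizeof(*tbl));
 *      if (!tbl)
 *              return -ENOMEM;
 *      ...
 *      t4_free_mem(tbl);
 *
 * t4_free_mem() keys off is_vmalloc_addr(), so callers need not remember
 * which allocator actually satisfied the request.
 */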

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = t4_l2t_alloc_switching(adapter->l2t);
                if (f->l2t == NULL)
                        return -EAGAIN;
                if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
                                        f->fs.eport, f->fs.dmac)) {
                        cxgb4_l2t_release(f->l2t);
                        f->l2t = NULL;
                        return -ENOMEM;
                }
        }

        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
        memset(fwr, 0, sizeof(*fwr));

        /* It would be nice to put most of the following in t4_hw.c but most
         * of the work is translating the cxgbtool ch_filter_specification
         * into the Work Request and the definition of that structure is
         * currently in cxgbtool.h which isn't appropriate to pull into the
         * common code.  We may eventually try to come up with a more neutral
         * filter specification structure but for now it's easiest to simply
         * put this fairly direct code in line ...
         */
        fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
        fwr->tid_to_iq =
                htonl(V_FW_FILTER_WR_TID(ftid) |
                      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                      V_FW_FILTER_WR_NOREPLY(0) |
                      V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                                             f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                                            f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                      V_FW_FILTER_WR_PRIO(f->fs.prio) |
                      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htons(f->fs.val.ethtype);
        fwr->ethtypem = htons(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
                 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                htons(V_FW_FILTER_WR_RX_CHAN(0) |
                      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htons(f->fs.val.ivlan);
        fwr->ivlanm = htons(f->fs.mask.ivlan);
        fwr->ovlan = htons(f->fs.val.ovlan);
        fwr->ovlanm = htons(f->fs.mask.ovlan);
        memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = htons(f->fs.val.lport);
        fwr->lpm = htons(f->fs.mask.lport);
        fwr->fp = htons(f->fs.val.fport);
        fwr->fpm = htons(f->fs.mask.fport);
        if (f->fs.newsmac)
                memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
        t4_ofld_send(adapter, skb);
        return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int len, ftid;

        len = sizeof(*fwr);
        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, len);
        t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(adapter, skb);
        return 0;
}

static inline int is_offload(const struct adapter *adap)
{
        return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
        return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxBroadcastFrames  ",
        "TxMulticastFrames  ",
        "TxUnicastFrames    ",
        "TxErrorFrames      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "TxFramesDropped    ",
        "TxPauseFrames      ",
        "TxPPP0Frames       ",
        "TxPPP1Frames       ",
        "TxPPP2Frames       ",
        "TxPPP3Frames       ",
        "TxPPP4Frames       ",
        "TxPPP5Frames       ",
        "TxPPP6Frames       ",
        "TxPPP7Frames       ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxBroadcastFrames  ",
        "RxMulticastFrames  ",
        "RxUnicastFrames    ",

        "RxFramesTooLong    ",
        "RxJabberErrors     ",
        "RxFCSErrors        ",
        "RxLengthErrors     ",
        "RxSymbolErrors     ",
        "RxRuntFrames       ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "RxPauseFrames      ",
        "RxPPP0Frames       ",
        "RxPPP1Frames       ",
        "RxPPP2Frames       ",
        "RxPPP3Frames       ",
        "RxPPP4Frames       ",
        "RxPPP5Frames       ",
        "RxPPP6Frames       ",
        "RxPPP7Frames       ",

        "RxBG0FramesDropped ",
        "RxBG1FramesDropped ",
        "RxBG2FramesDropped ",
        "RxBG3FramesDropped ",
        "RxBG0FramesTrunc   ",
        "RxBG1FramesTrunc   ",
        "RxBG2FramesTrunc   ",
        "RxBG3FramesTrunc   ",

        "TSO                ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "GROpackets         ",
        "GROmerged          ",
        "WriteCoalSuccess   ",
        "WriteCoalFail      ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
        struct adapter *adap = netdev2adap(dev);

        if (is_t4(adap->params.chip))
                return T4_REGMAP_SIZE;
        else
                return T5_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = netdev2adap(dev);

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));

        if (adapter->params.fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                        "%u.%u.%u.%u, TP %u.%u.%u.%u",
                        FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
                        FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
                        FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
                        FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
        u64 tso;
        u64 tx_csum;
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
        u64 gro_pkts;
        u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
                const struct port_info *p, struct queue_port_stats *s)
{
        int i;
        const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
        const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

        memset(s, 0, sizeof(*s));
        for (i = 0; i < p->nqsets; i++, rx++, tx++) {
                s->tso += tx->tso;
                s->tx_csum += tx->tx_cso;
                s->rx_csum += rx->stats.rx_cso;
                s->vlan_ex += rx->stats.vlan_ex;
                s->vlan_ins += tx->vlan_ins;
                s->gro_pkts += rx->stats.lro_pkts;
                s->gro_merged += rx->stats.lro_merged;
        }
}
1423
1424static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1425                      u64 *data)
1426{
1427        struct port_info *pi = netdev_priv(dev);
1428        struct adapter *adapter = pi->adapter;
1429        u32 val1, val2;
1430
1431        t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1432
1433        data += sizeof(struct port_stats) / sizeof(u64);
1434        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1435        data += sizeof(struct queue_port_stats) / sizeof(u64);
1436        if (!is_t4(adapter->params.chip)) {
1437                t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1438                val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1439                val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1440                *data = val1 - val2;
1441                data++;
1442                *data = val2;
1443                data++;
1444        } else {
1445                memset(data, 0, 2 * sizeof(u64));
1446                data += 2;
1447        }
1448}
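/*
 * Editor's note (illustrative sketch, not part of the driver): get_stats()
 * above fills the ethtool buffer in the same order as stats_strings: the
 * hardware port stats first, then the per-queue software stats, then the
 * two write-coalescing slots.  On T5 those two are derived from a pair of
 * SGE counters; on T4 they are simply zeroed.  The derivation, restated:
 */
static inline void wr_coal_stats(u32 total, u32 match, u64 *success, u64 *fail)
{
        *success = total - match;       /* reported as "WriteCoalSuccess" */
        *fail = match;                  /* reported as "WriteCoalFail" */
}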
1449
1450/*
1451 * Return a version number to identify the type of adapter.  The scheme is:
1452 * - bits 0..9: chip version
1453 * - bits 10..15: chip revision
1454 * - bits 16..23: register dump version
1455 */
1456static inline unsigned int mk_adap_vers(const struct adapter *ap)
1457{
1458        return CHELSIO_CHIP_VERSION(ap->params.chip) |
1459                (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1460}
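/*
 * Editor's note: an illustrative decoder for the scheme above (standalone
 * sketch, not driver code); field widths follow the comment on
 * mk_adap_vers():
 */
static inline void decode_adap_vers(unsigned int v, unsigned int *chip_ver,
                                    unsigned int *chip_rev,
                                    unsigned int *dump_ver)
{
        *chip_ver = v & 0x3ff;          /* bits 0..9 */
        *chip_rev = (v >> 10) & 0x3f;   /* bits 10..15 */
        *dump_ver = (v >> 16) & 0xff;   /* bits 16..23 */
}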
1461
1462static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1463                           unsigned int end)
1464{
1465        u32 *p = buf + start;
1466
1467        for ( ; start <= end; start += sizeof(u32))
1468                *p++ = t4_read_reg(ap, start);
1469}
1470
1471static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1472                     void *buf)
1473{
1474        static const unsigned int t4_reg_ranges[] = {
1475                0x1008, 0x1108,
1476                0x1180, 0x11b4,
1477                0x11fc, 0x123c,
1478                0x1300, 0x173c,
1479                0x1800, 0x18fc,
1480                0x3000, 0x30d8,
1481                0x30e0, 0x5924,
1482                0x5960, 0x59d4,
1483                0x5a00, 0x5af8,
1484                0x6000, 0x6098,
1485                0x6100, 0x6150,
1486                0x6200, 0x6208,
1487                0x6240, 0x6248,
1488                0x6280, 0x6338,
1489                0x6370, 0x638c,
1490                0x6400, 0x643c,
1491                0x6500, 0x6524,
1492                0x6a00, 0x6a38,
1493                0x6a60, 0x6a78,
1494                0x6b00, 0x6b84,
1495                0x6bf0, 0x6c84,
1496                0x6cf0, 0x6d84,
1497                0x6df0, 0x6e84,
1498                0x6ef0, 0x6f84,
1499                0x6ff0, 0x7084,
1500                0x70f0, 0x7184,
1501                0x71f0, 0x7284,
1502                0x72f0, 0x7384,
1503                0x73f0, 0x7450,
1504                0x7500, 0x7530,
1505                0x7600, 0x761c,
1506                0x7680, 0x76cc,
1507                0x7700, 0x7798,
1508                0x77c0, 0x77fc,
1509                0x7900, 0x79fc,
1510                0x7b00, 0x7c38,
1511                0x7d00, 0x7efc,
1512                0x8dc0, 0x8e1c,
1513                0x8e30, 0x8e78,
1514                0x8ea0, 0x8f6c,
1515                0x8fc0, 0x9074,
1516                0x90fc, 0x90fc,
1517                0x9400, 0x9458,
1518                0x9600, 0x96bc,
1519                0x9800, 0x9808,
1520                0x9820, 0x983c,
1521                0x9850, 0x9864,
1522                0x9c00, 0x9c6c,
1523                0x9c80, 0x9cec,
1524                0x9d00, 0x9d6c,
1525                0x9d80, 0x9dec,
1526                0x9e00, 0x9e6c,
1527                0x9e80, 0x9eec,
1528                0x9f00, 0x9f6c,
1529                0x9f80, 0x9fec,
1530                0xd004, 0xd03c,
1531                0xdfc0, 0xdfe0,
1532                0xe000, 0xea7c,
1533                0xf000, 0x11190,
1534                0x19040, 0x1906c,
1535                0x19078, 0x19080,
1536                0x1908c, 0x19124,
1537                0x19150, 0x191b0,
1538                0x191d0, 0x191e8,
1539                0x19238, 0x1924c,
1540                0x193f8, 0x19474,
1541                0x19490, 0x194f8,
1542                0x19800, 0x19f30,
1543                0x1a000, 0x1a06c,
1544                0x1a0b0, 0x1a120,
1545                0x1a128, 0x1a138,
1546                0x1a190, 0x1a1c4,
1547                0x1a1fc, 0x1a1fc,
1548                0x1e040, 0x1e04c,
1549                0x1e284, 0x1e28c,
1550                0x1e2c0, 0x1e2c0,
1551                0x1e2e0, 0x1e2e0,
1552                0x1e300, 0x1e384,
1553                0x1e3c0, 0x1e3c8,
1554                0x1e440, 0x1e44c,
1555                0x1e684, 0x1e68c,
1556                0x1e6c0, 0x1e6c0,
1557                0x1e6e0, 0x1e6e0,
1558                0x1e700, 0x1e784,
1559                0x1e7c0, 0x1e7c8,
1560                0x1e840, 0x1e84c,
1561                0x1ea84, 0x1ea8c,
1562                0x1eac0, 0x1eac0,
1563                0x1eae0, 0x1eae0,
1564                0x1eb00, 0x1eb84,
1565                0x1ebc0, 0x1ebc8,
1566                0x1ec40, 0x1ec4c,
1567                0x1ee84, 0x1ee8c,
1568                0x1eec0, 0x1eec0,
1569                0x1eee0, 0x1eee0,
1570                0x1ef00, 0x1ef84,
1571                0x1efc0, 0x1efc8,
1572                0x1f040, 0x1f04c,
1573                0x1f284, 0x1f28c,
1574                0x1f2c0, 0x1f2c0,
1575                0x1f2e0, 0x1f2e0,
1576                0x1f300, 0x1f384,
1577                0x1f3c0, 0x1f3c8,
1578                0x1f440, 0x1f44c,
1579                0x1f684, 0x1f68c,
1580                0x1f6c0, 0x1f6c0,
1581                0x1f6e0, 0x1f6e0,
1582                0x1f700, 0x1f784,
1583                0x1f7c0, 0x1f7c8,
1584                0x1f840, 0x1f84c,
1585                0x1fa84, 0x1fa8c,
1586                0x1fac0, 0x1fac0,
1587                0x1fae0, 0x1fae0,
1588                0x1fb00, 0x1fb84,
1589                0x1fbc0, 0x1fbc8,
1590                0x1fc40, 0x1fc4c,
1591                0x1fe84, 0x1fe8c,
1592                0x1fec0, 0x1fec0,
1593                0x1fee0, 0x1fee0,
1594                0x1ff00, 0x1ff84,
1595                0x1ffc0, 0x1ffc8,
1596                0x20000, 0x2002c,
1597                0x20100, 0x2013c,
1598                0x20190, 0x201c8,
1599                0x20200, 0x20318,
1600                0x20400, 0x20528,
1601                0x20540, 0x20614,
1602                0x21000, 0x21040,
1603                0x2104c, 0x21060,
1604                0x210c0, 0x210ec,
1605                0x21200, 0x21268,
1606                0x21270, 0x21284,
1607                0x212fc, 0x21388,
1608                0x21400, 0x21404,
1609                0x21500, 0x21518,
1610                0x2152c, 0x2153c,
1611                0x21550, 0x21554,
1612                0x21600, 0x21600,
1613                0x21608, 0x21628,
1614                0x21630, 0x2163c,
1615                0x21700, 0x2171c,
1616                0x21780, 0x2178c,
1617                0x21800, 0x21c38,
1618                0x21c80, 0x21d7c,
1619                0x21e00, 0x21e04,
1620                0x22000, 0x2202c,
1621                0x22100, 0x2213c,
1622                0x22190, 0x221c8,
1623                0x22200, 0x22318,
1624                0x22400, 0x22528,
1625                0x22540, 0x22614,
1626                0x23000, 0x23040,
1627                0x2304c, 0x23060,
1628                0x230c0, 0x230ec,
1629                0x23200, 0x23268,
1630                0x23270, 0x23284,
1631                0x232fc, 0x23388,
1632                0x23400, 0x23404,
1633                0x23500, 0x23518,
1634                0x2352c, 0x2353c,
1635                0x23550, 0x23554,
1636                0x23600, 0x23600,
1637                0x23608, 0x23628,
1638                0x23630, 0x2363c,
1639                0x23700, 0x2371c,
1640                0x23780, 0x2378c,
1641                0x23800, 0x23c38,
1642                0x23c80, 0x23d7c,
1643                0x23e00, 0x23e04,
1644                0x24000, 0x2402c,
1645                0x24100, 0x2413c,
1646                0x24190, 0x241c8,
1647                0x24200, 0x24318,
1648                0x24400, 0x24528,
1649                0x24540, 0x24614,
1650                0x25000, 0x25040,
1651                0x2504c, 0x25060,
1652                0x250c0, 0x250ec,
1653                0x25200, 0x25268,
1654                0x25270, 0x25284,
1655                0x252fc, 0x25388,
1656                0x25400, 0x25404,
1657                0x25500, 0x25518,
1658                0x2552c, 0x2553c,
1659                0x25550, 0x25554,
1660                0x25600, 0x25600,
1661                0x25608, 0x25628,
1662                0x25630, 0x2563c,
1663                0x25700, 0x2571c,
1664                0x25780, 0x2578c,
1665                0x25800, 0x25c38,
1666                0x25c80, 0x25d7c,
1667                0x25e00, 0x25e04,
1668                0x26000, 0x2602c,
1669                0x26100, 0x2613c,
1670                0x26190, 0x261c8,
1671                0x26200, 0x26318,
1672                0x26400, 0x26528,
1673                0x26540, 0x26614,
1674                0x27000, 0x27040,
1675                0x2704c, 0x27060,
1676                0x270c0, 0x270ec,
1677                0x27200, 0x27268,
1678                0x27270, 0x27284,
1679                0x272fc, 0x27388,
1680                0x27400, 0x27404,
1681                0x27500, 0x27518,
1682                0x2752c, 0x2753c,
1683                0x27550, 0x27554,
1684                0x27600, 0x27600,
1685                0x27608, 0x27628,
1686                0x27630, 0x2763c,
1687                0x27700, 0x2771c,
1688                0x27780, 0x2778c,
1689                0x27800, 0x27c38,
1690                0x27c80, 0x27d7c,
1691                0x27e00, 0x27e04
1692        };
1693
1694        static const unsigned int t5_reg_ranges[] = {
1695                0x1008, 0x1148,
1696                0x1180, 0x11b4,
1697                0x11fc, 0x123c,
1698                0x1280, 0x173c,
1699                0x1800, 0x18fc,
1700                0x3000, 0x3028,
1701                0x3060, 0x30d8,
1702                0x30e0, 0x30fc,
1703                0x3140, 0x357c,
1704                0x35a8, 0x35cc,
1705                0x35ec, 0x35ec,
1706                0x3600, 0x5624,
1707                0x56cc, 0x575c,
1708                0x580c, 0x5814,
1709                0x5890, 0x58bc,
1710                0x5940, 0x59dc,
1711                0x59fc, 0x5a18,
1712                0x5a60, 0x5a9c,
1713                0x5b9c, 0x5bfc,
1714                0x6000, 0x6040,
1715                0x6058, 0x614c,
1716                0x7700, 0x7798,
1717                0x77c0, 0x78fc,
1718                0x7b00, 0x7c54,
1719                0x7d00, 0x7efc,
1720                0x8dc0, 0x8de0,
1721                0x8df8, 0x8e84,
1722                0x8ea0, 0x8f84,
1723                0x8fc0, 0x90f8,
1724                0x9400, 0x9470,
1725                0x9600, 0x96f4,
1726                0x9800, 0x9808,
1727                0x9820, 0x983c,
1728                0x9850, 0x9864,
1729                0x9c00, 0x9c6c,
1730                0x9c80, 0x9cec,
1731                0x9d00, 0x9d6c,
1732                0x9d80, 0x9dec,
1733                0x9e00, 0x9e6c,
1734                0x9e80, 0x9eec,
1735                0x9f00, 0x9f6c,
1736                0x9f80, 0xa020,
1737                0xd004, 0xd03c,
1738                0xdfc0, 0xdfe0,
1739                0xe000, 0x11088,
1740                0x1109c, 0x1117c,
1741                0x11190, 0x11204,
1742                0x19040, 0x1906c,
1743                0x19078, 0x19080,
1744                0x1908c, 0x19124,
1745                0x19150, 0x191b0,
1746                0x191d0, 0x191e8,
1747                0x19238, 0x19290,
1748                0x193f8, 0x19474,
1749                0x19490, 0x194cc,
1750                0x194f0, 0x194f8,
1751                0x19c00, 0x19c60,
1752                0x19c94, 0x19e10,
1753                0x19e50, 0x19f34,
1754                0x19f40, 0x19f50,
1755                0x19f90, 0x19fe4,
1756                0x1a000, 0x1a06c,
1757                0x1a0b0, 0x1a120,
1758                0x1a128, 0x1a138,
1759                0x1a190, 0x1a1c4,
1760                0x1a1fc, 0x1a1fc,
1761                0x1e008, 0x1e00c,
1762                0x1e040, 0x1e04c,
1763                0x1e284, 0x1e290,
1764                0x1e2c0, 0x1e2c0,
1765                0x1e2e0, 0x1e2e0,
1766                0x1e300, 0x1e384,
1767                0x1e3c0, 0x1e3c8,
1768                0x1e408, 0x1e40c,
1769                0x1e440, 0x1e44c,
1770                0x1e684, 0x1e690,
1771                0x1e6c0, 0x1e6c0,
1772                0x1e6e0, 0x1e6e0,
1773                0x1e700, 0x1e784,
1774                0x1e7c0, 0x1e7c8,
1775                0x1e808, 0x1e80c,
1776                0x1e840, 0x1e84c,
1777                0x1ea84, 0x1ea90,
1778                0x1eac0, 0x1eac0,
1779                0x1eae0, 0x1eae0,
1780                0x1eb00, 0x1eb84,
1781                0x1ebc0, 0x1ebc8,
1782                0x1ec08, 0x1ec0c,
1783                0x1ec40, 0x1ec4c,
1784                0x1ee84, 0x1ee90,
1785                0x1eec0, 0x1eec0,
1786                0x1eee0, 0x1eee0,
1787                0x1ef00, 0x1ef84,
1788                0x1efc0, 0x1efc8,
1789                0x1f008, 0x1f00c,
1790                0x1f040, 0x1f04c,
1791                0x1f284, 0x1f290,
1792                0x1f2c0, 0x1f2c0,
1793                0x1f2e0, 0x1f2e0,
1794                0x1f300, 0x1f384,
1795                0x1f3c0, 0x1f3c8,
1796                0x1f408, 0x1f40c,
1797                0x1f440, 0x1f44c,
1798                0x1f684, 0x1f690,
1799                0x1f6c0, 0x1f6c0,
1800                0x1f6e0, 0x1f6e0,
1801                0x1f700, 0x1f784,
1802                0x1f7c0, 0x1f7c8,
1803                0x1f808, 0x1f80c,
1804                0x1f840, 0x1f84c,
1805                0x1fa84, 0x1fa90,
1806                0x1fac0, 0x1fac0,
1807                0x1fae0, 0x1fae0,
1808                0x1fb00, 0x1fb84,
1809                0x1fbc0, 0x1fbc8,
1810                0x1fc08, 0x1fc0c,
1811                0x1fc40, 0x1fc4c,
1812                0x1fe84, 0x1fe90,
1813                0x1fec0, 0x1fec0,
1814                0x1fee0, 0x1fee0,
1815                0x1ff00, 0x1ff84,
1816                0x1ffc0, 0x1ffc8,
1817                0x30000, 0x30030,
1818                0x30100, 0x30144,
1819                0x30190, 0x301d0,
1820                0x30200, 0x30318,
1821                0x30400, 0x3052c,
1822                0x30540, 0x3061c,
1823                0x30800, 0x30834,
1824                0x308c0, 0x30908,
1825                0x30910, 0x309ac,
1826                0x30a00, 0x30a04,
1827                0x30a0c, 0x30a2c,
1828                0x30a44, 0x30a50,
1829                0x30a74, 0x30c24,
1830                0x30d08, 0x30d14,
1831                0x30d1c, 0x30d20,
1832                0x30d3c, 0x30d50,
1833                0x31200, 0x3120c,
1834                0x31220, 0x31220,
1835                0x31240, 0x31240,
1836                0x31600, 0x31600,
1837                0x31608, 0x3160c,
1838                0x31a00, 0x31a1c,
1839                0x31e04, 0x31e20,
1840                0x31e38, 0x31e3c,
1841                0x31e80, 0x31e80,
1842                0x31e88, 0x31ea8,
1843                0x31eb0, 0x31eb4,
1844                0x31ec8, 0x31ed4,
1845                0x31fb8, 0x32004,
1846                0x32208, 0x3223c,
1847                0x32600, 0x32630,
1848                0x32a00, 0x32abc,
1849                0x32b00, 0x32b70,
1850                0x33000, 0x33048,
1851                0x33060, 0x3309c,
1852                0x330f0, 0x33148,
1853                0x33160, 0x3319c,
1854                0x331f0, 0x332e4,
1855                0x332f8, 0x333e4,
1856                0x333f8, 0x33448,
1857                0x33460, 0x3349c,
1858                0x334f0, 0x33548,
1859                0x33560, 0x3359c,
1860                0x335f0, 0x336e4,
1861                0x336f8, 0x337e4,
1862                0x337f8, 0x337fc,
1863                0x33814, 0x33814,
1864                0x3382c, 0x3382c,
1865                0x33880, 0x3388c,
1866                0x338e8, 0x338ec,
1867                0x33900, 0x33948,
1868                0x33960, 0x3399c,
1869                0x339f0, 0x33ae4,
1870                0x33af8, 0x33b10,
1871                0x33b28, 0x33b28,
1872                0x33b3c, 0x33b50,
1873                0x33bf0, 0x33c10,
1874                0x33c28, 0x33c28,
1875                0x33c3c, 0x33c50,
1876                0x33cf0, 0x33cfc,
1877                0x34000, 0x34030,
1878                0x34100, 0x34144,
1879                0x34190, 0x341d0,
1880                0x34200, 0x34318,
1881                0x34400, 0x3452c,
1882                0x34540, 0x3461c,
1883                0x34800, 0x34834,
1884                0x348c0, 0x34908,
1885                0x34910, 0x349ac,
1886                0x34a00, 0x34a04,
1887                0x34a0c, 0x34a2c,
1888                0x34a44, 0x34a50,
1889                0x34a74, 0x34c24,
1890                0x34d08, 0x34d14,
1891                0x34d1c, 0x34d20,
1892                0x34d3c, 0x34d50,
1893                0x35200, 0x3520c,
1894                0x35220, 0x35220,
1895                0x35240, 0x35240,
1896                0x35600, 0x35600,
1897                0x35608, 0x3560c,
1898                0x35a00, 0x35a1c,
1899                0x35e04, 0x35e20,
1900                0x35e38, 0x35e3c,
1901                0x35e80, 0x35e80,
1902                0x35e88, 0x35ea8,
1903                0x35eb0, 0x35eb4,
1904                0x35ec8, 0x35ed4,
1905                0x35fb8, 0x36004,
1906                0x36208, 0x3623c,
1907                0x36600, 0x36630,
1908                0x36a00, 0x36abc,
1909                0x36b00, 0x36b70,
1910                0x37000, 0x37048,
1911                0x37060, 0x3709c,
1912                0x370f0, 0x37148,
1913                0x37160, 0x3719c,
1914                0x371f0, 0x372e4,
1915                0x372f8, 0x373e4,
1916                0x373f8, 0x37448,
1917                0x37460, 0x3749c,
1918                0x374f0, 0x37548,
1919                0x37560, 0x3759c,
1920                0x375f0, 0x376e4,
1921                0x376f8, 0x377e4,
1922                0x377f8, 0x377fc,
1923                0x37814, 0x37814,
1924                0x3782c, 0x3782c,
1925                0x37880, 0x3788c,
1926                0x378e8, 0x378ec,
1927                0x37900, 0x37948,
1928                0x37960, 0x3799c,
1929                0x379f0, 0x37ae4,
1930                0x37af8, 0x37b10,
1931                0x37b28, 0x37b28,
1932                0x37b3c, 0x37b50,
1933                0x37bf0, 0x37c10,
1934                0x37c28, 0x37c28,
1935                0x37c3c, 0x37c50,
1936                0x37cf0, 0x37cfc,
1937                0x38000, 0x38030,
1938                0x38100, 0x38144,
1939                0x38190, 0x381d0,
1940                0x38200, 0x38318,
1941                0x38400, 0x3852c,
1942                0x38540, 0x3861c,
1943                0x38800, 0x38834,
1944                0x388c0, 0x38908,
1945                0x38910, 0x389ac,
1946                0x38a00, 0x38a04,
1947                0x38a0c, 0x38a2c,
1948                0x38a44, 0x38a50,
1949                0x38a74, 0x38c24,
1950                0x38d08, 0x38d14,
1951                0x38d1c, 0x38d20,
1952                0x38d3c, 0x38d50,
1953                0x39200, 0x3920c,
1954                0x39220, 0x39220,
1955                0x39240, 0x39240,
1956                0x39600, 0x39600,
1957                0x39608, 0x3960c,
1958                0x39a00, 0x39a1c,
1959                0x39e04, 0x39e20,
1960                0x39e38, 0x39e3c,
1961                0x39e80, 0x39e80,
1962                0x39e88, 0x39ea8,
1963                0x39eb0, 0x39eb4,
1964                0x39ec8, 0x39ed4,
1965                0x39fb8, 0x3a004,
1966                0x3a208, 0x3a23c,
1967                0x3a600, 0x3a630,
1968                0x3aa00, 0x3aabc,
1969                0x3ab00, 0x3ab70,
1970                0x3b000, 0x3b048,
1971                0x3b060, 0x3b09c,
1972                0x3b0f0, 0x3b148,
1973                0x3b160, 0x3b19c,
1974                0x3b1f0, 0x3b2e4,
1975                0x3b2f8, 0x3b3e4,
1976                0x3b3f8, 0x3b448,
1977                0x3b460, 0x3b49c,
1978                0x3b4f0, 0x3b548,
1979                0x3b560, 0x3b59c,
1980                0x3b5f0, 0x3b6e4,
1981                0x3b6f8, 0x3b7e4,
1982                0x3b7f8, 0x3b7fc,
1983                0x3b814, 0x3b814,
1984                0x3b82c, 0x3b82c,
1985                0x3b880, 0x3b88c,
1986                0x3b8e8, 0x3b8ec,
1987                0x3b900, 0x3b948,
1988                0x3b960, 0x3b99c,
1989                0x3b9f0, 0x3bae4,
1990                0x3baf8, 0x3bb10,
1991                0x3bb28, 0x3bb28,
1992                0x3bb3c, 0x3bb50,
1993                0x3bbf0, 0x3bc10,
1994                0x3bc28, 0x3bc28,
1995                0x3bc3c, 0x3bc50,
1996                0x3bcf0, 0x3bcfc,
1997                0x3c000, 0x3c030,
1998                0x3c100, 0x3c144,
1999                0x3c190, 0x3c1d0,
2000                0x3c200, 0x3c318,
2001                0x3c400, 0x3c52c,
2002                0x3c540, 0x3c61c,
2003                0x3c800, 0x3c834,
2004                0x3c8c0, 0x3c908,
2005                0x3c910, 0x3c9ac,
2006                0x3ca00, 0x3ca04,
2007                0x3ca0c, 0x3ca2c,
2008                0x3ca44, 0x3ca50,
2009                0x3ca74, 0x3cc24,
2010                0x3cd08, 0x3cd14,
2011                0x3cd1c, 0x3cd20,
2012                0x3cd3c, 0x3cd50,
2013                0x3d200, 0x3d20c,
2014                0x3d220, 0x3d220,
2015                0x3d240, 0x3d240,
2016                0x3d600, 0x3d600,
2017                0x3d608, 0x3d60c,
2018                0x3da00, 0x3da1c,
2019                0x3de04, 0x3de20,
2020                0x3de38, 0x3de3c,
2021                0x3de80, 0x3de80,
2022                0x3de88, 0x3dea8,
2023                0x3deb0, 0x3deb4,
2024                0x3dec8, 0x3ded4,
2025                0x3dfb8, 0x3e004,
2026                0x3e208, 0x3e23c,
2027                0x3e600, 0x3e630,
2028                0x3ea00, 0x3eabc,
2029                0x3eb00, 0x3eb70,
2030                0x3f000, 0x3f048,
2031                0x3f060, 0x3f09c,
2032                0x3f0f0, 0x3f148,
2033                0x3f160, 0x3f19c,
2034                0x3f1f0, 0x3f2e4,
2035                0x3f2f8, 0x3f3e4,
2036                0x3f3f8, 0x3f448,
2037                0x3f460, 0x3f49c,
2038                0x3f4f0, 0x3f548,
2039                0x3f560, 0x3f59c,
2040                0x3f5f0, 0x3f6e4,
2041                0x3f6f8, 0x3f7e4,
2042                0x3f7f8, 0x3f7fc,
2043                0x3f814, 0x3f814,
2044                0x3f82c, 0x3f82c,
2045                0x3f880, 0x3f88c,
2046                0x3f8e8, 0x3f8ec,
2047                0x3f900, 0x3f948,
2048                0x3f960, 0x3f99c,
2049                0x3f9f0, 0x3fae4,
2050                0x3faf8, 0x3fb10,
2051                0x3fb28, 0x3fb28,
2052                0x3fb3c, 0x3fb50,
2053                0x3fbf0, 0x3fc10,
2054                0x3fc28, 0x3fc28,
2055                0x3fc3c, 0x3fc50,
2056                0x3fcf0, 0x3fcfc,
2057                0x40000, 0x4000c,
2058                0x40040, 0x40068,
2059                0x40080, 0x40144,
2060                0x40180, 0x4018c,
2061                0x40200, 0x40298,
2062                0x402ac, 0x4033c,
2063                0x403f8, 0x403fc,
2064                0x41300, 0x413c4,
2065                0x41400, 0x4141c,
2066                0x41480, 0x414d0,
2067                0x44000, 0x44078,
2068                0x440c0, 0x44278,
2069                0x442c0, 0x44478,
2070                0x444c0, 0x44678,
2071                0x446c0, 0x44878,
2072                0x448c0, 0x449fc,
2073                0x45000, 0x45068,
2074                0x45080, 0x45084,
2075                0x450a0, 0x450b0,
2076                0x45200, 0x45268,
2077                0x45280, 0x45284,
2078                0x452a0, 0x452b0,
2079                0x460c0, 0x460e4,
2080                0x47000, 0x4708c,
2081                0x47200, 0x47250,
2082                0x47400, 0x47420,
2083                0x47600, 0x47618,
2084                0x47800, 0x47814,
2085                0x48000, 0x4800c,
2086                0x48040, 0x48068,
2087                0x48080, 0x48144,
2088                0x48180, 0x4818c,
2089                0x48200, 0x48298,
2090                0x482ac, 0x4833c,
2091                0x483f8, 0x483fc,
2092                0x49300, 0x493c4,
2093                0x49400, 0x4941c,
2094                0x49480, 0x494d0,
2095                0x4c000, 0x4c078,
2096                0x4c0c0, 0x4c278,
2097                0x4c2c0, 0x4c478,
2098                0x4c4c0, 0x4c678,
2099                0x4c6c0, 0x4c878,
2100                0x4c8c0, 0x4c9fc,
2101                0x4d000, 0x4d068,
2102                0x4d080, 0x4d084,
2103                0x4d0a0, 0x4d0b0,
2104                0x4d200, 0x4d268,
2105                0x4d280, 0x4d284,
2106                0x4d2a0, 0x4d2b0,
2107                0x4e0c0, 0x4e0e4,
2108                0x4f000, 0x4f08c,
2109                0x4f200, 0x4f250,
2110                0x4f400, 0x4f420,
2111                0x4f600, 0x4f618,
2112                0x4f800, 0x4f814,
2113                0x50000, 0x500cc,
2114                0x50400, 0x50400,
2115                0x50800, 0x508cc,
2116                0x50c00, 0x50c00,
2117                0x51000, 0x5101c,
2118                0x51300, 0x51308,
2119        };
2120
2121        int i;
2122        struct adapter *ap = netdev2adap(dev);
2123        const unsigned int *reg_ranges;
2124        int arr_size = 0, buf_size = 0;
2125
2126        if (is_t4(ap->params.chip)) {
2127                reg_ranges = &t4_reg_ranges[0];
2128                arr_size = ARRAY_SIZE(t4_reg_ranges);
2129                buf_size = T4_REGMAP_SIZE;
2130        } else {
2131                reg_ranges = &t5_reg_ranges[0];
2132                arr_size = ARRAY_SIZE(t5_reg_ranges);
2133                buf_size = T5_REGMAP_SIZE;
2134        }
2135
2136        regs->version = mk_adap_vers(ap);
2137
2138        memset(buf, 0, buf_size);
2139        for (i = 0; i < arr_size; i += 2)
2140                reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2141}
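/*
 * Editor's note: the tables above are flat arrays of inclusive
 * {start, end} register-address pairs, which is why get_regs() steps by
 * two, and reg_block_dump() writes each value at its address offset in
 * the buffer (buf + start) rather than packing them.  A sketch of the
 * same pair-walk, counting how many 32-bit registers a table covers:
 */
static unsigned int count_dumped_regs(const unsigned int *ranges,
                                      unsigned int arr_size)
{
        unsigned int i, n = 0;

        for (i = 0; i < arr_size; i += 2)
                n += (ranges[i + 1] - ranges[i]) / sizeof(u32) + 1;
        return n;
}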
2142
2143static int restart_autoneg(struct net_device *dev)
2144{
2145        struct port_info *p = netdev_priv(dev);
2146
2147        if (!netif_running(dev))
2148                return -EAGAIN;
2149        if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2150                return -EINVAL;
2151        t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2152        return 0;
2153}
2154
2155static int identify_port(struct net_device *dev,
2156                         enum ethtool_phys_id_state state)
2157{
2158        unsigned int val;
2159        struct adapter *adap = netdev2adap(dev);
2160
2161        if (state == ETHTOOL_ID_ACTIVE)
2162                val = 0xffff;
2163        else if (state == ETHTOOL_ID_INACTIVE)
2164                val = 0;
2165        else
2166                return -EINVAL;
2167
2168        return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2169}
2170
2171static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2172{
2173        unsigned int v = 0;
2174
2175        if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2176            type == FW_PORT_TYPE_BT_XAUI) {
2177                v |= SUPPORTED_TP;
2178                if (caps & FW_PORT_CAP_SPEED_100M)
2179                        v |= SUPPORTED_100baseT_Full;
2180                if (caps & FW_PORT_CAP_SPEED_1G)
2181                        v |= SUPPORTED_1000baseT_Full;
2182                if (caps & FW_PORT_CAP_SPEED_10G)
2183                        v |= SUPPORTED_10000baseT_Full;
2184        } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2185                v |= SUPPORTED_Backplane;
2186                if (caps & FW_PORT_CAP_SPEED_1G)
2187                        v |= SUPPORTED_1000baseKX_Full;
2188                if (caps & FW_PORT_CAP_SPEED_10G)
2189                        v |= SUPPORTED_10000baseKX4_Full;
2190        } else if (type == FW_PORT_TYPE_KR)
2191                v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2192        else if (type == FW_PORT_TYPE_BP_AP)
2193                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2194                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2195        else if (type == FW_PORT_TYPE_BP4_AP)
2196                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2197                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2198                     SUPPORTED_10000baseKX4_Full;
2199        else if (type == FW_PORT_TYPE_FIBER_XFI ||
2200                 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
2201                v |= SUPPORTED_FIBRE;
2202
2203        if (caps & FW_PORT_CAP_ANEG)
2204                v |= SUPPORTED_Autoneg;
2205        return v;
2206}
2207
2208static unsigned int to_fw_linkcaps(unsigned int caps)
2209{
2210        unsigned int v = 0;
2211
2212        if (caps & ADVERTISED_100baseT_Full)
2213                v |= FW_PORT_CAP_SPEED_100M;
2214        if (caps & ADVERTISED_1000baseT_Full)
2215                v |= FW_PORT_CAP_SPEED_1G;
2216        if (caps & ADVERTISED_10000baseT_Full)
2217                v |= FW_PORT_CAP_SPEED_10G;
2218        return v;
2219}
2220
2221static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2222{
2223        const struct port_info *p = netdev_priv(dev);
2224
2225        if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2226            p->port_type == FW_PORT_TYPE_BT_XFI ||
2227            p->port_type == FW_PORT_TYPE_BT_XAUI)
2228                cmd->port = PORT_TP;
2229        else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2230                 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2231                cmd->port = PORT_FIBRE;
2232        else if (p->port_type == FW_PORT_TYPE_SFP) {
2233                if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2234                    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2235                        cmd->port = PORT_DA;
2236                else
2237                        cmd->port = PORT_FIBRE;
2238        } else
2239                cmd->port = PORT_OTHER;
2240
2241        if (p->mdio_addr >= 0) {
2242                cmd->phy_address = p->mdio_addr;
2243                cmd->transceiver = XCVR_EXTERNAL;
2244                cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2245                        MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2246        } else {
2247                cmd->phy_address = 0;  /* not really, but no better option */
2248                cmd->transceiver = XCVR_INTERNAL;
2249                cmd->mdio_support = 0;
2250        }
2251
2252        cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2253        cmd->advertising = from_fw_linkcaps(p->port_type,
2254                                            p->link_cfg.advertising);
2255        ethtool_cmd_speed_set(cmd,
2256                              netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2257        cmd->duplex = DUPLEX_FULL;
2258        cmd->autoneg = p->link_cfg.autoneg;
2259        cmd->maxtxpkt = 0;
2260        cmd->maxrxpkt = 0;
2261        return 0;
2262}
2263
2264static unsigned int speed_to_caps(int speed)
2265{
2266        if (speed == SPEED_100)
2267                return FW_PORT_CAP_SPEED_100M;
2268        if (speed == SPEED_1000)
2269                return FW_PORT_CAP_SPEED_1G;
2270        if (speed == SPEED_10000)
2271                return FW_PORT_CAP_SPEED_10G;
2272        return 0;
2273}
2274
2275static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2276{
2277        unsigned int cap;
2278        struct port_info *p = netdev_priv(dev);
2279        struct link_config *lc = &p->link_cfg;
2280        u32 speed = ethtool_cmd_speed(cmd);
2281
2282        if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
2283                return -EINVAL;
2284
2285        if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2286                /*
2287                 * PHY offers a single speed.  See if that's what's
2288                 * being requested.
2289                 */
2290                if (cmd->autoneg == AUTONEG_DISABLE &&
2291                    (lc->supported & speed_to_caps(speed)))
2292                        return 0;
2293                return -EINVAL;
2294        }
2295
2296        if (cmd->autoneg == AUTONEG_DISABLE) {
2297                cap = speed_to_caps(speed);
2298
2299                if (!(lc->supported & cap) || (speed == SPEED_1000) ||
2300                    (speed == SPEED_10000))
2301                        return -EINVAL;
2302                lc->requested_speed = cap;
2303                lc->advertising = 0;
2304        } else {
2305                cap = to_fw_linkcaps(cmd->advertising);
2306                if (!(lc->supported & cap))
2307                        return -EINVAL;
2308                lc->requested_speed = 0;
2309                lc->advertising = cap | FW_PORT_CAP_ANEG;
2310        }
2311        lc->autoneg = cmd->autoneg;
2312
2313        if (netif_running(dev))
2314                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2315                                     lc);
2316        return 0;
2317}
2318
2319static void get_pauseparam(struct net_device *dev,
2320                           struct ethtool_pauseparam *epause)
2321{
2322        struct port_info *p = netdev_priv(dev);
2323
2324        epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2325        epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2326        epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2327}
2328
2329static int set_pauseparam(struct net_device *dev,
2330                          struct ethtool_pauseparam *epause)
2331{
2332        struct port_info *p = netdev_priv(dev);
2333        struct link_config *lc = &p->link_cfg;
2334
2335        if (epause->autoneg == AUTONEG_DISABLE)
2336                lc->requested_fc = 0;
2337        else if (lc->supported & FW_PORT_CAP_ANEG)
2338                lc->requested_fc = PAUSE_AUTONEG;
2339        else
2340                return -EINVAL;
2341
2342        if (epause->rx_pause)
2343                lc->requested_fc |= PAUSE_RX;
2344        if (epause->tx_pause)
2345                lc->requested_fc |= PAUSE_TX;
2346        if (netif_running(dev))
2347                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2348                                     lc);
2349        return 0;
2350}
2351
2352static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2353{
2354        const struct port_info *pi = netdev_priv(dev);
2355        const struct sge *s = &pi->adapter->sge;
2356
2357        e->rx_max_pending = MAX_RX_BUFFERS;
2358        e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2359        e->rx_jumbo_max_pending = 0;
2360        e->tx_max_pending = MAX_TXQ_ENTRIES;
2361
2362        e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2363        e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2364        e->rx_jumbo_pending = 0;
2365        e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2366}
2367
2368static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2369{
2370        int i;
2371        const struct port_info *pi = netdev_priv(dev);
2372        struct adapter *adapter = pi->adapter;
2373        struct sge *s = &adapter->sge;
2374
2375        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2376            e->tx_pending > MAX_TXQ_ENTRIES ||
2377            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2378            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2379            e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2380                return -EINVAL;
2381
2382        if (adapter->flags & FULL_INIT_DONE)
2383                return -EBUSY;
2384
2385        for (i = 0; i < pi->nqsets; ++i) {
2386                s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2387                s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2388                s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2389        }
2390        return 0;
2391}
2392
2393static int closest_timer(const struct sge *s, int time)
2394{
2395        int i, delta, match = 0, min_delta = INT_MAX;
2396
2397        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2398                delta = time - s->timer_val[i];
2399                if (delta < 0)
2400                        delta = -delta;
2401                if (delta < min_delta) {
2402                        min_delta = delta;
2403                        match = i;
2404                }
2405        }
2406        return match;
2407}
2408
2409static int closest_thres(const struct sge *s, int thres)
2410{
2411        int i, delta, match = 0, min_delta = INT_MAX;
2412
2413        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2414                delta = thres - s->counter_val[i];
2415                if (delta < 0)
2416                        delta = -delta;
2417                if (delta < min_delta) {
2418                        min_delta = delta;
2419                        match = i;
2420                }
2421        }
2422        return match;
2423}
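/*
 * Editor's note: closest_timer() and closest_thres() are the same
 * nearest-value linear search over different arrays.  A generic sketch of
 * the idiom (illustrative only; the driver keeps the two copies above):
 */
static int closest_idx(const unsigned int *vals, int nvals, int target)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < nvals; i++) {
                delta = target - vals[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}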
2424
2425/*
2426 * Return a queue's interrupt hold-off time in us.  0 means no timer.
2427 */
2428static unsigned int qtimer_val(const struct adapter *adap,
2429                               const struct sge_rspq *q)
2430{
2431        unsigned int idx = q->intr_params >> 1;
2432
2433        return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2434}
2435
2436/**
2437 *      set_rxq_intr_params - set a queue's interrupt holdoff parameters
2438 *      @adap: the adapter
2439 *      @q: the Rx queue
2440 *      @us: the hold-off time in us, or 0 to disable timer
2441 *      @cnt: the hold-off packet count, or 0 to disable counter
2442 *
2443 *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
2444 *      one of the two needs to be enabled for the queue to generate interrupts.
2445 */
2446static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
2447                               unsigned int us, unsigned int cnt)
2448{
2449        if ((us | cnt) == 0)
2450                cnt = 1;
2451
2452        if (cnt) {
2453                int err;
2454                u32 v, new_idx;
2455
2456                new_idx = closest_thres(&adap->sge, cnt);
2457                if (q->desc && q->pktcnt_idx != new_idx) {
2458                        /* the queue has already been created, update it */
2459                        v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2460                            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2461                            FW_PARAMS_PARAM_YZ(q->cntxt_id);
2462                        err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2463                                            &new_idx);
2464                        if (err)
2465                                return err;
2466                }
2467                q->pktcnt_idx = new_idx;
2468        }
2469
2470        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2471        q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2472        return 0;
2473}
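/*
 * Editor's note: judging from qtimer_val() above and the QINTR_* macros
 * used here, intr_params packs the hold-off timer index in bits 1+ and
 * the packet-counter enable in bit 0.  An encode/decode pair under that
 * assumption (illustrative sketch, not the driver's macros):
 */
static inline unsigned int qintr_encode(unsigned int timer_idx, bool cnt_en)
{
        return (timer_idx << 1) | (cnt_en ? 1 : 0);
}

static inline unsigned int qintr_decode_timer_idx(unsigned int intr_params)
{
        return intr_params >> 1;        /* same shift as qtimer_val() */
}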
2474
2475static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2476{
2477        const struct port_info *pi = netdev_priv(dev);
2478        struct adapter *adap = pi->adapter;
2479        struct sge_rspq *q;
2480        int i;
2481        int r = 0;
2482
2483        for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
2484                q = &adap->sge.ethrxq[i].rspq;
2485                r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2486                        c->rx_max_coalesced_frames);
2487                if (r) {
2488                        dev_err(&dev->dev, "failed to set coalesce parameters: %d\n", r);
2489                        break;
2490                }
2491        }
2492        return r;
2493}
2494
2495static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2496{
2497        const struct port_info *pi = netdev_priv(dev);
2498        const struct adapter *adap = pi->adapter;
2499        const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2500
2501        c->rx_coalesce_usecs = qtimer_val(adap, rq);
2502        c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2503                adap->sge.counter_val[rq->pktcnt_idx] : 0;
2504        return 0;
2505}
2506
2507/**
2508 *      eeprom_ptov - translate a physical EEPROM address to virtual
2509 *      @phys_addr: the physical EEPROM address
2510 *      @fn: the PCI function number
2511 *      @sz: size of function-specific area
2512 *
2513 *      Translate a physical EEPROM address to virtual.  The first 1K is
2514 *      accessed through virtual addresses starting at 31K, the rest is
2515 *      accessed through virtual addresses starting at 0.
2516 *
2517 *      The mapping is as follows:
2518 *      [0..1K) -> [31K..32K)
2519 *      [1K..1K+A) -> [31K-A..31K)
2520 *      [1K+A..ES) -> [0..ES-A-1K)
2521 *
2522 *      where A = @fn * @sz, and ES = EEPROM size.
2523 */
2524static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2525{
2526        fn *= sz;
2527        if (phys_addr < 1024)
2528                return phys_addr + (31 << 10);
2529        if (phys_addr < 1024 + fn)
2530                return 31744 - fn + phys_addr - 1024;
2531        if (phys_addr < EEPROMSIZE)
2532                return phys_addr - 1024 - fn;
2533        return -EINVAL;
2534}
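/*
 * Editor's note: a worked example of the mapping above, assuming @fn = 1
 * and @sz = 1024 (so A = 1024; values chosen purely for illustration):
 *
 *      eeprom_ptov(0, 1, 1024)    == 31744 (0x7c00)  [0..1K)  -> [31K..32K)
 *      eeprom_ptov(1024, 1, 1024) == 30720 (0x7800)  [1K..2K) -> [30K..31K)
 *      eeprom_ptov(2048, 1, 1024) == 0               [2K..ES) -> [0..ES-2K)
 */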
2535
2536/*
2537 * The next two routines implement eeprom read/write from physical addresses.
2538 */
2539static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2540{
2541        int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2542
2543        if (vaddr >= 0)
2544                vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2545        return vaddr < 0 ? vaddr : 0;
2546}
2547
2548static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2549{
2550        int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2551
2552        if (vaddr >= 0)
2553                vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2554        return vaddr < 0 ? vaddr : 0;
2555}
2556
2557#define EEPROM_MAGIC 0x38E2F10C
2558
2559static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2560                      u8 *data)
2561{
2562        int i, err = 0;
2563        struct adapter *adapter = netdev2adap(dev);
2564
2565        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2566        if (!buf)
2567                return -ENOMEM;
2568
2569        e->magic = EEPROM_MAGIC;
2570        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2571                err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2572
2573        if (!err)
2574                memcpy(data, buf + e->offset, e->len);
2575        kfree(buf);
2576        return err;
2577}
2578
2579static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2580                      u8 *data)
2581{
2582        u8 *buf;
2583        int err = 0;
2584        u32 aligned_offset, aligned_len, *p;
2585        struct adapter *adapter = netdev2adap(dev);
2586
2587        if (eeprom->magic != EEPROM_MAGIC)
2588                return -EINVAL;
2589
2590        aligned_offset = eeprom->offset & ~3;
2591        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2592
2593        if (adapter->fn > 0) {
2594                u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2595
2596                if (aligned_offset < start ||
2597                    aligned_offset + aligned_len > start + EEPROMPFSIZE)
2598                        return -EPERM;
2599        }
2600
2601        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2602                /*
2603                 * RMW possibly needed for first or last words.
2604                 */
2605                buf = kmalloc(aligned_len, GFP_KERNEL);
2606                if (!buf)
2607                        return -ENOMEM;
2608                err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2609                if (!err && aligned_len > 4)
2610                        err = eeprom_rd_phys(adapter,
2611                                             aligned_offset + aligned_len - 4,
2612                                             (u32 *)&buf[aligned_len - 4]);
2613                if (err)
2614                        goto out;
2615                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2616        } else
2617                buf = data;
2618
2619        err = t4_seeprom_wp(adapter, false);
2620        if (err)
2621                goto out;
2622
2623        for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2624                err = eeprom_wr_phys(adapter, aligned_offset, *p);
2625                aligned_offset += 4;
2626        }
2627
2628        if (!err)
2629                err = t4_seeprom_wp(adapter, true);
2630out:
2631        if (buf != data)
2632                kfree(buf);
2633        return err;
2634}
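/*
 * Editor's note: a worked example of the alignment arithmetic in
 * set_eeprom() above.  For an unaligned request with offset = 5, len = 6:
 *
 *      aligned_offset = 5 & ~3                  = 4
 *      aligned_len    = (6 + (5 & 3) + 3) & ~3  = 8
 *
 * so the write covers bytes [4..12), and the first and last 32-bit words
 * are read back beforehand for the read-modify-write.
 */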
2635
2636static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2637{
2638        int ret;
2639        const struct firmware *fw;
2640        struct adapter *adap = netdev2adap(netdev);
2641
2642        ef->data[sizeof(ef->data) - 1] = '\0';
2643        ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2644        if (ret < 0)
2645                return ret;
2646
2647        ret = t4_load_fw(adap, fw->data, fw->size);
2648        release_firmware(fw);
2649        if (!ret)
2650                dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2651        return ret;
2652}
2653
2654#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2655#define BCAST_CRC 0xa0ccc1a6
2656
2657static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2658{
2659        wol->supported = WOL_SUPPORTED;
2660        wol->wolopts = netdev2adap(dev)->wol;
2661        memset(&wol->sopass, 0, sizeof(wol->sopass));
2662}
2663
2664static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2665{
2666        int err = 0;
2667        struct port_info *pi = netdev_priv(dev);
2668
2669        if (wol->wolopts & ~WOL_SUPPORTED)
2670                return -EINVAL;
2671        t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2672                            (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2673        if (wol->wolopts & WAKE_BCAST) {
2674                err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2675                                        ~0ULL, 0, false);
2676                if (!err)
2677                        err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2678                                                ~6ULL, ~0ULL, BCAST_CRC, true);
2679        } else
2680                t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2681        return err;
2682}
2683
2684static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2685{
2686        const struct port_info *pi = netdev_priv(dev);
2687        netdev_features_t changed = dev->features ^ features;
2688        int err;
2689
2690        if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2691                return 0;
2692
2693        err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2694                            -1, -1, -1,
2695                            !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2696        if (unlikely(err))
2697                dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2698        return err;
2699}
2700
2701static u32 get_rss_table_size(struct net_device *dev)
2702{
2703        const struct port_info *pi = netdev_priv(dev);
2704
2705        return pi->rss_size;
2706}
2707
2708static int get_rss_table(struct net_device *dev, u32 *p)
2709{
2710        const struct port_info *pi = netdev_priv(dev);
2711        unsigned int n = pi->rss_size;
2712
2713        while (n--)
2714                p[n] = pi->rss[n];
2715        return 0;
2716}
2717
2718static int set_rss_table(struct net_device *dev, const u32 *p)
2719{
2720        unsigned int i;
2721        struct port_info *pi = netdev_priv(dev);
2722
2723        for (i = 0; i < pi->rss_size; i++)
2724                pi->rss[i] = p[i];
2725        if (pi->adapter->flags & FULL_INIT_DONE)
2726                return write_rss(pi, pi->rss);
2727        return 0;
2728}
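/*
 * Editor's note: these hooks back ethtool's RSS indirection-table
 * interface (typically driven by "ethtool -x/-X" from userspace).  Each
 * of the pi->rss_size entries selects one of the port's queue sets for
 * an RX hash bucket; set_rss_table() caches the table in pi->rss and
 * pushes it to hardware via write_rss() only once the adapter is fully
 * initialised.
 */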
2729
2730static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2731                     u32 *rules)
2732{
2733        const struct port_info *pi = netdev_priv(dev);
2734
2735        switch (info->cmd) {
2736        case ETHTOOL_GRXFH: {
2737                unsigned int v = pi->rss_mode;
2738
2739                info->data = 0;
2740                switch (info->flow_type) {
2741                case TCP_V4_FLOW:
2742                        if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2743                                info->data = RXH_IP_SRC | RXH_IP_DST |
2744                                             RXH_L4_B_0_1 | RXH_L4_B_2_3;
2745                        else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2746                                info->data = RXH_IP_SRC | RXH_IP_DST;
2747                        break;
2748                case UDP_V4_FLOW:
2749                        if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2750                            (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2751                                info->data = RXH_IP_SRC | RXH_IP_DST |
2752                                             RXH_L4_B_0_1 | RXH_L4_B_2_3;
2753                        else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2754                                info->data = RXH_IP_SRC | RXH_IP_DST;
2755                        break;
2756                case SCTP_V4_FLOW:
2757                case AH_ESP_V4_FLOW:
2758                case IPV4_FLOW:
2759                        if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2760                                info->data = RXH_IP_SRC | RXH_IP_DST;
2761                        break;
2762                case TCP_V6_FLOW:
2763                        if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2764                                info->data = RXH_IP_SRC | RXH_IP_DST |
2765                                             RXH_L4_B_0_1 | RXH_L4_B_2_3;
2766                        else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2767                                info->data = RXH_IP_SRC | RXH_IP_DST;
2768                        break;
2769                case UDP_V6_FLOW:
2770                        if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2771                            (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2772                                info->data = RXH_IP_SRC | RXH_IP_DST |
2773                                             RXH_L4_B_0_1 | RXH_L4_B_2_3;
2774                        else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2775                                info->data = RXH_IP_SRC | RXH_IP_DST;
2776                        break;
2777                case SCTP_V6_FLOW:
2778                case AH_ESP_V6_FLOW:
2779                case IPV6_FLOW:
2780                        if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2781                                info->data = RXH_IP_SRC | RXH_IP_DST;
2782                        break;
2783                }
2784                return 0;
2785        }
2786        case ETHTOOL_GRXRINGS:
2787                info->data = pi->nqsets;
2788                return 0;
2789        }
2790        return -EOPNOTSUPP;
2791}
2792
2793static const struct ethtool_ops cxgb_ethtool_ops = {
2794        .get_settings      = get_settings,
2795        .set_settings      = set_settings,
2796        .get_drvinfo       = get_drvinfo,
2797        .get_msglevel      = get_msglevel,
2798        .set_msglevel      = set_msglevel,
2799        .get_ringparam     = get_sge_param,
2800        .set_ringparam     = set_sge_param,
2801        .get_coalesce      = get_coalesce,
2802        .set_coalesce      = set_coalesce,
2803        .get_eeprom_len    = get_eeprom_len,
2804        .get_eeprom        = get_eeprom,
2805        .set_eeprom        = set_eeprom,
2806        .get_pauseparam    = get_pauseparam,
2807        .set_pauseparam    = set_pauseparam,
2808        .get_link          = ethtool_op_get_link,
2809        .get_strings       = get_strings,
2810        .set_phys_id       = identify_port,
2811        .nway_reset        = restart_autoneg,
2812        .get_sset_count    = get_sset_count,
2813        .get_ethtool_stats = get_stats,
2814        .get_regs_len      = get_regs_len,
2815        .get_regs          = get_regs,
2816        .get_wol           = get_wol,
2817        .set_wol           = set_wol,
2818        .get_rxnfc         = get_rxnfc,
2819        .get_rxfh_indir_size = get_rss_table_size,
2820        .get_rxfh_indir    = get_rss_table,
2821        .set_rxfh_indir    = set_rss_table,
2822        .flash_device      = set_flash,
2823};
2824
2825/*
2826 * debugfs support
2827 */
2828static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2829                        loff_t *ppos)
2830{
2831        loff_t pos = *ppos;
2832        loff_t avail = file_inode(file)->i_size;
2833        unsigned int mem = (uintptr_t)file->private_data & 3;
2834        struct adapter *adap = file->private_data - mem;
2835
2836        if (pos < 0)
2837                return -EINVAL;
2838        if (pos >= avail)
2839                return 0;
2840        if (count > avail - pos)
2841                count = avail - pos;
2842
2843        while (count) {
2844                size_t len;
2845                int ret, ofst;
2846                __be32 data[16];
2847
2848                if ((mem == MEM_MC) || (mem == MEM_MC1))
2849                        ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
2850                else
2851                        ret = t4_edc_read(adap, mem, pos, data, NULL);
2852                if (ret)
2853                        return ret;
2854
2855                ofst = pos % sizeof(data);
2856                len = min(count, sizeof(data) - ofst);
2857                if (copy_to_user(buf, (u8 *)data + ofst, len))
2858                        return -EFAULT;
2859
2860                buf += len;
2861                pos += len;
2862                count -= len;
2863        }
2864        count = pos - *ppos;
2865        *ppos = pos;
2866        return count;
2867}
2868
2869static const struct file_operations mem_debugfs_fops = {
2870        .owner   = THIS_MODULE,
2871        .open    = simple_open,
2872        .read    = mem_read,
2873        .llseek  = default_llseek,
2874};
2875
2876static void add_debugfs_mem(struct adapter *adap, const char *name,
2877                            unsigned int idx, unsigned int size_mb)
2878{
2879        struct dentry *de;
2880
2881        de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2882                                 (void *)adap + idx, &mem_debugfs_fops);
2883        if (de && de->d_inode)
2884                de->d_inode->i_size = size_mb << 20;
2885}
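/*
 * Editor's note: add_debugfs_mem() smuggles the memory index into the low
 * two bits of the adapter pointer ((void *)adap + idx), and mem_read()
 * recovers both with "& 3" plus a subtraction.  This relies on the
 * adapter structure being at least 4-byte aligned.  An illustrative
 * standalone sketch of the pointer-tagging idiom:
 */
static inline void *tag_ptr(void *p, unsigned int tag)
{
        return (void *)((uintptr_t)p | (tag & 3));      /* p 4-byte aligned */
}

static inline unsigned int ptr_tag(const void *tagged)
{
        return (uintptr_t)tagged & 3;
}

static inline void *untag_ptr(void *tagged)
{
        return (void *)((uintptr_t)tagged & ~(uintptr_t)3);
}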
2886
2887static int setup_debugfs(struct adapter *adap)
2888{
2889        int i;
2890        u32 size;
2891
2892        if (IS_ERR_OR_NULL(adap->debugfs_root))
2893                return -1;
2894
2895        i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2896        if (i & EDRAM0_ENABLE) {
2897                size = t4_read_reg(adap, MA_EDRAM0_BAR);
2898                add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2899        }
2900        if (i & EDRAM1_ENABLE) {
2901                size = t4_read_reg(adap, MA_EDRAM1_BAR);
2902                add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2903        }
2904        if (is_t4(adap->params.chip)) {
2905                size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2906                if (i & EXT_MEM_ENABLE)
2907                        add_debugfs_mem(adap, "mc", MEM_MC,
2908                                        EXT_MEM_SIZE_GET(size));
2909        } else {
2910                if (i & EXT_MEM_ENABLE) {
2911                        size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2912                        add_debugfs_mem(adap, "mc0", MEM_MC0,
2913                                        EXT_MEM_SIZE_GET(size));
2914                }
2915                if (i & EXT_MEM1_ENABLE) {
2916                        size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2917                        add_debugfs_mem(adap, "mc1", MEM_MC1,
2918                                        EXT_MEM_SIZE_GET(size));
2919                }
2920        }
2921        if (adap->l2t)
2922                debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2923                                    &t4_l2t_fops);
2924        return 0;
2925}
2926
2927/*
2928 * upper-layer driver support
2929 */
2930
2931/*
2932 * Allocate an active-open TID and set it to the supplied value.
2933 */
2934int cxgb4_alloc_atid(struct tid_info *t, void *data)
2935{
2936        int atid = -1;
2937
2938        spin_lock_bh(&t->atid_lock);
2939        if (t->afree) {
2940                union aopen_entry *p = t->afree;
2941
2942                atid = (p - t->atid_tab) + t->atid_base;
2943                t->afree = p->next;
2944                p->data = data;
2945                t->atids_in_use++;
2946        }
2947        spin_unlock_bh(&t->atid_lock);
2948        return atid;
2949}
2950EXPORT_SYMBOL(cxgb4_alloc_atid);
2951
2952/*
2953 * Release an active-open TID.
2954 */
2955void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2956{
2957        union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
2958
2959        spin_lock_bh(&t->atid_lock);
2960        p->next = t->afree;
2961        t->afree = p;
2962        t->atids_in_use--;
2963        spin_unlock_bh(&t->atid_lock);
2964}
2965EXPORT_SYMBOL(cxgb4_free_atid);
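/*
 * Usage sketch for the two calls above (illustrative only; "my_ctx" is a
 * hypothetical per-connection context):
 *
 *	int atid = cxgb4_alloc_atid(t, my_ctx);
 *	if (atid < 0)
 *		return -ENOMEM;			alloc returns -1 when full
 *	... embed atid in the active-open request ...
 *	cxgb4_free_atid(t, atid);
 *
 * The data pointer handed to cxgb4_alloc_atid() is returned to the caller
 * with completions for that TID.
 */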
2966
2967/*
2968 * Allocate a server TID and set it to the supplied value.
2969 */
2970int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2971{
2972        int stid;
2973
2974        spin_lock_bh(&t->stid_lock);
2975        if (family == PF_INET) {
2976                stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2977                if (stid < t->nstids)
2978                        __set_bit(stid, t->stid_bmap);
2979                else
2980                        stid = -1;
2981        } else {
2982                stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2983                if (stid < 0)
2984                        stid = -1;
2985        }
2986        if (stid >= 0) {
2987                t->stid_tab[stid].data = data;
2988                stid += t->stid_base;
2989                /* IPv6 requires max of 520 bits or 16 cells in TCAM
2990                 * This is equivalent to 4 TIDs. With CLIP enabled it
2991                 * needs 2 TIDs.
2992                 */
2993                if (family == PF_INET)
2994                        t->stids_in_use++;
2995                else
2996                        t->stids_in_use += 4;
2997        }
2998        spin_unlock_bh(&t->stid_lock);
2999        return stid;
3000}
3001EXPORT_SYMBOL(cxgb4_alloc_stid);
3002
3003/* Allocate a server filter TID and set it to the supplied value.
3004 */
3005int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3006{
3007        int stid;
3008
3009        spin_lock_bh(&t->stid_lock);
3010        if (family == PF_INET) {
3011                stid = find_next_zero_bit(t->stid_bmap,
3012                                t->nstids + t->nsftids, t->nstids);
3013                if (stid < (t->nstids + t->nsftids))
3014                        __set_bit(stid, t->stid_bmap);
3015                else
3016                        stid = -1;
3017        } else {
3018                stid = -1;
3019        }
3020        if (stid >= 0) {
3021                t->stid_tab[stid].data = data;
3022                stid -= t->nstids;
3023                stid += t->sftid_base;
3024                t->stids_in_use++;
3025        }
3026        spin_unlock_bh(&t->stid_lock);
3027        return stid;
3028}
3029EXPORT_SYMBOL(cxgb4_alloc_sftid);
3030
3031/* Release a server TID.
3032 */
3033void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3034{
3035        /* Is it a server filter TID? */
3036        if (t->nsftids && (stid >= t->sftid_base)) {
3037                stid -= t->sftid_base;
3038                stid += t->nstids;
3039        } else {
3040                stid -= t->stid_base;
3041        }
3042
3043        spin_lock_bh(&t->stid_lock);
3044        if (family == PF_INET)
3045                __clear_bit(stid, t->stid_bmap);
3046        else
3047                bitmap_release_region(t->stid_bmap, stid, 2);
3048        t->stid_tab[stid].data = NULL;
3049        if (family == PF_INET)
3050                t->stids_in_use--;
3051        else
3052                t->stids_in_use -= 4;
3053        spin_unlock_bh(&t->stid_lock);
3054}
3055EXPORT_SYMBOL(cxgb4_free_stid);
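/*
 * Accounting example for the stid calls above (illustrative): an IPv4
 * server consumes one bitmap slot while an IPv6 server consumes an
 * order-2 (4-slot) aligned region, so
 *
 *	cxgb4_alloc_stid(t, PF_INET,  data);	stids_in_use += 1
 *	cxgb4_alloc_stid(t, PF_INET6, data);	stids_in_use += 4
 *
 * and cxgb4_free_stid() symmetrically releases either the single bit or
 * the whole region via bitmap_release_region(..., 2).
 */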
3056
3057/*
3058 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
3059 */
3060static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3061                           unsigned int tid)
3062{
3063        struct cpl_tid_release *req;
3064
3065        set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3066        req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3067        INIT_TP_WR(req, tid);
3068        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3069}
3070
3071/*
3072 * Queue a TID release request and if necessary schedule a work queue to
3073 * process it.
3074 */
3075static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3076                                    unsigned int tid)
3077{
3078        void **p = &t->tid_tab[tid];
3079        struct adapter *adap = container_of(t, struct adapter, tids);
3080
3081        spin_lock_bh(&adap->tid_release_lock);
3082        *p = adap->tid_release_head;
3083        /* Low 2 bits encode the Tx channel number */
3084        adap->tid_release_head = (void **)((uintptr_t)p | chan);
3085        if (!adap->tid_release_task_busy) {
3086                adap->tid_release_task_busy = true;
3087                queue_work(workq, &adap->tid_release_task);
3088        }
3089        spin_unlock_bh(&adap->tid_release_lock);
3090}
3091
3092/*
3093 * Process the list of pending TID release requests.
3094 */
3095static void process_tid_release_list(struct work_struct *work)
3096{
3097        struct sk_buff *skb;
3098        struct adapter *adap;
3099
3100        adap = container_of(work, struct adapter, tid_release_task);
3101
3102        spin_lock_bh(&adap->tid_release_lock);
3103        while (adap->tid_release_head) {
3104                void **p = adap->tid_release_head;
3105                unsigned int chan = (uintptr_t)p & 3;
3106                p = (void *)p - chan;
3107
3108                adap->tid_release_head = *p;
3109                *p = NULL;
3110                spin_unlock_bh(&adap->tid_release_lock);
3111
3112                while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3113                                         GFP_KERNEL)))
3114                        schedule_timeout_uninterruptible(1);
3115
3116                mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3117                t4_ofld_send(adap, skb);
3118                spin_lock_bh(&adap->tid_release_lock);
3119        }
3120        adap->tid_release_task_busy = false;
3121        spin_unlock_bh(&adap->tid_release_lock);
3122}
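/*
 * The deferred-release list above threads pending TIDs through tid_tab
 * itself: each node is the &tid_tab[tid] slot, with the Tx channel tagged
 * into the two low bits of the pointer.  Decoding sketch (mirrors the
 * loop above, illustrative only):
 *
 *	void **p = adap->tid_release_head;
 *	unsigned int chan = (uintptr_t)p & 3;		low 2 bits = channel
 *	p = (void *)p - chan;				untagged slot address
 *	unsigned int tid = p - adap->tids.tid_tab;	slot index is the TID
 */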
3123
3124/*
3125 * Release a TID and inform HW.  If we are unable to allocate the release
3126 * message we defer to a work queue.
3127 */
3128void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3129{
3130        void *old;
3131        struct sk_buff *skb;
3132        struct adapter *adap = container_of(t, struct adapter, tids);
3133
3134        old = t->tid_tab[tid];
3135        skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3136        if (likely(skb)) {
3137                t->tid_tab[tid] = NULL;
3138                mk_tid_release(skb, chan, tid);
3139                t4_ofld_send(adap, skb);
3140        } else
3141                cxgb4_queue_tid_release(t, chan, tid);
3142        if (old)
3143                atomic_dec(&t->tids_in_use);
3144}
3145EXPORT_SYMBOL(cxgb4_remove_tid);
3146
3147/*
3148 * Allocate and initialize the TID tables.  Returns 0 on success.
3149 */
3150static int tid_init(struct tid_info *t)
3151{
3152        size_t size;
3153        unsigned int stid_bmap_size;
3154        unsigned int natids = t->natids;
3155        struct adapter *adap = container_of(t, struct adapter, tids);
3156
3157        stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3158        size = t->ntids * sizeof(*t->tid_tab) +
3159               natids * sizeof(*t->atid_tab) +
3160               t->nstids * sizeof(*t->stid_tab) +
3161               t->nsftids * sizeof(*t->stid_tab) +
3162               stid_bmap_size * sizeof(long) +
3163               t->nftids * sizeof(*t->ftid_tab) +
3164               t->nsftids * sizeof(*t->ftid_tab);
3165
3166        t->tid_tab = t4_alloc_mem(size);
3167        if (!t->tid_tab)
3168                return -ENOMEM;
3169
3170        t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3171        t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3172        t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3173        t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3174        spin_lock_init(&t->stid_lock);
3175        spin_lock_init(&t->atid_lock);
3176
3177        t->stids_in_use = 0;
3178        t->afree = NULL;
3179        t->atids_in_use = 0;
3180        atomic_set(&t->tids_in_use, 0);
3181
3182        /* Set up the free list for atid_tab and clear the stid bitmap. */
3183        if (natids) {
3184                while (--natids)
3185                        t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3186                t->afree = t->atid_tab;
3187        }
3188        bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3189        /* Reserve stid 0 for T4/T5 adapters */
3190        if (!t->stid_base &&
3191            (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3192                __set_bit(0, t->stid_bmap);
3193
3194        return 0;
3195}
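/*
 * Layout of the single t4_alloc_mem() block carved up by tid_init()
 * (element counts per table):
 *
 *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *	stid_bmap[BITS_TO_LONGS(nstids + nsftids)] | ftid_tab[nftids + nsftids]
 *
 * One allocation keeps the tables contiguous and lets a single free tear
 * the whole set down.
 */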
3196
3197static int cxgb4_clip_get(const struct net_device *dev,
3198                          const struct in6_addr *lip)
3199{
3200        struct adapter *adap;
3201        struct fw_clip_cmd c;
3202
3203        adap = netdev2adap(dev);
3204        memset(&c, 0, sizeof(c));
3205        c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3206                        FW_CMD_REQUEST | FW_CMD_WRITE);
3207        c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3208        *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
3209        *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3210        return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3211}
3212
3213static int cxgb4_clip_release(const struct net_device *dev,
3214                              const struct in6_addr *lip)
3215{
3216        struct adapter *adap;
3217        struct fw_clip_cmd c;
3218
3219        adap = netdev2adap(dev);
3220        memset(&c, 0, sizeof(c));
3221        c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3222                        FW_CMD_REQUEST | FW_CMD_READ);
3223        c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3224        *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
3225        *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3226        return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3227}
3228
3229/**
3230 *      cxgb4_create_server - create an IP server
3231 *      @dev: the device
3232 *      @stid: the server TID
3233 *      @sip: local IP address to bind server to
3234 *      @sport: the server's TCP port
3235 *      @queue: queue to direct messages from this server to
3236 *
3237 *      Create an IP server for the given port and address.
3238 *      Returns <0 on error and one of the %NET_XMIT_* values on success.
3239 */
3240int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3241                        __be32 sip, __be16 sport, __be16 vlan,
3242                        unsigned int queue)
3243{
3244        unsigned int chan;
3245        struct sk_buff *skb;
3246        struct adapter *adap;
3247        struct cpl_pass_open_req *req;
3248        int ret;
3249
3250        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3251        if (!skb)
3252                return -ENOMEM;
3253
3254        adap = netdev2adap(dev);
3255        req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3256        INIT_TP_WR(req, 0);
3257        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3258        req->local_port = sport;
3259        req->peer_port = htons(0);
3260        req->local_ip = sip;
3261        req->peer_ip = htonl(0);
3262        chan = rxq_to_chan(&adap->sge, queue);
3263        req->opt0 = cpu_to_be64(TX_CHAN(chan));
3264        req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3265                                SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3266        ret = t4_mgmt_tx(adap, skb);
3267        return net_xmit_eval(ret);
3268}
3269EXPORT_SYMBOL(cxgb4_create_server);
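/*
 * Usage sketch (illustrative; "ctx", "port" and "rxq" are hypothetical):
 * a ULD pairs this call with an stid allocation, e.g.
 *
 *	int stid = cxgb4_alloc_stid(&adap->tids, PF_INET, ctx);
 *	if (stid >= 0)
 *		err = cxgb4_create_server(dev, stid, sip, htons(port),
 *					  0, rxq);
 *
 * where a negative return is an error and NET_XMIT_* values indicate the
 * request was handed to hardware.
 */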
3270
3271/**     cxgb4_create_server6 - create an IPv6 server
3272 *      @dev: the device
3273 *      @stid: the server TID
3274 *      @sip: local IPv6 address to bind server to
3275 *      @sport: the server's TCP port
3276 *      @queue: queue to direct messages from this server to
3277 *
3278 *      Create an IPv6 server for the given port and address.
3279 *      Returns <0 on error and one of the %NET_XMIT_* values on success.
3280 */
3281int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3282                         const struct in6_addr *sip, __be16 sport,
3283                         unsigned int queue)
3284{
3285        unsigned int chan;
3286        struct sk_buff *skb;
3287        struct adapter *adap;
3288        struct cpl_pass_open_req6 *req;
3289        int ret;
3290
3291        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3292        if (!skb)
3293                return -ENOMEM;
3294
3295        adap = netdev2adap(dev);
3296        req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3297        INIT_TP_WR(req, 0);
3298        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3299        req->local_port = sport;
3300        req->peer_port = htons(0);
3301        req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3302        req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3303        req->peer_ip_hi = cpu_to_be64(0);
3304        req->peer_ip_lo = cpu_to_be64(0);
3305        chan = rxq_to_chan(&adap->sge, queue);
3306        req->opt0 = cpu_to_be64(TX_CHAN(chan));
3307        req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3308                                SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3309        ret = t4_mgmt_tx(adap, skb);
3310        return net_xmit_eval(ret);
3311}
3312EXPORT_SYMBOL(cxgb4_create_server6);
3313
3314int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3315                        unsigned int queue, bool ipv6)
3316{
3317        struct sk_buff *skb;
3318        struct adapter *adap;
3319        struct cpl_close_listsvr_req *req;
3320        int ret;
3321
3322        adap = netdev2adap(dev);
3323
3324        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3325        if (!skb)
3326                return -ENOMEM;
3327
3328        req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3329        INIT_TP_WR(req, 0);
3330        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3331        req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3332                                LISTSVR_IPV6(0)) | QUEUENO(queue));
3333        ret = t4_mgmt_tx(adap, skb);
3334        return net_xmit_eval(ret);
3335}
3336EXPORT_SYMBOL(cxgb4_remove_server);
3337
3338/**
3339 *      cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3340 *      @mtus: the HW MTU table
3341 *      @mtu: the target MTU
3342 *      @idx: index of selected entry in the MTU table
3343 *
3344 *      Returns the index and the value in the HW MTU table that is closest to
3345 *      but does not exceed @mtu, unless @mtu is smaller than any value in the
3346 *      table, in which case that smallest available value is selected.
3347 */
3348unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3349                            unsigned int *idx)
3350{
3351        unsigned int i = 0;
3352
3353        while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3354                ++i;
3355        if (idx)
3356                *idx = i;
3357        return mtus[i];
3358}
3359EXPORT_SYMBOL(cxgb4_best_mtu);
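/*
 * Worked example (hypothetical table): with mtus[] = { 576, 1492, 1500,
 * 9000, ... }, a request for mtu = 1600 returns 1500 with *idx = 2 (the
 * largest entry not exceeding 1600), while mtu = 500 returns 576 with
 * *idx = 0 because no table entry fits under it.
 */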
3360
3361/**
3362 *      cxgb4_port_chan - get the HW channel of a port
3363 *      @dev: the net device for the port
3364 *
3365 *      Return the HW Tx channel of the given port.
3366 */
3367unsigned int cxgb4_port_chan(const struct net_device *dev)
3368{
3369        return netdev2pinfo(dev)->tx_chan;
3370}
3371EXPORT_SYMBOL(cxgb4_port_chan);
3372
3373unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3374{
3375        struct adapter *adap = netdev2adap(dev);
3376        u32 v1, v2, lp_count, hp_count;
3377
3378        v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3379        v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3380        if (is_t4(adap->params.chip)) {
3381                lp_count = G_LP_COUNT(v1);
3382                hp_count = G_HP_COUNT(v1);
3383        } else {
3384                lp_count = G_LP_COUNT_T5(v1);
3385                hp_count = G_HP_COUNT_T5(v2);
3386        }
3387        return lpfifo ? lp_count : hp_count;
3388}
3389EXPORT_SYMBOL(cxgb4_dbfifo_count);
3390
3391/**
3392 *      cxgb4_port_viid - get the VI id of a port
3393 *      @dev: the net device for the port
3394 *
3395 *      Return the VI id of the given port.
3396 */
3397unsigned int cxgb4_port_viid(const struct net_device *dev)
3398{
3399        return netdev2pinfo(dev)->viid;
3400}
3401EXPORT_SYMBOL(cxgb4_port_viid);
3402
3403/**
3404 *      cxgb4_port_idx - get the index of a port
3405 *      @dev: the net device for the port
3406 *
3407 *      Return the index of the given port.
3408 */
3409unsigned int cxgb4_port_idx(const struct net_device *dev)
3410{
3411        return netdev2pinfo(dev)->port_id;
3412}
3413EXPORT_SYMBOL(cxgb4_port_idx);
3414
3415void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3416                         struct tp_tcp_stats *v6)
3417{
3418        struct adapter *adap = pci_get_drvdata(pdev);
3419
3420        spin_lock(&adap->stats_lock);
3421        t4_tp_get_tcp_stats(adap, v4, v6);
3422        spin_unlock(&adap->stats_lock);
3423}
3424EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3425
3426void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3427                      const unsigned int *pgsz_order)
3428{
3429        struct adapter *adap = netdev2adap(dev);
3430
3431        t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3432        t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3433                     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3434                     HPZ3(pgsz_order[3]));
3435}
3436EXPORT_SYMBOL(cxgb4_iscsi_init);
3437
3438int cxgb4_flush_eq_cache(struct net_device *dev)
3439{
3440        struct adapter *adap = netdev2adap(dev);
3441        int ret;
3442
3443        ret = t4_fwaddrspace_write(adap, adap->mbox,
3444                                   0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3445        return ret;
3446}
3447EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3448
3449static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3450{
3451        u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3452        __be64 indices;
3453        int ret;
3454
3455        ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3456        if (!ret) {
3457                *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3458                *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3459        }
3460        return ret;
3461}
3462
3463int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3464                        u16 size)
3465{
3466        struct adapter *adap = netdev2adap(dev);
3467        u16 hw_pidx, hw_cidx;
3468        int ret;
3469
3470        ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3471        if (ret)
3472                goto out;
3473
3474        if (pidx != hw_pidx) {
3475                u16 delta;
3476
3477                if (pidx >= hw_pidx)
3478                        delta = pidx - hw_pidx;
3479                else
3480                        delta = size - hw_pidx + pidx;
3481                wmb();
3482                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3483                             QID(qid) | PIDX(delta));
3484        }
3485out:
3486        return ret;
3487}
3488EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
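/*
 * The delta computed above accounts for producer-index wrap.  Worked
 * example (hypothetical values): with size = 1024, hw_pidx = 1000 and
 * pidx = 8, the queue has wrapped, so delta = 1024 - 1000 + 8 = 32
 * entries are still owed to hardware via the doorbell.
 */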
3489
3490void cxgb4_disable_db_coalescing(struct net_device *dev)
3491{
3492        struct adapter *adap;
3493
3494        adap = netdev2adap(dev);
3495        t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3496                         F_NOCOALESCE);
3497}
3498EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3499
3500void cxgb4_enable_db_coalescing(struct net_device *dev)
3501{
3502        struct adapter *adap;
3503
3504        adap = netdev2adap(dev);
3505        t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3506}
3507EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3508
3509static struct pci_driver cxgb4_driver;
3510
3511static void check_neigh_update(struct neighbour *neigh)
3512{
3513        const struct device *parent;
3514        const struct net_device *netdev = neigh->dev;
3515
3516        if (netdev->priv_flags & IFF_802_1Q_VLAN)
3517                netdev = vlan_dev_real_dev(netdev);
3518        parent = netdev->dev.parent;
3519        if (parent && parent->driver == &cxgb4_driver.driver)
3520                t4_l2t_update(dev_get_drvdata(parent), neigh);
3521}
3522
3523static int netevent_cb(struct notifier_block *nb, unsigned long event,
3524                       void *data)
3525{
3526        switch (event) {
3527        case NETEVENT_NEIGH_UPDATE:
3528                check_neigh_update(data);
3529                break;
3530        case NETEVENT_REDIRECT:
3531        default:
3532                break;
3533        }
3534        return 0;
3535}
3536
3537static bool netevent_registered;
3538static struct notifier_block cxgb4_netevent_nb = {
3539        .notifier_call = netevent_cb
3540};
3541
3542static void drain_db_fifo(struct adapter *adap, int usecs)
3543{
3544        u32 v1, v2, lp_count, hp_count;
3545
3546        do {
3547                v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3548                v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3549                if (is_t4(adap->params.chip)) {
3550                        lp_count = G_LP_COUNT(v1);
3551                        hp_count = G_HP_COUNT(v1);
3552                } else {
3553                        lp_count = G_LP_COUNT_T5(v1);
3554                        hp_count = G_HP_COUNT_T5(v2);
3555                }
3556
3557                if (lp_count == 0 && hp_count == 0)
3558                        break;
3559                set_current_state(TASK_UNINTERRUPTIBLE);
3560                schedule_timeout(usecs_to_jiffies(usecs));
3561        } while (1);
3562}
3563
3564static void disable_txq_db(struct sge_txq *q)
3565{
3566        spin_lock_irq(&q->db_lock);
3567        q->db_disabled = 1;
3568        spin_unlock_irq(&q->db_lock);
3569}
3570
3571static void enable_txq_db(struct sge_txq *q)
3572{
3573        spin_lock_irq(&q->db_lock);
3574        q->db_disabled = 0;
3575        spin_unlock_irq(&q->db_lock);
3576}
3577
3578static void disable_dbs(struct adapter *adap)
3579{
3580        int i;
3581
3582        for_each_ethrxq(&adap->sge, i)
3583                disable_txq_db(&adap->sge.ethtxq[i].q);
3584        for_each_ofldrxq(&adap->sge, i)
3585                disable_txq_db(&adap->sge.ofldtxq[i].q);
3586        for_each_port(adap, i)
3587                disable_txq_db(&adap->sge.ctrlq[i].q);
3588}
3589
3590static void enable_dbs(struct adapter *adap)
3591{
3592        int i;
3593
3594        for_each_ethrxq(&adap->sge, i)
3595                enable_txq_db(&adap->sge.ethtxq[i].q);
3596        for_each_ofldrxq(&adap->sge, i)
3597                enable_txq_db(&adap->sge.ofldtxq[i].q);
3598        for_each_port(adap, i)
3599                enable_txq_db(&adap->sge.ctrlq[i].q);
3600}
3601
3602static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3603{
3604        u16 hw_pidx, hw_cidx;
3605        int ret;
3606
3607        spin_lock_bh(&q->db_lock);
3608        ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3609        if (ret)
3610                goto out;
3611        if (q->db_pidx != hw_pidx) {
3612                u16 delta;
3613
3614                if (q->db_pidx >= hw_pidx)
3615                        delta = q->db_pidx - hw_pidx;
3616                else
3617                        delta = q->size - hw_pidx + q->db_pidx;
3618                wmb();
3619                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3620                             QID(q->cntxt_id) | PIDX(delta));
3621        }
3622out:
3623        q->db_disabled = 0;
3624        spin_unlock_bh(&q->db_lock);
3625        if (ret)
3626                CH_WARN(adap, "DB drop recovery failed.\n");
3627}
3628static void recover_all_queues(struct adapter *adap)
3629{
3630        int i;
3631
3632        for_each_ethrxq(&adap->sge, i)
3633                sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3634        for_each_ofldrxq(&adap->sge, i)
3635                sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3636        for_each_port(adap, i)
3637                sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3638}
3639
3640static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3641{
3642        mutex_lock(&uld_mutex);
3643        if (adap->uld_handle[CXGB4_ULD_RDMA])
3644                ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3645                                cmd);
3646        mutex_unlock(&uld_mutex);
3647}
3648
3649static void process_db_full(struct work_struct *work)
3650{
3651        struct adapter *adap;
3652
3653        adap = container_of(work, struct adapter, db_full_task);
3654
3655        notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3656        drain_db_fifo(adap, dbfifo_drain_delay);
3657        t4_set_reg_field(adap, SGE_INT_ENABLE3,
3658                         DBFIFO_HP_INT | DBFIFO_LP_INT,
3659                         DBFIFO_HP_INT | DBFIFO_LP_INT);
3660        notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3661}
3662
3663static void process_db_drop(struct work_struct *work)
3664{
3665        struct adapter *adap;
3666
3667        adap = container_of(work, struct adapter, db_drop_task);
3668
3669        if (is_t4(adap->params.chip)) {
3670                disable_dbs(adap);
3671                notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3672                drain_db_fifo(adap, 1);
3673                recover_all_queues(adap);
3674                enable_dbs(adap);
3675        } else {
3676                u32 dropped_db = t4_read_reg(adap, 0x010ac);
3677                u32 qid = (dropped_db >> 15) & 0x1ffff; /* 17-bit field */
3678                u16 pidx_inc = dropped_db & 0x1fff;
3679                unsigned int s_qpp;
3680                unsigned short udb_density;
3681                unsigned long qpshift;
3682                int page;
3683                u32 udb;
3684
3685                dev_warn(adap->pdev_dev,
3686                         "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3687                         dropped_db, qid,
3688                         (dropped_db >> 14) & 1,
3689                         (dropped_db >> 13) & 1,
3690                         pidx_inc);
3691
3692                drain_db_fifo(adap, 1);
3693
3694                s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3695                udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3696                                SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3697                qpshift = PAGE_SHIFT - ilog2(udb_density);
3698                udb = qid << qpshift;
3699                udb &= PAGE_MASK;
3700                page = udb / PAGE_SIZE;
3701                udb += (qid - (page * udb_density)) * 128;
3702
3703                writel(PIDX(pidx_inc),  adap->bar2 + udb + 8);
3704
3705                /* Re-enable BAR2 WC */
3706                t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3707        }
3708
3709        t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
3710}
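/*
 * Worked example of the T5 doorbell-address recovery above (all values
 * hypothetical): with 4KB pages and udb_density = 16 queues/page,
 * qpshift = 12 - ilog2(16) = 8, so qid = 35 yields udb = (35 << 8) &
 * PAGE_MASK = 0x2000, page = 2, then udb += (35 - 2 * 16) * 128 = 0x180,
 * giving a final user-doorbell offset of 0x2180; PIDX(pidx_inc) is
 * written at that offset + 8.
 */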
3711
3712void t4_db_full(struct adapter *adap)
3713{
3714        if (is_t4(adap->params.chip)) {
3715                t4_set_reg_field(adap, SGE_INT_ENABLE3,
3716                                 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3717                queue_work(workq, &adap->db_full_task);
3718        }
3719}
3720
3721void t4_db_dropped(struct adapter *adap)
3722{
3723        if (is_t4(adap->params.chip))
3724                queue_work(workq, &adap->db_drop_task);
3725}
3726
3727static void uld_attach(struct adapter *adap, unsigned int uld)
3728{
3729        void *handle;
3730        struct cxgb4_lld_info lli;
3731        unsigned short i;
3732
3733        lli.pdev = adap->pdev;
3734        lli.l2t = adap->l2t;
3735        lli.tids = &adap->tids;
3736        lli.ports = adap->port;
3737        lli.vr = &adap->vres;
3738        lli.mtus = adap->params.mtus;
3739        if (uld == CXGB4_ULD_RDMA) {
3740                lli.rxq_ids = adap->sge.rdma_rxq;
3741                lli.nrxq = adap->sge.rdmaqs;
3742        } else if (uld == CXGB4_ULD_ISCSI) {
3743                lli.rxq_ids = adap->sge.ofld_rxq;
3744                lli.nrxq = adap->sge.ofldqsets;
3745        }
3746        lli.ntxq = adap->sge.ofldqsets;
3747        lli.nchan = adap->params.nports;
3748        lli.nports = adap->params.nports;
3749        lli.wr_cred = adap->params.ofldq_wr_cred;
3750        lli.adapter_type = adap->params.chip;
3751        lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3752        lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3753                        t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3754                        (adap->fn * 4));
3755        lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3756                        t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3757                        (adap->fn * 4));
3758        lli.filt_mode = adap->params.tp.vlan_pri_map;
3759        /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3760        for (i = 0; i < NCHAN; i++)
3761                lli.tx_modq[i] = i;
3762        lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3763        lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3764        lli.fw_vers = adap->params.fw_vers;
3765        lli.dbfifo_int_thresh = dbfifo_int_thresh;
3766        lli.sge_pktshift = adap->sge.pktshift;
3767        lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3768
3769        handle = ulds[uld].add(&lli);
3770        if (IS_ERR(handle)) {
3771                dev_warn(adap->pdev_dev,
3772                         "could not attach to the %s driver, error %ld\n",
3773                         uld_str[uld], PTR_ERR(handle));
3774                return;
3775        }
3776
3777        adap->uld_handle[uld] = handle;
3778
3779        if (!netevent_registered) {
3780                register_netevent_notifier(&cxgb4_netevent_nb);
3781                netevent_registered = true;
3782        }
3783
3784        if (adap->flags & FULL_INIT_DONE)
3785                ulds[uld].state_change(handle, CXGB4_STATE_UP);
3786}
3787
3788static void attach_ulds(struct adapter *adap)
3789{
3790        unsigned int i;
3791
3792        spin_lock(&adap_rcu_lock);
3793        list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
3794        spin_unlock(&adap_rcu_lock);
3795
3796        mutex_lock(&uld_mutex);
3797        list_add_tail(&adap->list_node, &adapter_list);
3798        for (i = 0; i < CXGB4_ULD_MAX; i++)
3799                if (ulds[i].add)
3800                        uld_attach(adap, i);
3801        mutex_unlock(&uld_mutex);
3802}
3803
3804static void detach_ulds(struct adapter *adap)
3805{
3806        unsigned int i;
3807
3808        mutex_lock(&uld_mutex);
3809        list_del(&adap->list_node);
3810        for (i = 0; i < CXGB4_ULD_MAX; i++)
3811                if (adap->uld_handle[i]) {
3812                        ulds[i].state_change(adap->uld_handle[i],
3813                                             CXGB4_STATE_DETACH);
3814                        adap->uld_handle[i] = NULL;
3815                }
3816        if (netevent_registered && list_empty(&adapter_list)) {
3817                unregister_netevent_notifier(&cxgb4_netevent_nb);
3818                netevent_registered = false;
3819        }
3820        mutex_unlock(&uld_mutex);
3821
3822        spin_lock(&adap_rcu_lock);
3823        list_del_rcu(&adap->rcu_node);
3824        spin_unlock(&adap_rcu_lock);
3825}
3826
3827static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3828{
3829        unsigned int i;
3830
3831        mutex_lock(&uld_mutex);
3832        for (i = 0; i < CXGB4_ULD_MAX; i++)
3833                if (adap->uld_handle[i])
3834                        ulds[i].state_change(adap->uld_handle[i], new_state);
3835        mutex_unlock(&uld_mutex);
3836}
3837
3838/**
3839 *      cxgb4_register_uld - register an upper-layer driver
3840 *      @type: the ULD type
3841 *      @p: the ULD methods
3842 *
3843 *      Registers an upper-layer driver with this driver and notifies the ULD
3844 *      about any presently available devices that support its type.  Returns
3845 *      %-EBUSY if a ULD of the same type is already registered.
3846 */
3847int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3848{
3849        int ret = 0;
3850        struct adapter *adap;
3851
3852        if (type >= CXGB4_ULD_MAX)
3853                return -EINVAL;
3854        mutex_lock(&uld_mutex);
3855        if (ulds[type].add) {
3856                ret = -EBUSY;
3857                goto out;
3858        }
3859        ulds[type] = *p;
3860        list_for_each_entry(adap, &adapter_list, list_node)
3861                uld_attach(adap, type);
3862out:    mutex_unlock(&uld_mutex);
3863        return ret;
3864}
3865EXPORT_SYMBOL(cxgb4_register_uld);
3866
3867/**
3868 *      cxgb4_unregister_uld - unregister an upper-layer driver
3869 *      @type: the ULD type
3870 *
3871 *      Unregisters an existing upper-layer driver.
3872 */
3873int cxgb4_unregister_uld(enum cxgb4_uld type)
3874{
3875        struct adapter *adap;
3876
3877        if (type >= CXGB4_ULD_MAX)
3878                return -EINVAL;
3879        mutex_lock(&uld_mutex);
3880        list_for_each_entry(adap, &adapter_list, list_node)
3881                adap->uld_handle[type] = NULL;
3882        ulds[type].add = NULL;
3883        mutex_unlock(&uld_mutex);
3884        return 0;
3885}
3886EXPORT_SYMBOL(cxgb4_unregister_uld);
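/*
 * Registration sketch (illustrative; all "my_*" names are hypothetical):
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.add		= my_add,
 *		.rx_handler	= my_rx_handler,
 *		.state_change	= my_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 *
 * .add() is invoked once per existing adapter with the cxgb4_lld_info
 * snapshot built in uld_attach() above.
 */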
3887
3888/* Check whether the netdev on which the event occurred belongs to us.
3889 * Returns success (1) if it does, otherwise failure (0).
3890 */
3891static int cxgb4_netdev(struct net_device *netdev)
3892{
3893        struct adapter *adap;
3894        int i;
3895
3896        spin_lock(&adap_rcu_lock);
3897        list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
3898                for (i = 0; i < MAX_NPORTS; i++)
3899                        if (adap->port[i] == netdev) {
3900                                spin_unlock(&adap_rcu_lock);
3901                                return 1;
3902                        }
3903        spin_unlock(&adap_rcu_lock);
3904        return 0;
3905}
3906
3907static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
3908                    unsigned long event)
3909{
3910        int ret = NOTIFY_DONE;
3911
3912        rcu_read_lock();
3913        if (cxgb4_netdev(event_dev)) {
3914                switch (event) {
3915                case NETDEV_UP:
3916                        ret = cxgb4_clip_get(event_dev,
3917                                (const struct in6_addr *)ifa->addr.s6_addr);
3918                        if (ret < 0) {
3919                                rcu_read_unlock();
3920                                return ret;
3921                        }
3922                        ret = NOTIFY_OK;
3923                        break;
3924                case NETDEV_DOWN:
3925                        cxgb4_clip_release(event_dev,
3926                                (const struct in6_addr *)ifa->addr.s6_addr);
3927                        ret = NOTIFY_OK;
3928                        break;
3929                default:
3930                        break;
3931                }
3932        }
3933        rcu_read_unlock();
3934        return ret;
3935}
3936
3937static int cxgb4_inet6addr_handler(struct notifier_block *this,
3938                unsigned long event, void *data)
3939{
3940        struct inet6_ifaddr *ifa = data;
3941        struct net_device *event_dev;
3942        int ret = NOTIFY_DONE;
3943        struct bonding *bond = netdev_priv(ifa->idev->dev);
3944        struct list_head *iter;
3945        struct slave *slave;
3946        struct pci_dev *first_pdev = NULL;
3947
3948        if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
3949                event_dev = vlan_dev_real_dev(ifa->idev->dev);
3950                ret = clip_add(event_dev, ifa, event);
3951        } else if (ifa->idev->dev->flags & IFF_MASTER) {
3952                /* It is possible that two different adapters are bonded in one
3953                 * bond.  We need to find each such adapter and add the CLIP
3954                 * entry to all of them, but only once per adapter.
3955                 */
3956                read_lock(&bond->lock);
3957                bond_for_each_slave(bond, slave, iter) {
3958                        if (!first_pdev) {
3959                                ret = clip_add(slave->dev, ifa, event);
3960                                /* Only initialize first_pdev if clip_add
3961                                 * succeeds, since that means the device is ours.
3962                                 */
3963                                if (ret == NOTIFY_OK)
3964                                        first_pdev = to_pci_dev(
3965                                                        slave->dev->dev.parent);
3966                        } else if (first_pdev !=
3967                                   to_pci_dev(slave->dev->dev.parent))
3968                                        ret = clip_add(slave->dev, ifa, event);
3969                }
3970                read_unlock(&bond->lock);
3971        } else
3972                ret = clip_add(ifa->idev->dev, ifa, event);
3973
3974        return ret;
3975}
3976
3977static struct notifier_block cxgb4_inet6addr_notifier = {
3978        .notifier_call = cxgb4_inet6addr_handler
3979};
3980
3981/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
3982 * a physical device.
3983 * The physical device reference is needed to send the actual CLIP command.
3984 */
3985static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
3986{
3987        struct inet6_dev *idev = NULL;
3988        struct inet6_ifaddr *ifa;
3989        int ret = 0;
3990
3991        idev = __in6_dev_get(root_dev);
3992        if (!idev)
3993                return ret;
3994
3995        read_lock_bh(&idev->lock);
3996        list_for_each_entry(ifa, &idev->addr_list, if_list) {
3997                ret = cxgb4_clip_get(dev,
3998                                (const struct in6_addr *)ifa->addr.s6_addr);
3999                if (ret < 0)
4000                        break;
4001        }
4002        read_unlock_bh(&idev->lock);
4003
4004        return ret;
4005}
4006
4007static int update_root_dev_clip(struct net_device *dev)
4008{
4009        struct net_device *root_dev = NULL;
4010        int i, ret = 0;
4011
4012        /* First populate the real net device's IPv6 addresses */
4013        ret = update_dev_clip(dev, dev);
4014        if (ret)
4015                return ret;
4016
4017        /* Parse all bond and vlan devices layered on top of the physical dev */
4018        for (i = 0; i < VLAN_N_VID; i++) {
4019                root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
4020                if (!root_dev)
4021                        continue;
4022
4023                ret = update_dev_clip(root_dev, dev);
4024                if (ret)
4025                        break;
4026        }
4027        return ret;
4028}
4029
4030static void update_clip(const struct adapter *adap)
4031{
4032        int i;
4033        struct net_device *dev;
4034        int ret;
4035
4036        rcu_read_lock();
4037
4038        for (i = 0; i < MAX_NPORTS; i++) {
4039                dev = adap->port[i];
4040                ret = 0;
4041
4042                if (dev)
4043                        ret = update_root_dev_clip(dev);
4044
4045                if (ret < 0)
4046                        break;
4047        }
4048        rcu_read_unlock();
4049}
4050
4051/**
4052 *      cxgb_up - enable the adapter
4053 *      @adap: adapter being enabled
4054 *
4055 *      Called when the first port is enabled, this function performs the
4056 *      actions necessary to make an adapter operational, such as completing
4057 *      the initialization of HW modules, and enabling interrupts.
4058 *
4059 *      Must be called with the rtnl lock held.
4060 */
4061static int cxgb_up(struct adapter *adap)
4062{
4063        int err;
4064
4065        err = setup_sge_queues(adap);
4066        if (err)
4067                goto out;
4068        err = setup_rss(adap);
4069        if (err)
4070                goto freeq;
4071
4072        if (adap->flags & USING_MSIX) {
4073                name_msix_vecs(adap);
4074                err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4075                                  adap->msix_info[0].desc, adap);
4076                if (err)
4077                        goto irq_err;
4078
4079                err = request_msix_queue_irqs(adap);
4080                if (err) {
4081                        free_irq(adap->msix_info[0].vec, adap);
4082                        goto irq_err;
4083                }
4084        } else {
4085                err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4086                                  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4087                                  adap->port[0]->name, adap);
4088                if (err)
4089                        goto irq_err;
4090        }
4091        enable_rx(adap);
4092        t4_sge_start(adap);
4093        t4_intr_enable(adap);
4094        adap->flags |= FULL_INIT_DONE;
4095        notify_ulds(adap, CXGB4_STATE_UP);
4096        update_clip(adap);
4097 out:
4098        return err;
4099 irq_err:
4100        dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4101 freeq:
4102        t4_free_sge_resources(adap);
4103        goto out;
4104}
4105
4106static void cxgb_down(struct adapter *adapter)
4107{
4108        t4_intr_disable(adapter);
4109        cancel_work_sync(&adapter->tid_release_task);
4110        cancel_work_sync(&adapter->db_full_task);
4111        cancel_work_sync(&adapter->db_drop_task);
4112        adapter->tid_release_task_busy = false;
4113        adapter->tid_release_head = NULL;
4114
4115        if (adapter->flags & USING_MSIX) {
4116                free_msix_queue_irqs(adapter);
4117                free_irq(adapter->msix_info[0].vec, adapter);
4118        } else
4119                free_irq(adapter->pdev->irq, adapter);
4120        quiesce_rx(adapter);
4121        t4_sge_stop(adapter);
4122        t4_free_sge_resources(adapter);
4123        adapter->flags &= ~FULL_INIT_DONE;
4124}
4125
4126/*
4127 * net_device operations
4128 */
4129static int cxgb_open(struct net_device *dev)
4130{
4131        int err;
4132        struct port_info *pi = netdev_priv(dev);
4133        struct adapter *adapter = pi->adapter;
4134
4135        netif_carrier_off(dev);
4136
4137        if (!(adapter->flags & FULL_INIT_DONE)) {
4138                err = cxgb_up(adapter);
4139                if (err < 0)
4140                        return err;
4141        }
4142
4143        err = link_start(dev);
4144        if (!err)
4145                netif_tx_start_all_queues(dev);
4146        return err;
4147}
4148
4149static int cxgb_close(struct net_device *dev)
4150{
4151        struct port_info *pi = netdev_priv(dev);
4152        struct adapter *adapter = pi->adapter;
4153
4154        netif_tx_stop_all_queues(dev);
4155        netif_carrier_off(dev);
4156        return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4157}
4158
4159/* Return an error number if the indicated filter isn't writable ...
4160 */
4161static int writable_filter(struct filter_entry *f)
4162{
4163        if (f->locked)
4164                return -EPERM;
4165        if (f->pending)
4166                return -EBUSY;
4167
4168        return 0;
4169}
4170
4171/* Delete the filter at the specified index (if valid).  This checks for all
4172 * the common problems with doing this, like the filter being locked or
4173 * currently pending in another operation.
4174 */
4175static int delete_filter(struct adapter *adapter, unsigned int fidx)
4176{
4177        struct filter_entry *f;
4178        int ret;
4179
4180        if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4181                return -EINVAL;
4182
4183        f = &adapter->tids.ftid_tab[fidx];
4184        ret = writable_filter(f);
4185        if (ret)
4186                return ret;
4187        if (f->valid)
4188                return del_filter_wr(adapter, fidx);
4189
4190        return 0;
4191}
4192
4193int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4194                __be32 sip, __be16 sport, __be16 vlan,
4195                unsigned int queue, unsigned char port, unsigned char mask)
4196{
4197        int ret;
4198        struct filter_entry *f;
4199        struct adapter *adap;
4200        int i;
4201        u8 *val;
4202
4203        adap = netdev2adap(dev);
4204
4205        /* Adjust stid to correct filter index */
4206        stid -= adap->tids.sftid_base;
4207        stid += adap->tids.nftids;
4208
4209        /* Check to make sure the filter requested is writable ...
4210         */
4211        f = &adap->tids.ftid_tab[stid];
4212        ret = writable_filter(f);
4213        if (ret)
4214                return ret;
4215
4216        /* Clear out any old resources being used by the filter before
4217         * we start constructing the new filter.
4218         */
4219        if (f->valid)
4220                clear_filter(adap, f);
4221
4222        /* Clear out filter specifications */
4223        memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4224        f->fs.val.lport = cpu_to_be16(sport);
4225        f->fs.mask.lport  = ~0;
4226        val = (u8 *)&sip;
4227        if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4228                for (i = 0; i < 4; i++) {
4229                        f->fs.val.lip[i] = val[i];
4230                        f->fs.mask.lip[i] = ~0;
4231                }
4232                if (adap->params.tp.vlan_pri_map & F_PORT) {
4233                        f->fs.val.iport = port;
4234                        f->fs.mask.iport = mask;
4235                }
4236        }
4237
4238        if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4239                f->fs.val.proto = IPPROTO_TCP;
4240                f->fs.mask.proto = ~0;
4241        }
4242
4243        f->fs.dirsteer = 1;
4244        f->fs.iq = queue;
4245        /* Mark filter as locked */
4246        f->locked = 1;
4247        f->fs.rpttid = 1;
4248
4249        ret = set_filter_wr(adap, stid);
4250        if (ret) {
4251                clear_filter(adap, f);
4252                return ret;
4253        }
4254
4255        return 0;
4256}
4257EXPORT_SYMBOL(cxgb4_create_server_filter);
4258
4259int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4260                unsigned int queue, bool ipv6)
4261{
4262        int ret;
4263        struct filter_entry *f;
4264        struct adapter *adap;
4265
4266        adap = netdev2adap(dev);
4267
4268        /* Adjust stid to correct filter index */
4269        stid -= adap->tids.sftid_base;
4270        stid += adap->tids.nftids;
4271
4272        f = &adap->tids.ftid_tab[stid];
4273        /* Unlock the filter */
4274        f->locked = 0;
4275
4276        ret = delete_filter(adap, stid);
4277        if (ret)
4278                return ret;
4279
4280        return 0;
4281}
4282EXPORT_SYMBOL(cxgb4_remove_server_filter);
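/*
 * Pairing sketch for the two filter calls above (illustrative; "ctx",
 * "rxq", "port" and "mask" are hypothetical):
 *
 *	stid = cxgb4_alloc_sftid(&adap->tids, PF_INET, ctx);
 *	ret = cxgb4_create_server_filter(dev, stid, sip, sport, 0,
 *					 rxq, port, mask);
 *	...
 *	cxgb4_remove_server_filter(dev, stid, rxq, false);
 *	cxgb4_free_stid(&adap->tids, stid, PF_INET);
 */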
4283
4284static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4285                                                struct rtnl_link_stats64 *ns)
4286{
4287        struct port_stats stats;
4288        struct port_info *p = netdev_priv(dev);
4289        struct adapter *adapter = p->adapter;
4290
4291        spin_lock(&adapter->stats_lock);
4292        t4_get_port_stats(adapter, p->tx_chan, &stats);
4293        spin_unlock(&adapter->stats_lock);
4294
4295        ns->tx_bytes   = stats.tx_octets;
4296        ns->tx_packets = stats.tx_frames;
4297        ns->rx_bytes   = stats.rx_octets;
4298        ns->rx_packets = stats.rx_frames;
4299        ns->multicast  = stats.rx_mcast_frames;
4300
4301        /* detailed rx_errors */
4302        ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4303                               stats.rx_runt;
4304        ns->rx_over_errors   = 0;
4305        ns->rx_crc_errors    = stats.rx_fcs_err;
4306        ns->rx_frame_errors  = stats.rx_symbol_err;
4307        ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
4308                               stats.rx_ovflow2 + stats.rx_ovflow3 +
4309                               stats.rx_trunc0 + stats.rx_trunc1 +
4310                               stats.rx_trunc2 + stats.rx_trunc3;
4311        ns->rx_missed_errors = 0;
4312
4313        /* detailed tx_errors */
4314        ns->tx_aborted_errors   = 0;
4315        ns->tx_carrier_errors   = 0;
4316        ns->tx_fifo_errors      = 0;
4317        ns->tx_heartbeat_errors = 0;
4318        ns->tx_window_errors    = 0;
4319
4320        ns->tx_errors = stats.tx_error_frames;
4321        ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4322                ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4323        return ns;
4324}
4325
4326static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4327{
4328        unsigned int mbox;
4329        int ret = 0, prtad, devad;
4330        struct port_info *pi = netdev_priv(dev);
4331        struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4332
4333        switch (cmd) {
4334        case SIOCGMIIPHY:
4335                if (pi->mdio_addr < 0)
4336                        return -EOPNOTSUPP;
4337                data->phy_id = pi->mdio_addr;
4338                break;
4339        case SIOCGMIIREG:
4340        case SIOCSMIIREG:
4341                if (mdio_phy_id_is_c45(data->phy_id)) {
4342                        prtad = mdio_phy_id_prtad(data->phy_id);
4343                        devad = mdio_phy_id_devad(data->phy_id);
4344                } else if (data->phy_id < 32) {
4345                        prtad = data->phy_id;
4346                        devad = 0;
4347                        data->reg_num &= 0x1f;
4348                } else
4349                        return -EINVAL;
4350
4351                mbox = pi->adapter->fn;
4352                if (cmd == SIOCGMIIREG)
4353                        ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4354                                         data->reg_num, &data->val_out);
4355                else
4356                        ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4357                                         data->reg_num, data->val_in);
4358                break;
4359        default:
4360                return -EOPNOTSUPP;
4361        }
4362        return ret;
4363}
4364
4365static void cxgb_set_rxmode(struct net_device *dev)
4366{
4367        /* unfortunately we can't return errors to the stack */
4368        set_rxmode(dev, -1, false);
4369}
4370
4371static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4372{
4373        int ret;
4374        struct port_info *pi = netdev_priv(dev);
4375
4376        if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
4377                return -EINVAL;
4378        ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4379                            -1, -1, -1, true);
4380        if (!ret)
4381                dev->mtu = new_mtu;
4382        return ret;
4383}
4384
4385static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4386{
4387        int ret;
4388        struct sockaddr *addr = p;
4389        struct port_info *pi = netdev_priv(dev);
4390
4391        if (!is_valid_ether_addr(addr->sa_data))
4392                return -EADDRNOTAVAIL;
4393
4394        ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4395                            pi->xact_addr_filt, addr->sa_data, true, true);
4396        if (ret < 0)
4397                return ret;
4398
4399        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4400        pi->xact_addr_filt = ret;
4401        return 0;
4402}
4403
4404#ifdef CONFIG_NET_POLL_CONTROLLER
4405static void cxgb_netpoll(struct net_device *dev)
4406{
4407        struct port_info *pi = netdev_priv(dev);
4408        struct adapter *adap = pi->adapter;
4409
4410        if (adap->flags & USING_MSIX) {
4411                int i;
4412                struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4413
4414                for (i = pi->nqsets; i; i--, rx++)
4415                        t4_sge_intr_msix(0, &rx->rspq);
4416        } else
4417                t4_intr_handler(adap)(0, adap);
4418}
4419#endif
4420
4421static const struct net_device_ops cxgb4_netdev_ops = {
4422        .ndo_open             = cxgb_open,
4423        .ndo_stop             = cxgb_close,
4424        .ndo_start_xmit       = t4_eth_xmit,
4425        .ndo_get_stats64      = cxgb_get_stats,
4426        .ndo_set_rx_mode      = cxgb_set_rxmode,
4427        .ndo_set_mac_address  = cxgb_set_mac_addr,
4428        .ndo_set_features     = cxgb_set_features,
4429        .ndo_validate_addr    = eth_validate_addr,
4430        .ndo_do_ioctl         = cxgb_ioctl,
4431        .ndo_change_mtu       = cxgb_change_mtu,
4432#ifdef CONFIG_NET_POLL_CONTROLLER
4433        .ndo_poll_controller  = cxgb_netpoll,
4434#endif
4435};
4436
4437void t4_fatal_err(struct adapter *adap)
4438{
4439        t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4440        t4_intr_disable(adap);
4441        dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4442}
4443
4444static void setup_memwin(struct adapter *adap)
4445{
4446        u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
4447
4448        bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
4449        if (is_t4(adap->params.chip)) {
4450                mem_win0_base = bar0 + MEMWIN0_BASE;
4451                mem_win1_base = bar0 + MEMWIN1_BASE;
4452                mem_win2_base = bar0 + MEMWIN2_BASE;
4453        } else {
4454                /* For T5, only relative offset inside the PCIe BAR is passed */
4455                mem_win0_base = MEMWIN0_BASE;
4456                mem_win1_base = MEMWIN1_BASE_T5;
4457                mem_win2_base = MEMWIN2_BASE_T5;
4458        }
4459        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4460                     mem_win0_base | BIR(0) |
4461                     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4462        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4463                     mem_win1_base | BIR(0) |
4464                     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4465        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4466                     mem_win2_base | BIR(0) |
4467                     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
4468}
4469
4470static void setup_memwin_rdma(struct adapter *adap)
4471{
4472        if (adap->vres.ocq.size) {
4473                unsigned int start, sz_kb;
4474
4475                start = pci_resource_start(adap->pdev, 2) +
4476                        OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4477                sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4478                t4_write_reg(adap,
4479                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4480                             start | BIR(1) | WINDOW(ilog2(sz_kb)));
4481                t4_write_reg(adap,
4482                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4483                             adap->vres.ocq.start);
4484                t4_read_reg(adap,
4485                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4486        }
4487}
4488
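/*
 * Re-establish the adapter's basic operating parameters: re-read and
 * re-select device capabilities, reconfigure global RSS and PF resources,
 * and reapply a few TP tweaks.  Used by the EEH slot-reset path below.
 */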
4489static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4490{
4491        u32 v;
4492        int ret;
4493
4494        /* get device capabilities */
4495        memset(c, 0, sizeof(*c));
4496        c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4497                               FW_CMD_REQUEST | FW_CMD_READ);
4498        c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4499        ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4500        if (ret < 0)
4501                return ret;
4502
4503        /* select capabilities we'll be using */
4504        if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4505                if (!vf_acls)
4506                        c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4507                else
4508                        c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4509        } else if (vf_acls) {
                dev_err(adap->pdev_dev, "virtualization ACLs not supported\n");
                return -EINVAL;
4512        }
4513        c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4514                               FW_CMD_REQUEST | FW_CMD_WRITE);
4515        ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4516        if (ret < 0)
4517                return ret;
4518
4519        ret = t4_config_glbl_rss(adap, adap->fn,
4520                                 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4521                                 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4522                                 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4523        if (ret < 0)
4524                return ret;
4525
4526        ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4527                          0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4528        if (ret < 0)
4529                return ret;
4530
4531        t4_sge_init(adap);
4532
4533        /* tweak some settings */
4534        t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4535        t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4536        t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4537        v = t4_read_reg(adap, TP_PIO_DATA);
4538        t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4539
4540        /* first 4 Tx modulation queues point to consecutive Tx channels */
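        /* (0xE4 = 0b11100100 packs one 2-bit channel number per queue,
         *  giving queue N -> channel N for N = 0..3)
         */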
4541        adap->params.tp.tx_modq_map = 0xE4;
4542        t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4543                     V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4544
4545        /* associate each Tx modulation queue with consecutive Tx channels */
4546        v = 0x84218421;
4547        t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4548                          &v, 1, A_TP_TX_SCHED_HDR);
4549        t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4550                          &v, 1, A_TP_TX_SCHED_FIFO);
4551        t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4552                          &v, 1, A_TP_TX_SCHED_PCMD);
4553
4554#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4555        if (is_offload(adap)) {
4556                t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4557                             V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4558                             V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4559                             V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4560                             V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4561                t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4562                             V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4563                             V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4564                             V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4565                             V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4566        }
4567
4568        /* get basic stuff going */
4569        return t4_early_init(adap, adap->fn);
4570}
4571
4572/*
4573 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
4574 */
4575#define MAX_ATIDS 8192U
4576
4584/*
4585 * Tweak configuration based on module parameters, etc.  Most of these have
4586 * defaults assigned to them by Firmware Configuration Files (if we're using
4587 * them) but need to be explicitly set if we're using hard-coded
4588 * initialization.  But even in the case of using Firmware Configuration
4589 * Files, we'd like to expose the ability to change these via module
4590 * parameters so these are essentially common tweaks/settings for
4591 * Configuration Files and hard-coded initialization ...
4592 */
4593static int adap_init0_tweaks(struct adapter *adapter)
4594{
4595        /*
4596         * Fix up various Host-Dependent Parameters like Page Size, Cache
4597         * Line Size, etc.  The firmware default is for a 4KB Page Size and
4598         * 64B Cache Line Size ...
4599         */
4600        t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4601
4602        /*
4603         * Process module parameters which affect early initialization.
4604         */
4605        if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4606                dev_err(&adapter->pdev->dev,
4607                        "Ignoring illegal rx_dma_offset=%d, using 2\n",
4608                        rx_dma_offset);
4609                rx_dma_offset = 2;
4610        }
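        /*
         * PKTSHIFT sets how many bytes of padding the chip places ahead of
         * each ingress packet; an offset of 2 keeps the IP header 4-byte
         * aligned (the same idea as NET_IP_ALIGN).
         */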
4611        t4_set_reg_field(adapter, SGE_CONTROL,
4612                         PKTSHIFT_MASK,
4613                         PKTSHIFT(rx_dma_offset));
4614
4615        /*
4616         * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4617         * adds the pseudo header itself.
4618         */
4619        t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4620                               CSUM_HAS_PSEUDO_HDR, 0);
4621
4622        return 0;
4623}
4624
4625/*
4626 * Attempt to initialize the adapter via a Firmware Configuration File.
4627 */
4628static int adap_init0_config(struct adapter *adapter, int reset)
4629{
4630        struct fw_caps_config_cmd caps_cmd;
4631        const struct firmware *cf;
4632        unsigned long mtype = 0, maddr = 0;
4633        u32 finiver, finicsum, cfcsum;
4634        int ret;
4635        int config_issued = 0;
4636        char *fw_config_file, fw_config_file_path[256];
4637        char *config_name = NULL;
4638
4639        /*
4640         * Reset device if necessary.
4641         */
4642        if (reset) {
4643                ret = t4_fw_reset(adapter, adapter->mbox,
4644                                  PIORSTMODE | PIORST);
4645                if (ret < 0)
4646                        goto bye;
4647        }
4648
4649        /*
4650         * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4651         * then use that.  Otherwise, use the configuration file stored
4652         * in the adapter flash ...
4653         */
4654        switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4655        case CHELSIO_T4:
4656                fw_config_file = FW4_CFNAME;
4657                break;
4658        case CHELSIO_T5:
4659                fw_config_file = FW5_CFNAME;
4660                break;
4661        default:
4662                dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4663                       adapter->pdev->device);
4664                ret = -EINVAL;
4665                goto bye;
4666        }
4667
4668        ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4669        if (ret < 0) {
4670                config_name = "On FLASH";
4671                mtype = FW_MEMTYPE_CF_FLASH;
4672                maddr = t4_flash_cfg_addr(adapter);
4673        } else {
4674                u32 params[7], val[7];
4675
4676                sprintf(fw_config_file_path,
4677                        "/lib/firmware/%s", fw_config_file);
4678                config_name = fw_config_file_path;
4679
4680                if (cf->size >= FLASH_CFG_MAX_SIZE)
4681                        ret = -ENOMEM;
4682                else {
4683                        params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4684                             FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4685                        ret = t4_query_params(adapter, adapter->mbox,
4686                                              adapter->fn, 0, 1, params, val);
4687                        if (ret == 0) {
4688                                /*
                                 * For t4_memory_write() below, addresses and
4690                                 * sizes have to be in terms of multiples of 4
4691                                 * bytes.  So, if the Configuration File isn't
4692                                 * a multiple of 4 bytes in length we'll have
4693                                 * to write that out separately since we can't
4694                                 * guarantee that the bytes following the
4695                                 * residual byte in the buffer returned by
4696                                 * request_firmware() are zeroed out ...
4697                                 */
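                                /* e.g. a 1001-byte Configuration File is
                                 * written as 1000 bytes followed by one
                                 * zero-padded 4-byte word
                                 */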
4698                                size_t resid = cf->size & 0x3;
4699                                size_t size = cf->size & ~0x3;
4700                                __be32 *data = (__be32 *)cf->data;
4701
4702                                mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4703                                maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4704
4705                                ret = t4_memory_write(adapter, mtype, maddr,
4706                                                      size, data);
4707                                if (ret == 0 && resid != 0) {
4708                                        union {
4709                                                __be32 word;
4710                                                char buf[4];
4711                                        } last;
4712                                        int i;
4713
4714                                        last.word = data[size >> 2];
4715                                        for (i = resid; i < 4; i++)
4716                                                last.buf[i] = 0;
4717                                        ret = t4_memory_write(adapter, mtype,
4718                                                              maddr + size,
4719                                                              4, &last.word);
4720                                }
4721                        }
4722                }
4723
4724                release_firmware(cf);
4725                if (ret)
4726                        goto bye;
4727        }
4728
4729        /*
4730         * Issue a Capability Configuration command to the firmware to get it
4731         * to parse the Configuration File.  We don't use t4_fw_config_file()
4732         * because we want the ability to modify various features after we've
4733         * processed the configuration file ...
4734         */
4735        memset(&caps_cmd, 0, sizeof(caps_cmd));
4736        caps_cmd.op_to_write =
4737                htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4738                      FW_CMD_REQUEST |
4739                      FW_CMD_READ);
4740        caps_cmd.cfvalid_to_len16 =
4741                htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4742                      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4743                      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4744                      FW_LEN16(caps_cmd));
4745        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4746                         &caps_cmd);
4747
4748        /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4749         * Configuration File in FLASH), our last gasp effort is to use the
4750         * Firmware Configuration File which is embedded in the firmware.  A
4751         * very few early versions of the firmware didn't have one embedded
4752         * but we can ignore those.
4753         */
4754        if (ret == -ENOENT) {
4755                memset(&caps_cmd, 0, sizeof(caps_cmd));
4756                caps_cmd.op_to_write =
4757                        htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4758                                        FW_CMD_REQUEST |
4759                                        FW_CMD_READ);
4760                caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4761                ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4762                                sizeof(caps_cmd), &caps_cmd);
4763                config_name = "Firmware Default";
4764        }
4765
4766        config_issued = 1;
4767        if (ret < 0)
4768                goto bye;
4769
4770        finiver = ntohl(caps_cmd.finiver);
4771        finicsum = ntohl(caps_cmd.finicsum);
4772        cfcsum = ntohl(caps_cmd.cfcsum);
4773        if (finicsum != cfcsum)
                dev_warn(adapter->pdev_dev, "Configuration File checksum "
                         "mismatch: [fini] csum=%#x, computed csum=%#x\n",
                         finicsum, cfcsum);
4777
4778        /*
4779         * And now tell the firmware to use the configuration we just loaded.
4780         */
4781        caps_cmd.op_to_write =
4782                htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4783                      FW_CMD_REQUEST |
4784                      FW_CMD_WRITE);
4785        caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4786        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4787                         NULL);
4788        if (ret < 0)
4789                goto bye;
4790
4791        /*
4792         * Tweak configuration based on system architecture, module
4793         * parameters, etc.
4794         */
4795        ret = adap_init0_tweaks(adapter);
4796        if (ret < 0)
4797                goto bye;
4798
4799        /*
4800         * And finally tell the firmware to initialize itself using the
4801         * parameters from the Configuration File.
4802         */
4803        ret = t4_fw_initialize(adapter, adapter->mbox);
4804        if (ret < 0)
4805                goto bye;
4806
        /*
         * Return successfully and note that we're operating with parameters
         * supplied by the Firmware Configuration File rather than hard-wired
         * initialization constants buried in the driver.
         */
        adapter->flags |= USING_SOFT_PARAMS;
        dev_info(adapter->pdev_dev, "Successfully configured using Firmware "
                 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
                 config_name, finiver, cfcsum);
4816        return 0;
4817
4818        /*
4819         * Something bad happened.  Return the error ...  (If the "error"
4820         * is that there's no Configuration File on the adapter we don't
4821         * want to issue a warning since this is fairly common.)
4822         */
4823bye:
4824        if (config_issued && ret != -ENOENT)
4825                dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4826                         config_name, -ret);
4827        return ret;
4828}
4829
4830/*
4831 * Attempt to initialize the adapter via hard-coded, driver supplied
4832 * parameters ...
4833 */
4834static int adap_init0_no_config(struct adapter *adapter, int reset)
4835{
4836        struct sge *s = &adapter->sge;
4837        struct fw_caps_config_cmd caps_cmd;
4838        u32 v;
4839        int i, ret;
4840
4841        /*
4842         * Reset device if necessary
4843         */
4844        if (reset) {
4845                ret = t4_fw_reset(adapter, adapter->mbox,
4846                                  PIORSTMODE | PIORST);
4847                if (ret < 0)
4848                        goto bye;
4849        }
4850
4851        /*
4852         * Get device capabilities and select which we'll be using.
4853         */
4854        memset(&caps_cmd, 0, sizeof(caps_cmd));
4855        caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4856                                     FW_CMD_REQUEST | FW_CMD_READ);
4857        caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4858        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4859                         &caps_cmd);
4860        if (ret < 0)
4861                goto bye;
4862
4863        if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4864                if (!vf_acls)
4865                        caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4866                else
4867                        caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4868        } else if (vf_acls) {
                dev_err(adapter->pdev_dev, "virtualization ACLs not supported\n");
                ret = -EINVAL;
                goto bye;
4871        }
4872        caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4873                              FW_CMD_REQUEST | FW_CMD_WRITE);
4874        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4875                         NULL);
4876        if (ret < 0)
4877                goto bye;
4878
4879        /*
4880         * Tweak configuration based on system architecture, module
4881         * parameters, etc.
4882         */
4883        ret = adap_init0_tweaks(adapter);
4884        if (ret < 0)
4885                goto bye;
4886
4887        /*
4888         * Select RSS Global Mode we want to use.  We use "Basic Virtual"
4889         * mode which maps each Virtual Interface to its own section of
4890         * the RSS Table and we turn on all map and hash enables ...
4891         */
4892        adapter->flags |= RSS_TNLALLLOOKUP;
4893        ret = t4_config_glbl_rss(adapter, adapter->mbox,
4894                                 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4895                                 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4896                                 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
4897                                 ((adapter->flags & RSS_TNLALLLOOKUP) ?
4898                                        FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
4899        if (ret < 0)
4900                goto bye;
4901
4902        /*
4903         * Set up our own fundamental resource provisioning ...
4904         */
4905        ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
4906                          PFRES_NEQ, PFRES_NETHCTRL,
4907                          PFRES_NIQFLINT, PFRES_NIQ,
4908                          PFRES_TC, PFRES_NVI,
4909                          FW_PFVF_CMD_CMASK_MASK,
4910                          pfvfres_pmask(adapter, adapter->fn, 0),
4911                          PFRES_NEXACTF,
4912                          PFRES_R_CAPS, PFRES_WX_CAPS);
4913        if (ret < 0)
4914                goto bye;
4915
        /*
         * Perform low level SGE initialization.  We need to do this before we
         * send the firmware the INITIALIZE command because that will cause
         * any other PF Drivers which are waiting for the Master
         * Initialization to proceed.
         */
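        /* Cap the module-supplied holdoff timers at MAX_SGE_TIMERVAL and
         * pin the last slot to the maximum; packet-count thresholds are
         * likewise capped at the width of THRESHOLD_0.
         */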
4922        for (i = 0; i < SGE_NTIMERS - 1; i++)
4923                s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
4924        s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
4925        s->counter_val[0] = 1;
4926        for (i = 1; i < SGE_NCOUNTERS; i++)
4927                s->counter_val[i] = min(intr_cnt[i - 1],
4928                                        THRESHOLD_0_GET(THRESHOLD_0_MASK));
4929        t4_sge_init(adapter);
4930
4931#ifdef CONFIG_PCI_IOV
4932        /*
4933         * Provision resource limits for Virtual Functions.  We currently
4934         * grant them all the same static resource limits except for the Port
4935         * Access Rights Mask which we're assigning based on the PF.  All of
4936         * the static provisioning stuff for both the PF and VF really needs
4937         * to be managed in a persistent manner for each device which the
4938         * firmware controls.
4939         */
4940        {
4941                int pf, vf;
4942
4943                for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
4944                        if (num_vf[pf] <= 0)
4945                                continue;
4946
4947                        /* VF numbering starts at 1! */
4948                        for (vf = 1; vf <= num_vf[pf]; vf++) {
4949                                ret = t4_cfg_pfvf(adapter, adapter->mbox,
4950                                                  pf, vf,
4951                                                  VFRES_NEQ, VFRES_NETHCTRL,
4952                                                  VFRES_NIQFLINT, VFRES_NIQ,
4953                                                  VFRES_TC, VFRES_NVI,
4954                                                  FW_PFVF_CMD_CMASK_MASK,
4955                                                  pfvfres_pmask(
4956                                                  adapter, pf, vf),
4957                                                  VFRES_NEXACTF,
4958                                                  VFRES_R_CAPS, VFRES_WX_CAPS);
4959                                if (ret < 0)
                                        dev_warn(adapter->pdev_dev,
                                                 "failed to provision pf/vf=%d/%d; err=%d\n",
                                                 pf, vf, ret);
4964                        }
4965                }
4966        }
4967#endif
4968
        /*
         * Set up the default filter mode.  Later we'll want to implement this
         * via a firmware command, etc. ...  This needs to be done before the
         * firmware initialization command ...  If the selected set of fields
         * isn't equal to the default value, we'll need to make sure that the
         * field selections will fit in the 36-bit budget.
         */
4976        if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
4977                int j, bits = 0;
4978
4979                for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
4980                        switch (tp_vlan_pri_map & (1 << j)) {
4981                        case 0:
4982                                /* compressed filter field not enabled */
4983                                break;
4984                        case FCOE_MASK:
4985                                bits +=  1;
4986                                break;
4987                        case PORT_MASK:
4988                                bits +=  3;
4989                                break;
4990                        case VNIC_ID_MASK:
4991                                bits += 17;
4992                                break;
4993                        case VLAN_MASK:
4994                                bits += 17;
4995                                break;
4996                        case TOS_MASK:
4997                                bits +=  8;
4998                                break;
4999                        case PROTOCOL_MASK:
5000                                bits +=  8;
5001                                break;
5002                        case ETHERTYPE_MASK:
5003                                bits += 16;
5004                                break;
5005                        case MACMATCH_MASK:
5006                                bits +=  9;
5007                                break;
5008                        case MPSHITTYPE_MASK:
5009                                bits +=  3;
5010                                break;
5011                        case FRAGMENTATION_MASK:
5012                                bits +=  1;
5013                                break;
5014                        }
5015
5016                if (bits > 36) {
                        dev_err(adapter->pdev_dev,
                                "tp_vlan_pri_map=%#x needs %d bits > 36;"
                                " using %#x\n", tp_vlan_pri_map, bits,
                                TP_VLAN_PRI_MAP_DEFAULT);
5021                        tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5022                }
5023        }
5024        v = tp_vlan_pri_map;
5025        t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5026                          &v, 1, TP_VLAN_PRI_MAP);
5027
        /*
         * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in
         * order to support any of the compressed filter fields above.  Newer
         * versions of the firmware do this automatically but it doesn't hurt
         * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
         * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
         * since the firmware automatically turns this on and off when we have
         * a non-zero number of filters active (since it does have a
         * performance impact).
         */
5038        if (tp_vlan_pri_map)
5039                t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5040                                 FIVETUPLELOOKUP_MASK,
5041                                 FIVETUPLELOOKUP_MASK);
5042
5043        /*
5044         * Tweak some settings.
5045         */
5046        t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5047                     RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5048                     PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5049                     KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5050
5051        /*
5052         * Get basic stuff going by issuing the Firmware Initialize command.
5053         * Note that this _must_ be after all PFVF commands ...
5054         */
5055        ret = t4_fw_initialize(adapter, adapter->mbox);
5056        if (ret < 0)
5057                goto bye;
5058
5059        /*
5060         * Return successfully!
5061         */
        dev_info(adapter->pdev_dev, "Successfully configured using built-in "
                 "driver parameters\n");
5064        return 0;
5065
5066        /*
5067         * Something bad happened.  Return the error ...
5068         */
5069bye:
5070        return ret;
5071}
5072
5073static struct fw_info fw_info_array[] = {
5074        {
5075                .chip = CHELSIO_T4,
5076                .fs_name = FW4_CFNAME,
5077                .fw_mod_name = FW4_FNAME,
5078                .fw_hdr = {
5079                        .chip = FW_HDR_CHIP_T4,
5080                        .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5081                        .intfver_nic = FW_INTFVER(T4, NIC),
5082                        .intfver_vnic = FW_INTFVER(T4, VNIC),
5083                        .intfver_ri = FW_INTFVER(T4, RI),
5084                        .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5085                        .intfver_fcoe = FW_INTFVER(T4, FCOE),
5086                },
5087        }, {
5088                .chip = CHELSIO_T5,
5089                .fs_name = FW5_CFNAME,
5090                .fw_mod_name = FW5_FNAME,
5091                .fw_hdr = {
5092                        .chip = FW_HDR_CHIP_T5,
5093                        .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5094                        .intfver_nic = FW_INTFVER(T5, NIC),
5095                        .intfver_vnic = FW_INTFVER(T5, VNIC),
5096                        .intfver_ri = FW_INTFVER(T5, RI),
5097                        .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5098                        .intfver_fcoe = FW_INTFVER(T5, FCOE),
5099                },
5100        }
5101};
5102
5103static struct fw_info *find_fw_info(int chip)
5104{
5105        int i;
5106
5107        for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5108                if (fw_info_array[i].chip == chip)
5109                        return &fw_info_array[i];
5110        }
5111        return NULL;
5112}
5113
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration.
 */
5117static int adap_init0(struct adapter *adap)
5118{
5119        int ret;
5120        u32 v, port_vec;
5121        enum dev_state state;
5122        u32 params[7], val[7];
5123        struct fw_caps_config_cmd caps_cmd;
5124        int reset = 1;
5125
5126        /*
5127         * Contact FW, advertising Master capability (and potentially forcing
5128         * ourselves as the Master PF if our module parameter force_init is
5129         * set).
5130         */
5131        ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5132                          force_init ? MASTER_MUST : MASTER_MAY,
5133                          &state);
5134        if (ret < 0) {
5135                dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5136                        ret);
5137                return ret;
5138        }
5139        if (ret == adap->mbox)
5140                adap->flags |= MASTER_PF;
5141        if (force_init && state == DEV_STATE_INIT)
5142                state = DEV_STATE_UNINIT;
5143
5144        /*
5145         * If we're the Master PF Driver and the device is uninitialized,
5146         * then let's consider upgrading the firmware ...  (We always want
5147         * to check the firmware version number in order to A. get it for
5148         * later reporting and B. to warn if the currently loaded firmware
5149         * is excessively mismatched relative to the driver.)
5150         */
5151        t4_get_fw_version(adap, &adap->params.fw_vers);
5152        t4_get_tp_version(adap, &adap->params.tp_vers);
5153        if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5154                struct fw_info *fw_info;
5155                struct fw_hdr *card_fw;
5156                const struct firmware *fw;
5157                const u8 *fw_data = NULL;
5158                unsigned int fw_size = 0;
5159
5160                /* This is the firmware whose headers the driver was compiled
5161                 * against
5162                 */
5163                fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5164                if (fw_info == NULL) {
5165                        dev_err(adap->pdev_dev,
5166                                "unable to get firmware info for chip %d.\n",
5167                                CHELSIO_CHIP_VERSION(adap->params.chip));
5168                        return -EINVAL;
5169                }
5170
5171                /* allocate memory to read the header of the firmware on the
5172                 * card
5173                 */
                card_fw = t4_alloc_mem(sizeof(*card_fw));
                if (!card_fw) {
                        ret = -ENOMEM;
                        goto bye;
                }
5175
                /* Get FW from /lib/firmware/ */
5177                ret = request_firmware(&fw, fw_info->fw_mod_name,
5178                                       adap->pdev_dev);
5179                if (ret < 0) {
5180                        dev_err(adap->pdev_dev,
5181                                "unable to load firmware image %s, error %d\n",
5182                                fw_info->fw_mod_name, ret);
5183                } else {
5184                        fw_data = fw->data;
5185                        fw_size = fw->size;
5186                }
5187
5188                /* upgrade FW logic */
5189                ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5190                                 state, &reset);
5191
5192                /* Cleaning up */
5193                if (fw != NULL)
5194                        release_firmware(fw);
5195                t4_free_mem(card_fw);
5196
5197                if (ret < 0)
5198                        goto bye;
5199        }
5200
5201        /*
5202         * Grab VPD parameters.  This should be done after we establish a
5203         * connection to the firmware since some of the VPD parameters
5204         * (notably the Core Clock frequency) are retrieved via requests to
5205         * the firmware.  On the other hand, we need these fairly early on
5206         * so we do this right after getting ahold of the firmware.
5207         */
5208        ret = get_vpd_params(adap, &adap->params.vpd);
5209        if (ret < 0)
5210                goto bye;
5211
5212        /*
5213         * Find out what ports are available to us.  Note that we need to do
5214         * this before calling adap_init0_no_config() since it needs nports
5215         * and portvec ...
5216         */
5217        v =
5218            FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5219            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5220        ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5221        if (ret < 0)
5222                goto bye;
5223
5224        adap->params.nports = hweight32(port_vec);
5225        adap->params.portvec = port_vec;
5226
5227        /*
5228         * If the firmware is initialized already (and we're not forcing a
5229         * master initialization), note that we're living with existing
5230         * adapter parameters.  Otherwise, it's time to try initializing the
5231         * adapter ...
5232         */
5233        if (state == DEV_STATE_INIT) {
                dev_info(adap->pdev_dev, "Coming up as %s: "
                         "Adapter already initialized\n",
                         adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
                adap->flags |= USING_SOFT_PARAMS;
        } else {
                dev_info(adap->pdev_dev, "Coming up as MASTER: "
                         "Initializing adapter\n");
5241
                /*
                 * If the firmware doesn't support Configuration
                 * Files, warn the user.
                 */
5246                if (ret < 0)
5247                        dev_warn(adap->pdev_dev, "Firmware doesn't support "
5248                                 "configuration file.\n");
5249                if (force_old_init)
5250                        ret = adap_init0_no_config(adap, reset);
5251                else {
5252                        /*
5253                         * Find out whether we're dealing with a version of
5254                         * the firmware which has configuration file support.
5255                         */
5256                        params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5257                                     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5258                        ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5259                                              params, val);
5260
5261                        /*
5262                         * If the firmware doesn't support Configuration
5263                         * Files, use the old Driver-based, hard-wired
5264                         * initialization.  Otherwise, try using the
5265                         * Configuration File support and fall back to the
5266                         * Driver-based initialization if there's no
5267                         * Configuration File found.
5268                         */
5269                        if (ret < 0)
5270                                ret = adap_init0_no_config(adap, reset);
5271                        else {
5272                                /*
5273                                 * The firmware provides us with a memory
5274                                 * buffer where we can load a Configuration
5275                                 * File from the host if we want to override
5276                                 * the Configuration File in flash.
5277                                 */
5278
5279                                ret = adap_init0_config(adap, reset);
5280                                if (ret == -ENOENT) {
5281                                        dev_info(adap->pdev_dev,
5282                                            "No Configuration File present "
5283                                            "on adapter. Using hard-wired "
5284                                            "configuration parameters.\n");
5285                                        ret = adap_init0_no_config(adap, reset);
5286                                }
5287                        }
5288                }
5289                if (ret < 0) {
5290                        dev_err(adap->pdev_dev,
5291                                "could not initialize adapter, error %d\n",
5292                                -ret);
5293                        goto bye;
5294                }
5295        }
5296
5297        /*
5298         * If we're living with non-hard-coded parameters (either from a
5299         * Firmware Configuration File or values programmed by a different PF
5300         * Driver), give the SGE code a chance to pull in anything that it
5301         * needs ...  Note that this must be called after we retrieve our VPD
5302         * parameters in order to know how to convert core ticks to seconds.
5303         */
5304        if (adap->flags & USING_SOFT_PARAMS) {
5305                ret = t4_sge_init(adap);
5306                if (ret < 0)
5307                        goto bye;
5308        }
5309
5310        if (is_bypass_device(adap->pdev->device))
5311                adap->params.bypass = 1;
5312
5313        /*
5314         * Grab some of our basic fundamental operating parameters.
5315         */
5316#define FW_PARAM_DEV(param) \
5317        (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
5318        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
5319
#define FW_PARAM_PFVF(param) \
        (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
         FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
         FW_PARAMS_PARAM_Y(0) | \
         FW_PARAMS_PARAM_Z(0))
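
/*
 * FW_PARAM_DEV() builds the handle of a device-global firmware parameter;
 * FW_PARAM_PFVF() builds one scoped to our Physical/Virtual Function.
 */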
5325
5326        params[0] = FW_PARAM_PFVF(EQ_START);
5327        params[1] = FW_PARAM_PFVF(L2T_START);
5328        params[2] = FW_PARAM_PFVF(L2T_END);
5329        params[3] = FW_PARAM_PFVF(FILTER_START);
5330        params[4] = FW_PARAM_PFVF(FILTER_END);
5331        params[5] = FW_PARAM_PFVF(IQFLINT_START);
5332        ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5333        if (ret < 0)
5334                goto bye;
5335        adap->sge.egr_start = val[0];
5336        adap->l2t_start = val[1];
5337        adap->l2t_end = val[2];
5338        adap->tids.ftid_base = val[3];
5339        adap->tids.nftids = val[4] - val[3] + 1;
5340        adap->sge.ingr_start = val[5];
5341
5342        /* query params related to active filter region */
5343        params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5344        params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5345        ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
        /* If an active filter region is provisioned, we enable establishing
         * offload connections through firmware work requests.
         */
5349        if ((val[0] != val[1]) && (ret >= 0)) {
5350                adap->flags |= FW_OFLD_CONN;
5351                adap->tids.aftid_base = val[0];
5352                adap->tids.aftid_end = val[1];
5353        }
5354
5355        /* If we're running on newer firmware, let it know that we're
5356         * prepared to deal with encapsulated CPL messages.  Older
5357         * firmware won't understand this and we'll just get
5358         * unencapsulated messages ...
5359         */
5360        params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5361        val[0] = 1;
5362        (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5363
5364        /*
5365         * Get device capabilities so we can determine what resources we need
5366         * to manage.
5367         */
5368        memset(&caps_cmd, 0, sizeof(caps_cmd));
5369        caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5370                                     FW_CMD_REQUEST | FW_CMD_READ);
5371        caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5372        ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5373                         &caps_cmd);
5374        if (ret < 0)
5375                goto bye;
5376
5377        if (caps_cmd.ofldcaps) {
5378                /* query offload-related parameters */
5379                params[0] = FW_PARAM_DEV(NTID);
5380                params[1] = FW_PARAM_PFVF(SERVER_START);
5381                params[2] = FW_PARAM_PFVF(SERVER_END);
5382                params[3] = FW_PARAM_PFVF(TDDP_START);
5383                params[4] = FW_PARAM_PFVF(TDDP_END);
5384                params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5385                ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5386                                      params, val);
5387                if (ret < 0)
5388                        goto bye;
5389                adap->tids.ntids = val[0];
5390                adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5391                adap->tids.stid_base = val[1];
5392                adap->tids.nstids = val[2] - val[1] + 1;
                /*
                 * Set up the server filter region.  Divide the available
                 * filter region into two parts: regular filters get 1/3rd
                 * and server filters get the remaining 2/3rd.  This is only
                 * done when the workaround path is enabled.
                 * 1. Regular filters.
                 * 2. Server filters: special filters used to redirect SYN
                 *    packets to the offload queue.
                 */
5402                if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5403                        adap->tids.sftid_base = adap->tids.ftid_base +
5404                                        DIV_ROUND_UP(adap->tids.nftids, 3);
5405                        adap->tids.nsftids = adap->tids.nftids -
5406                                         DIV_ROUND_UP(adap->tids.nftids, 3);
5407                        adap->tids.nftids = adap->tids.sftid_base -
5408                                                adap->tids.ftid_base;
5409                }
5410                adap->vres.ddp.start = val[3];
5411                adap->vres.ddp.size = val[4] - val[3] + 1;
5412                adap->params.ofldq_wr_cred = val[5];
5413
5414                adap->params.offload = 1;
5415        }
5416        if (caps_cmd.rdmacaps) {
5417                params[0] = FW_PARAM_PFVF(STAG_START);
5418                params[1] = FW_PARAM_PFVF(STAG_END);
5419                params[2] = FW_PARAM_PFVF(RQ_START);
5420                params[3] = FW_PARAM_PFVF(RQ_END);
5421                params[4] = FW_PARAM_PFVF(PBL_START);
5422                params[5] = FW_PARAM_PFVF(PBL_END);
5423                ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5424                                      params, val);
5425                if (ret < 0)
5426                        goto bye;
5427                adap->vres.stag.start = val[0];
5428                adap->vres.stag.size = val[1] - val[0] + 1;
5429                adap->vres.rq.start = val[2];
5430                adap->vres.rq.size = val[3] - val[2] + 1;
5431                adap->vres.pbl.start = val[4];
5432                adap->vres.pbl.size = val[5] - val[4] + 1;
5433
5434                params[0] = FW_PARAM_PFVF(SQRQ_START);
5435                params[1] = FW_PARAM_PFVF(SQRQ_END);
5436                params[2] = FW_PARAM_PFVF(CQ_START);
5437                params[3] = FW_PARAM_PFVF(CQ_END);
5438                params[4] = FW_PARAM_PFVF(OCQ_START);
5439                params[5] = FW_PARAM_PFVF(OCQ_END);
                ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
                                      params, val);
5441                if (ret < 0)
5442                        goto bye;
5443                adap->vres.qp.start = val[0];
5444                adap->vres.qp.size = val[1] - val[0] + 1;
5445                adap->vres.cq.start = val[2];
5446                adap->vres.cq.size = val[3] - val[2] + 1;
5447                adap->vres.ocq.start = val[4];
5448                adap->vres.ocq.size = val[5] - val[4] + 1;
5449        }
5450        if (caps_cmd.iscsicaps) {
5451                params[0] = FW_PARAM_PFVF(ISCSI_START);
5452                params[1] = FW_PARAM_PFVF(ISCSI_END);
5453                ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5454                                      params, val);
5455                if (ret < 0)
5456                        goto bye;
5457                adap->vres.iscsi.start = val[0];
5458                adap->vres.iscsi.size = val[1] - val[0] + 1;
5459        }
5460#undef FW_PARAM_PFVF
5461#undef FW_PARAM_DEV
5462
        /*
         * These are finalized by FW initialization; load their values now.
         */
5466        t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5467        t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5468                     adap->params.b_wnd);
5469
5470        t4_init_tp_params(adap);
5471        adap->flags |= FW_OK;
5472        return 0;
5473
        /*
         * Something bad happened.  If a command timed out or failed with
         * EIO, the firmware is not operating within its spec or something
         * catastrophic happened to the HW/FW, so stop issuing commands.
         */
5479bye:
5480        if (ret != -ETIMEDOUT && ret != -EIO)
5481                t4_fw_bye(adap, adap->mbox);
5482        return ret;
5483}
5484
5485/* EEH callbacks */
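/*
 * PCI error recovery runs in stages: error_detected() quiesces the driver
 * and detaches the ports, slot_reset() re-enables the function and redoes
 * enough initialization to talk to the firmware again, and resume()
 * restarts any running ports.
 */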
5486
5487static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5488                                         pci_channel_state_t state)
5489{
5490        int i;
5491        struct adapter *adap = pci_get_drvdata(pdev);
5492
5493        if (!adap)
5494                goto out;
5495
5496        rtnl_lock();
5497        adap->flags &= ~FW_OK;
5498        notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5499        for_each_port(adap, i) {
5500                struct net_device *dev = adap->port[i];
5501
5502                netif_device_detach(dev);
5503                netif_carrier_off(dev);
5504        }
5505        if (adap->flags & FULL_INIT_DONE)
5506                cxgb_down(adap);
5507        rtnl_unlock();
5508        pci_disable_device(pdev);
5509out:    return state == pci_channel_io_perm_failure ?
5510                PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5511}
5512
5513static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5514{
5515        int i, ret;
5516        struct fw_caps_config_cmd c;
5517        struct adapter *adap = pci_get_drvdata(pdev);
5518
5519        if (!adap) {
5520                pci_restore_state(pdev);
5521                pci_save_state(pdev);
5522                return PCI_ERS_RESULT_RECOVERED;
5523        }
5524
5525        if (pci_enable_device(pdev)) {
5526                dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
5527                return PCI_ERS_RESULT_DISCONNECT;
5528        }
5529
5530        pci_set_master(pdev);
5531        pci_restore_state(pdev);
5532        pci_save_state(pdev);
5533        pci_cleanup_aer_uncorrect_error_status(pdev);
5534
5535        if (t4_wait_dev_ready(adap) < 0)
5536                return PCI_ERS_RESULT_DISCONNECT;
5537        if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
5538                return PCI_ERS_RESULT_DISCONNECT;
5539        adap->flags |= FW_OK;
5540        if (adap_init1(adap, &c))
5541                return PCI_ERS_RESULT_DISCONNECT;
5542
5543        for_each_port(adap, i) {
5544                struct port_info *p = adap2pinfo(adap, i);
5545
5546                ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
5547                                  NULL, NULL);
5548                if (ret < 0)
5549                        return PCI_ERS_RESULT_DISCONNECT;
5550                p->viid = ret;
5551                p->xact_addr_filt = -1;
5552        }
5553
5554        t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5555                     adap->params.b_wnd);
5556        setup_memwin(adap);
5557        if (cxgb_up(adap))
5558                return PCI_ERS_RESULT_DISCONNECT;
5559        return PCI_ERS_RESULT_RECOVERED;
5560}
5561
5562static void eeh_resume(struct pci_dev *pdev)
5563{
5564        int i;
5565        struct adapter *adap = pci_get_drvdata(pdev);
5566
5567        if (!adap)
5568                return;
5569
5570        rtnl_lock();
5571        for_each_port(adap, i) {
5572                struct net_device *dev = adap->port[i];
5573
5574                if (netif_running(dev)) {
5575                        link_start(dev);
5576                        cxgb_set_rxmode(dev);
5577                }
5578                netif_device_attach(dev);
5579        }
5580        rtnl_unlock();
5581}
5582
5583static const struct pci_error_handlers cxgb4_eeh = {
5584        .error_detected = eeh_err_detected,
5585        .slot_reset     = eeh_slot_reset,
5586        .resume         = eeh_resume,
5587};
5588
5589static inline bool is_10g_port(const struct link_config *lc)
5590{
5591        return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
5592}
5593
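/*
 * Seed a response queue's basic parameters: holdoff timer index, optional
 * packet-count threshold (disabled when pkt_cnt_idx is out of range),
 * queue length and ingress queue entry size.
 */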
5594static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
5595                             unsigned int size, unsigned int iqe_size)
5596{
5597        q->intr_params = QINTR_TIMER_IDX(timer_idx) |
5598                         (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
5599        q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
5600        q->iqe_len = iqe_size;
5601        q->size = size;
5602}
5603
5604/*
5605 * Perform default configuration of DMA queues depending on the number and type
5606 * of ports we found and the number of available CPUs.  Most settings can be
5607 * modified by the admin prior to actual use.
5608 */
5609static void cfg_queues(struct adapter *adap)
5610{
5611        struct sge *s = &adap->sge;
5612        int i, q10g = 0, n10g = 0, qidx = 0;
5613
5614        for_each_port(adap, i)
5615                n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
5616
        /*
         * We default to 1 queue per non-10G port and, per 10G port, up to
         * as many queues as there are CPU cores.
         */
5621        if (n10g)
5622                q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5623        if (q10g > netif_get_num_default_rss_queues())
5624                q10g = netif_get_num_default_rss_queues();
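        /*
         * E.g. two 10G ports plus two 1G ports give each 10G port
         * (MAX_ETH_QSETS - 2) / 2 queue sets, subject to the cap above.
         */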
5625
5626        for_each_port(adap, i) {
5627                struct port_info *pi = adap2pinfo(adap, i);
5628
5629                pi->first_qset = qidx;
5630                pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
5631                qidx += pi->nqsets;
5632        }
5633
5634        s->ethqsets = qidx;
5635        s->max_ethqsets = qidx;   /* MSI-X may lower it later */
5636
5637        if (is_offload(adap)) {
5638                /*
5639                 * For offload we use 1 queue/channel if all ports are up to 1G,
5640                 * otherwise we divide all available queues amongst the channels
5641                 * capped by the number of available cores.
5642                 */
5643                if (n10g) {
5644                        i = min_t(int, ARRAY_SIZE(s->ofldrxq),
5645                                  num_online_cpus());
5646                        s->ofldqsets = roundup(i, adap->params.nports);
5647                } else
5648                        s->ofldqsets = adap->params.nports;
5649                /* For RDMA one Rx queue per channel suffices */
5650                s->rdmaqs = adap->params.nports;
5651        }
5652
5653        for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5654                struct sge_eth_rxq *r = &s->ethrxq[i];
5655
5656                init_rspq(&r->rspq, 0, 0, 1024, 64);
5657                r->fl.size = 72;
5658        }
5659
5660        for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5661                s->ethtxq[i].q.size = 1024;
5662
5663        for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5664                s->ctrlq[i].q.size = 512;
5665
5666        for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
5667                s->ofldtxq[i].q.size = 1024;
5668
5669        for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
5670                struct sge_ofld_rxq *r = &s->ofldrxq[i];
5671
5672                init_rspq(&r->rspq, 0, 0, 1024, 64);
5673                r->rspq.uld = CXGB4_ULD_ISCSI;
5674                r->fl.size = 72;
5675        }
5676
5677        for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
5678                struct sge_ofld_rxq *r = &s->rdmarxq[i];
5679
5680                init_rspq(&r->rspq, 0, 0, 511, 64);
5681                r->rspq.uld = CXGB4_ULD_RDMA;
5682                r->fl.size = 72;
5683        }
5684
5685        init_rspq(&s->fw_evtq, 6, 0, 512, 64);
5686        init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
5687}
5688
5689/*
5690 * Reduce the number of Ethernet queues across all ports to at most n.
5691 * n provides at least one queue per port.
5692 */
5693static void reduce_ethqs(struct adapter *adap, int n)
5694{
5695        int i;
5696        struct port_info *pi;
5697
5698        while (n < adap->sge.ethqsets)
5699                for_each_port(adap, i) {
5700                        pi = adap2pinfo(adap, i);
5701                        if (pi->nqsets > 1) {
5702                                pi->nqsets--;
5703                                adap->sge.ethqsets--;
5704                                if (adap->sge.ethqsets <= n)
5705                                        break;
5706                        }
5707                }
5708
5709        n = 0;
5710        for_each_port(adap, i) {
5711                pi = adap2pinfo(adap, i);
5712                pi->first_qset = n;
5713                n += pi->nqsets;
5714        }
5715}
5716
5717/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5718#define EXTRA_VECS 2
5719
5720static int enable_msix(struct adapter *adap)
5721{
5722        int ofld_need = 0;
5723        int i, err, want, need;
5724        struct sge *s = &adap->sge;
5725        unsigned int nchan = adap->params.nports;
5726        struct msix_entry entries[MAX_INGQ + 1];
5727
5728        for (i = 0; i < ARRAY_SIZE(entries); ++i)
5729                entries[i].entry = i;
5730
5731        want = s->max_ethqsets + EXTRA_VECS;
5732        if (is_offload(adap)) {
5733                want += s->rdmaqs + s->ofldqsets;
5734                /* need nchan for each possible ULD */
5735                ofld_need = 2 * nchan;
5736        }
5737        need = adap->params.nports + EXTRA_VECS + ofld_need;
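        /*
         * E.g. an offload-capable 2-port adapter asks for max_ethqsets +
         * EXTRA_VECS + rdmaqs + ofldqsets vectors but can limp along with
         * need = 2 ports + EXTRA_VECS + 2 * 2 ULD vectors = 8.
         */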
5738
5739        while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
5740                want = err;
5741
5742        if (!err) {
5743                /*
5744                 * Distribute available vectors to the various queue groups.
5745                 * Every group gets its minimum requirement and NIC gets top
5746                 * priority for leftovers.
5747                 */
5748                i = want - EXTRA_VECS - ofld_need;
5749                if (i < s->max_ethqsets) {
5750                        s->max_ethqsets = i;
5751                        if (i < s->ethqsets)
5752                                reduce_ethqs(adap, i);
5753                }
5754                if (is_offload(adap)) {
5755                        i = want - EXTRA_VECS - s->max_ethqsets;
                        i -= ofld_need - nchan;
                        s->ofldqsets = (i / nchan) * nchan;  /* round down */
                }
                for (i = 0; i < want; ++i)
                        adap->msix_info[i].vec = entries[i].vector;
        } else if (err > 0)
                dev_info(adap->pdev_dev,
                         "only %d MSI-X vectors left, not using MSI-X\n", err);
        return err;
}

#undef EXTRA_VECS

static int init_rss(struct adapter *adap)
{
        unsigned int i, j;

        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);

                pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
                if (!pi->rss)
                        return -ENOMEM;
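                /*
                 * Seed a default indirection table that spreads RSS buckets
                 * evenly across this port's Rx queue sets.
                 */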
                for (j = 0; j < pi->rss_size; j++)
                        pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
        }
        return 0;
}

static void print_port_info(const struct net_device *dev)
{
        static const char *base[] = {
                "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
                "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
        };

        char buf[80];
        char *bufp = buf;
        const char *spd = "";
        const struct port_info *pi = netdev_priv(dev);
        const struct adapter *adap = pi->adapter;

        if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
                spd = " 2.5 GT/s";
        else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
                spd = " 5 GT/s";

        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
                bufp += sprintf(bufp, "100/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
                bufp += sprintf(bufp, "1000/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
                bufp += sprintf(bufp, "10G/");
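        /* Back up over the trailing '/' left by the last speed string. */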
        if (bufp != buf)
                --bufp;
        sprintf(bufp, "BASE-%s", base[pi->port_type]);

        netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
                    adap->params.vpd.id,
                    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
                    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
                    (adap->flags & USING_MSIX) ? " MSI-X" :
                    (adap->flags & USING_MSI) ? " MSI" : "");
        netdev_info(dev, "S/N: %s, E/C: %s\n",
                    adap->params.vpd.sn, adap->params.vpd.ec);
}

static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
        unsigned int i;

        t4_free_mem(adapter->l2t);
        t4_free_mem(adapter->tids.tid_tab);
        disable_msi(adapter);

        for_each_port(adapter, i)
                if (adapter->port[i]) {
                        kfree(adap2pinfo(adapter, i)->rss);
                        free_netdev(adapter->port[i]);
                }
        if (adapter->flags & FW_OK)
                t4_fw_bye(adapter, adapter->fn);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
                   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int func, i, err, s_qpp, qpp, num_seg;
        struct port_info *pi;
        bool highdma = false;
        struct adapter *adapter = NULL;

        printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

        err = pci_request_regions(pdev, KBUILD_MODNAME);
        if (err) {
                /* Just info, some other driver may have claimed the device. */
                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
                return err;
        }

        /*
         * We control everything through one PF: attach fully only to the
         * function named in the PCI ID table.  For the other functions just
         * save state so it can be restored when SR-IOV is enabled below.
         */
        func = PCI_FUNC(pdev->devfn);
        if (func != ent->driver_data) {
                pci_save_state(pdev);        /* to restore SR-IOV later */
                goto sriov;
        }

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "cannot enable PCI device\n");
                goto out_release_regions;
        }

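        /*
         * Prefer 64-bit DMA (and remember to advertise NETIF_F_HIGHDMA on
         * the netdevs); fall back to a 32-bit mask if the platform can't
         * provide it.
         */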
        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                highdma = true;
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev,
                                "unable to obtain 64-bit DMA for coherent allocations\n");
                        goto out_disable_device;
                }
        } else {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "no usable DMA configuration\n");
                        goto out_disable_device;
                }
        }

        pci_enable_pcie_error_reporting(pdev);
        enable_pcie_relaxed_ordering(pdev);
        pci_set_master(pdev);
        pci_save_state(pdev);

        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
                goto out_disable_device;
        }

        adapter->regs = pci_ioremap_bar(pdev, 0);
        if (!adapter->regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                err = -ENOMEM;
                goto out_free_adapter;
        }

        adapter->pdev = pdev;
        adapter->pdev_dev = &pdev->dev;
        adapter->mbox = func;
        adapter->fn = func;
        adapter->msg_enable = dflt_msg_enable;
        memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

        spin_lock_init(&adapter->stats_lock);
        spin_lock_init(&adapter->tid_release_lock);

        INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
        INIT_WORK(&adapter->db_full_task, process_db_full);
        INIT_WORK(&adapter->db_drop_task, process_db_drop);

        err = t4_prep_adapter(adapter);
        if (err)
                goto out_unmap_bar0;

        if (!is_t4(adapter->params.chip)) {
                s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
                qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
                      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
                num_seg = PAGE_SIZE / SEGMENT_SIZE;

                /* Each segment is 128 B.  Write coalescing is enabled only
                 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF value for this PF
                 * is no greater than the number of segments that fit in a
                 * page.
                 */
                if (qpp > num_seg) {
                        dev_err(&pdev->dev,
                                "Incorrect number of egress queues per page\n");
                        err = -EINVAL;
                        goto out_unmap_bar0;
                }
                adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
                                           pci_resource_len(pdev, 2));
                if (!adapter->bar2) {
                        dev_err(&pdev->dev, "cannot map device bar2 region\n");
                        err = -ENOMEM;
                        goto out_unmap_bar0;
                }
        }

        setup_memwin(adapter);
        err = adap_init0(adapter);
        setup_memwin_rdma(adapter);
        if (err)
                goto out_unmap_bar;

        for_each_port(adapter, i) {
                struct net_device *netdev;

                netdev = alloc_etherdev_mq(sizeof(struct port_info),
                                           MAX_ETH_QSETS);
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                adapter->port[i] = netdev;
                pi = netdev_priv(netdev);
                pi->adapter = adapter;
                pi->xact_addr_filt = -1;
                pi->port_id = i;
                netdev->irq = pdev->irq;

                netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                        NETIF_F_RXCSUM | NETIF_F_RXHASH |
                        NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
                if (highdma)
                        netdev->hw_features |= NETIF_F_HIGHDMA;
                netdev->features |= netdev->hw_features;
                netdev->vlan_features = netdev->features & VLAN_FEAT;

                netdev->priv_flags |= IFF_UNICAST_FLT;

                netdev->netdev_ops = &cxgb4_netdev_ops;
                SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
        }

        pci_set_drvdata(pdev, adapter);

        if (adapter->flags & FW_OK) {
                err = t4_port_init(adapter, func, func, 0);
                if (err)
                        goto out_free_dev;
        }

        /*
         * Configure queues and allocate tables now; they can be needed as
         * soon as the first register_netdev completes.
         */
        cfg_queues(adapter);

        adapter->l2t = t4_init_l2t();
        if (!adapter->l2t) {
                /* We tolerate a lack of L2T, giving up some functionality */
                dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
                adapter->params.offload = 0;
        }

        if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
                dev_warn(&pdev->dev,
                         "could not allocate TID table, continuing\n");
                adapter->params.offload = 0;
        }

        /*
         * See what interrupts we'll be using.  The "msi" module parameter
         * asks for MSI-X (>1), plain MSI (1), or legacy INTx (0), falling
         * back down that list if a mode can't be enabled.
         */
        if (msi > 1 && enable_msix(adapter) == 0)
                adapter->flags |= USING_MSIX;
        else if (msi > 0 && pci_enable_msi(pdev) == 0)
                adapter->flags |= USING_MSI;

        err = init_rss(adapter);
        if (err)
                goto out_free_dev;

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for_each_port(adapter, i) {
                pi = adap2pinfo(adapter, i);
                netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
                netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

                err = register_netdev(adapter->port[i]);
                if (err)
                        break;
                adapter->chan_map[pi->tx_chan] = i;
                print_port_info(adapter->port[i]);
        }
        if (i == 0) {
                dev_err(&pdev->dev, "could not register any net devices\n");
                goto out_free_dev;
        }
        if (err) {
                dev_warn(&pdev->dev, "only %d net devices registered\n", i);
                err = 0;
        }

        if (cxgb4_debugfs_root) {
                adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
                                                           cxgb4_debugfs_root);
                setup_debugfs(adapter);
        }

        /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
        pdev->needs_freset = 1;

        if (is_offload(adapter))
                attach_ulds(adapter);

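/*
 * Note: the per-function num_vf[] module parameter array below determines
 * how many SR-IOV virtual functions, if any, to instantiate on each
 * SR-IOV-capable physical function.
 */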
sriov:
#ifdef CONFIG_PCI_IOV
        if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
                if (pci_enable_sriov(pdev, num_vf[func]) == 0)
                        dev_info(&pdev->dev,
                                 "instantiated %u virtual functions\n",
                                 num_vf[func]);
#endif
        return 0;

 out_free_dev:
        free_some_resources(adapter);
 out_unmap_bar:
        if (!is_t4(adapter->params.chip))
                iounmap(adapter->bar2);
 out_unmap_bar0:
        iounmap(adapter->regs);
 out_free_adapter:
        kfree(adapter);
 out_disable_device:
        pci_disable_pcie_error_reporting(pdev);
        pci_disable_device(pdev);
 out_release_regions:
        pci_release_regions(pdev);
        return err;
}

static void remove_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
        pci_disable_sriov(pdev);
#endif

        if (adapter) {
                int i;

                if (is_offload(adapter))
                        detach_ulds(adapter);

                for_each_port(adapter, i)
                        if (adapter->port[i]->reg_state == NETREG_REGISTERED)
                                unregister_netdev(adapter->port[i]);

                if (adapter->debugfs_root)
                        debugfs_remove_recursive(adapter->debugfs_root);

                /* If we allocated filters, free up state associated with any
                 * valid filters ...
                 */
                if (adapter->tids.ftid_tab) {
                        struct filter_entry *f = &adapter->tids.ftid_tab[0];

                        for (i = 0; i < (adapter->tids.nftids +
                                        adapter->tids.nsftids); i++, f++)
                                if (f->valid)
                                        clear_filter(adapter, f);
                }

                if (adapter->flags & FULL_INIT_DONE)
                        cxgb_down(adapter);

                free_some_resources(adapter);
                iounmap(adapter->regs);
                if (!is_t4(adapter->params.chip))
                        iounmap(adapter->bar2);
                kfree(adapter);
                pci_disable_pcie_error_reporting(pdev);
                pci_disable_device(pdev);
                pci_release_regions(pdev);
        } else {
                pci_release_regions(pdev);
        }
}

static struct pci_driver cxgb4_driver = {
        .name     = KBUILD_MODNAME,
        .id_table = cxgb4_pci_tbl,
        .probe    = init_one,
        .remove   = remove_one,
        .err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
        int ret;

        workq = create_singlethread_workqueue("cxgb4");
        if (!workq)
                return -ENOMEM;

        /* Debugfs support is optional, just warn if this fails */
        cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
        if (!cxgb4_debugfs_root)
                pr_warn("could not create debugfs entry, continuing\n");

        ret = pci_register_driver(&cxgb4_driver);
        if (ret < 0) {
                debugfs_remove(cxgb4_debugfs_root);
                destroy_workqueue(workq);
                return ret;
        }

        register_inet6addr_notifier(&cxgb4_inet6addr_notifier);

        return 0;
}

static void __exit cxgb4_cleanup_module(void)
{
        unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
        pci_unregister_driver(&cxgb4_driver);
        debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
        flush_workqueue(workq);
        destroy_workqueue(workq);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);
