linux/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
   1/*****************************************************************************
   2 *                                                                           *
   3 * File: cxgb2.c                                                             *
   4 * $Revision: 1.25 $                                                         *
   5 * $Date: 2005/06/22 00:43:25 $                                              *
   6 * Description:                                                              *
   7 *  Chelsio 10Gb Ethernet Driver.                                            *
   8 *                                                                           *
   9 * This program is free software; you can redistribute it and/or modify      *
  10 * it under the terms of the GNU General Public License, version 2, as       *
  11 * published by the Free Software Foundation.                                *
  12 *                                                                           *
  13 * You should have received a copy of the GNU General Public License along   *
  14 * with this program; if not, see <http://www.gnu.org/licenses/>.            *
  15 *                                                                           *
  16 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
  17 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
  18 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
  19 *                                                                           *
  20 * http://www.chelsio.com                                                    *
  21 *                                                                           *
  22 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
  23 * All rights reserved.                                                      *
  24 *                                                                           *
  25 * Maintainers: maintainers@chelsio.com                                      *
  26 *                                                                           *
  27 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
  28 *          Tina Yang               <tainay@chelsio.com>                     *
  29 *          Felix Marti             <felix@chelsio.com>                      *
  30 *          Scott Bardone           <sbardone@chelsio.com>                   *
  31 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
  32 *          Frank DiMambro          <frank@chelsio.com>                      *
  33 *                                                                           *
  34 * History:                                                                  *
  35 *                                                                           *
  36 ****************************************************************************/
  37
  38#include "common.h"
  39#include <linux/module.h>
  40#include <linux/pci.h>
  41#include <linux/netdevice.h>
  42#include <linux/etherdevice.h>
  43#include <linux/if_vlan.h>
  44#include <linux/mii.h>
  45#include <linux/sockios.h>
  46#include <linux/dma-mapping.h>
  47#include <linux/uaccess.h>
  48
  49#include "cpl5_cmd.h"
  50#include "regs.h"
  51#include "gmac.h"
  52#include "cphy.h"
  53#include "sge.h"
  54#include "tp.h"
  55#include "espi.h"
  56#include "elmer0.h"
  57
  58#include <linux/workqueue.h>
  59
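/*
 * Helpers for the periodic MAC statistics refresh: mac_stats_task() (below)
 * is rescheduled every stats_update_period seconds while any port is up so
 * that the hardware counters are read before they can overflow.
 */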
  60static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
  61{
  62        schedule_delayed_work(&ap->stats_update_task, secs * HZ);
  63}
  64
  65static inline void cancel_mac_stats_update(struct adapter *ap)
  66{
  67        cancel_delayed_work(&ap->stats_update_task);
  68}
  69
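/*
 * Ring size limits.  The RX, jumbo RX and command-queue bounds are enforced
 * on the values requested through ethtool's set_ringparam (see
 * set_sge_param() below).
 */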
  70#define MAX_CMDQ_ENTRIES        16384
  71#define MAX_CMDQ1_ENTRIES       1024
  72#define MAX_RX_BUFFERS          16384
  73#define MAX_RX_JUMBO_BUFFERS    16384
  74#define MAX_TX_BUFFERS_HIGH     16384U
  75#define MAX_TX_BUFFERS_LOW      1536U
  76#define MAX_TX_BUFFERS          1460U
  77#define MIN_FL_ENTRIES          32
  78
  79#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
  80                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
  81                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
  82
  83/*
   84 * The EEPROM is actually bigger, but only the first few bytes are used, so
   85 * we only report those.
  86 */
  87#define EEPROM_SIZE 32
  88
  89MODULE_DESCRIPTION(DRV_DESCRIPTION);
  90MODULE_AUTHOR("Chelsio Communications");
  91MODULE_LICENSE("GPL");
  92
  93static int dflt_msg_enable = DFLT_MSG_ENABLE;
  94
  95module_param(dflt_msg_enable, int, 0);
  96MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
  97
  98#define HCLOCK 0x0
  99#define LCLOCK 0x1
 100
  101/* T1 cards' powersave mode */
 102static int t1_clock(struct adapter *adapter, int mode);
 103static int t1powersave = 1;     /* HW default is powersave mode. */
 104
 105module_param(t1powersave, int, 0);
 106MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
 107
 108static int disable_msi = 0;
 109module_param(disable_msi, int, 0);
 110MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
 111
 112/*
 113 * Setup MAC to receive the types of packets we want.
 114 */
 115static void t1_set_rxmode(struct net_device *dev)
 116{
 117        struct adapter *adapter = dev->ml_priv;
 118        struct cmac *mac = adapter->port[dev->if_port].mac;
 119        struct t1_rx_mode rm;
 120
 121        rm.dev = dev;
 122        mac->ops->set_rx_mode(mac, &rm);
 123}
 124
 125static void link_report(struct port_info *p)
 126{
 127        if (!netif_carrier_ok(p->dev))
 128                netdev_info(p->dev, "link down\n");
 129        else {
 130                const char *s = "10Mbps";
 131
 132                switch (p->link_config.speed) {
 133                        case SPEED_10000: s = "10Gbps"; break;
 134                        case SPEED_1000:  s = "1000Mbps"; break;
 135                        case SPEED_100:   s = "100Mbps"; break;
 136                }
 137
 138                netdev_info(p->dev, "link up, %s, %s-duplex\n",
 139                            s, p->link_config.duplex == DUPLEX_FULL
 140                            ? "full" : "half");
 141        }
 142}
 143
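/*
 * Handle a link state change on a port: update the carrier state, log the
 * change and, on multi-port adapters, reprogram the per-port TX scheduler
 * with the negotiated speed.
 */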
 144void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
 145                        int speed, int duplex, int pause)
 146{
 147        struct port_info *p = &adapter->port[port_id];
 148
 149        if (link_stat != netif_carrier_ok(p->dev)) {
 150                if (link_stat)
 151                        netif_carrier_on(p->dev);
 152                else
 153                        netif_carrier_off(p->dev);
 154                link_report(p);
 155
 156                /* multi-ports: inform toe */
 157                if ((speed > 0) && (adapter->params.nports > 1)) {
 158                        unsigned int sched_speed = 10;
 159                        switch (speed) {
 160                        case SPEED_1000:
 161                                sched_speed = 1000;
 162                                break;
 163                        case SPEED_100:
 164                                sched_speed = 100;
 165                                break;
 166                        case SPEED_10:
 167                                sched_speed = 10;
 168                                break;
 169                        }
 170                        t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
 171                }
 172        }
 173}
 174
 175static void link_start(struct port_info *p)
 176{
 177        struct cmac *mac = p->mac;
 178
 179        mac->ops->reset(mac);
 180        if (mac->ops->macaddress_set)
 181                mac->ops->macaddress_set(mac, p->dev->dev_addr);
 182        t1_set_rxmode(p->dev);
 183        t1_link_start(p->phy, mac, &p->link_config);
 184        mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 185}
 186
 187static void enable_hw_csum(struct adapter *adapter)
 188{
 189        if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
 190                t1_tp_set_ip_checksum_offload(adapter->tp, 1);  /* for TSO only */
 191        t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
 192}
 193
 194/*
 195 * Things to do upon first use of a card.
 196 * This must run with the rtnl lock held.
 197 */
 198static int cxgb_up(struct adapter *adapter)
 199{
 200        int err = 0;
 201
 202        if (!(adapter->flags & FULL_INIT_DONE)) {
 203                err = t1_init_hw_modules(adapter);
 204                if (err)
 205                        goto out_err;
 206
 207                enable_hw_csum(adapter);
 208                adapter->flags |= FULL_INIT_DONE;
 209        }
 210
 211        t1_interrupts_clear(adapter);
 212
 213        adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
 214        err = request_threaded_irq(adapter->pdev->irq, t1_interrupt,
 215                                   t1_interrupt_thread,
 216                                   adapter->params.has_msi ? 0 : IRQF_SHARED,
 217                                   adapter->name, adapter);
 218        if (err) {
 219                if (adapter->params.has_msi)
 220                        pci_disable_msi(adapter->pdev);
 221
 222                goto out_err;
 223        }
 224
 225        t1_sge_start(adapter->sge);
 226        t1_interrupts_enable(adapter);
 227out_err:
 228        return err;
 229}
 230
 231/*
 232 * Release resources when all the ports have been stopped.
 233 */
 234static void cxgb_down(struct adapter *adapter)
 235{
 236        t1_sge_stop(adapter->sge);
 237        t1_interrupts_disable(adapter);
 238        free_irq(adapter->pdev->irq, adapter);
 239        if (adapter->params.has_msi)
 240                pci_disable_msi(adapter->pdev);
 241}
 242
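/*
 * ndo_open handler.  The first port to open brings up the shared adapter
 * state via cxgb_up(); each port then marks itself in open_device_map,
 * starts its MAC/PHY and, if it is the only open port, kicks off the
 * periodic MAC statistics update.
 */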
 243static int cxgb_open(struct net_device *dev)
 244{
 245        int err;
 246        struct adapter *adapter = dev->ml_priv;
 247        int other_ports = adapter->open_device_map & PORT_MASK;
 248
 249        napi_enable(&adapter->napi);
 250        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
 251                napi_disable(&adapter->napi);
 252                return err;
 253        }
 254
 255        __set_bit(dev->if_port, &adapter->open_device_map);
 256        link_start(&adapter->port[dev->if_port]);
 257        netif_start_queue(dev);
 258        if (!other_ports && adapter->params.stats_update_period)
 259                schedule_mac_stats_update(adapter,
 260                                          adapter->params.stats_update_period);
 261
 262        t1_vlan_mode(adapter, dev->features);
 263        return 0;
 264}
 265
 266static int cxgb_close(struct net_device *dev)
 267{
 268        struct adapter *adapter = dev->ml_priv;
 269        struct port_info *p = &adapter->port[dev->if_port];
 270        struct cmac *mac = p->mac;
 271
 272        netif_stop_queue(dev);
 273        napi_disable(&adapter->napi);
 274        mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
 275        netif_carrier_off(dev);
 276
 277        clear_bit(dev->if_port, &adapter->open_device_map);
 278        if (adapter->params.stats_update_period &&
 279            !(adapter->open_device_map & PORT_MASK)) {
 280                /* Stop statistics accumulation. */
 281                smp_mb__after_atomic();
 282                spin_lock(&adapter->work_lock);   /* sync with update task */
 283                spin_unlock(&adapter->work_lock);
 284                cancel_mac_stats_update(adapter);
 285        }
 286
 287        if (!adapter->open_device_map)
 288                cxgb_down(adapter);
 289        return 0;
 290}
 291
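/*
 * ndo_get_stats handler: trigger a full MAC statistics update and fold the
 * hardware counters into the generic struct net_device_stats fields.
 */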
 292static struct net_device_stats *t1_get_stats(struct net_device *dev)
 293{
 294        struct adapter *adapter = dev->ml_priv;
 295        struct port_info *p = &adapter->port[dev->if_port];
 296        struct net_device_stats *ns = &dev->stats;
 297        const struct cmac_statistics *pstats;
 298
 299        /* Do a full update of the MAC stats */
 300        pstats = p->mac->ops->statistics_update(p->mac,
 301                                                MAC_STATS_UPDATE_FULL);
 302
 303        ns->tx_packets = pstats->TxUnicastFramesOK +
 304                pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
 305
 306        ns->rx_packets = pstats->RxUnicastFramesOK +
 307                pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
 308
 309        ns->tx_bytes = pstats->TxOctetsOK;
 310        ns->rx_bytes = pstats->RxOctetsOK;
 311
 312        ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
 313                pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
 314        ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
 315                pstats->RxFCSErrors + pstats->RxAlignErrors +
 316                pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
 317                pstats->RxSymbolErrors + pstats->RxRuntErrors;
 318
 319        ns->multicast  = pstats->RxMulticastFramesOK;
 320        ns->collisions = pstats->TxTotalCollisions;
 321
 322        /* detailed rx_errors */
 323        ns->rx_length_errors = pstats->RxFrameTooLongErrors +
 324                pstats->RxJabberErrors;
 325        ns->rx_over_errors   = 0;
 326        ns->rx_crc_errors    = pstats->RxFCSErrors;
 327        ns->rx_frame_errors  = pstats->RxAlignErrors;
 328        ns->rx_fifo_errors   = 0;
 329        ns->rx_missed_errors = 0;
 330
 331        /* detailed tx_errors */
 332        ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
 333        ns->tx_carrier_errors   = 0;
 334        ns->tx_fifo_errors      = pstats->TxUnderrun;
 335        ns->tx_heartbeat_errors = 0;
 336        ns->tx_window_errors    = pstats->TxLateCollisions;
 337        return ns;
 338}
 339
 340static u32 get_msglevel(struct net_device *dev)
 341{
 342        struct adapter *adapter = dev->ml_priv;
 343
 344        return adapter->msg_enable;
 345}
 346
 347static void set_msglevel(struct net_device *dev, u32 val)
 348{
 349        struct adapter *adapter = dev->ml_priv;
 350
 351        adapter->msg_enable = val;
 352}
 353
 354static const char stats_strings[][ETH_GSTRING_LEN] = {
 355        "TxOctetsOK",
 356        "TxOctetsBad",
 357        "TxUnicastFramesOK",
 358        "TxMulticastFramesOK",
 359        "TxBroadcastFramesOK",
 360        "TxPauseFrames",
 361        "TxFramesWithDeferredXmissions",
 362        "TxLateCollisions",
 363        "TxTotalCollisions",
 364        "TxFramesAbortedDueToXSCollisions",
 365        "TxUnderrun",
 366        "TxLengthErrors",
 367        "TxInternalMACXmitError",
 368        "TxFramesWithExcessiveDeferral",
 369        "TxFCSErrors",
 370        "TxJumboFramesOk",
 371        "TxJumboOctetsOk",
  372
 373        "RxOctetsOK",
 374        "RxOctetsBad",
 375        "RxUnicastFramesOK",
 376        "RxMulticastFramesOK",
 377        "RxBroadcastFramesOK",
 378        "RxPauseFrames",
 379        "RxFCSErrors",
 380        "RxAlignErrors",
 381        "RxSymbolErrors",
 382        "RxDataErrors",
 383        "RxSequenceErrors",
 384        "RxRuntErrors",
 385        "RxJabberErrors",
 386        "RxInternalMACRcvError",
 387        "RxInRangeLengthErrors",
 388        "RxOutOfRangeLengthField",
 389        "RxFrameTooLongErrors",
 390        "RxJumboFramesOk",
 391        "RxJumboOctetsOk",
 392
 393        /* Port stats */
 394        "RxCsumGood",
 395        "TxCsumOffload",
 396        "TxTso",
 397        "RxVlan",
 398        "TxVlan",
  399        "TxNeedHeadroom",
  400
 401        /* Interrupt stats */
 402        "rx drops",
 403        "pure_rsps",
 404        "unhandled irqs",
 405        "respQ_empty",
 406        "respQ_overflow",
 407        "freelistQ_empty",
 408        "pkt_too_big",
 409        "pkt_mismatch",
 410        "cmdQ_full0",
 411        "cmdQ_full1",
 412
 413        "espi_DIP2ParityErr",
 414        "espi_DIP4Err",
 415        "espi_RxDrops",
 416        "espi_TxDrops",
 417        "espi_RxOvfl",
 418        "espi_ParityErr"
 419};
 420
 421#define T2_REGMAP_SIZE (3 * 1024)
 422
 423static int get_regs_len(struct net_device *dev)
 424{
 425        return T2_REGMAP_SIZE;
 426}
 427
 428static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 429{
 430        struct adapter *adapter = dev->ml_priv;
 431
 432        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 433        strlcpy(info->bus_info, pci_name(adapter->pdev),
 434                sizeof(info->bus_info));
 435}
 436
 437static int get_sset_count(struct net_device *dev, int sset)
 438{
 439        switch (sset) {
 440        case ETH_SS_STATS:
 441                return ARRAY_SIZE(stats_strings);
 442        default:
 443                return -EOPNOTSUPP;
 444        }
 445}
 446
 447static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
 448{
 449        if (stringset == ETH_SS_STATS)
 450                memcpy(data, stats_strings, sizeof(stats_strings));
 451}
 452
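/*
 * Fill in the ethtool statistics.  The values must be written in exactly
 * the same order as the names in stats_strings[] above.
 */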
 453static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
 454                      u64 *data)
 455{
 456        struct adapter *adapter = dev->ml_priv;
 457        struct cmac *mac = adapter->port[dev->if_port].mac;
 458        const struct cmac_statistics *s;
 459        const struct sge_intr_counts *t;
 460        struct sge_port_stats ss;
 461
 462        s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
 463        t = t1_sge_get_intr_counts(adapter->sge);
 464        t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
 465
 466        *data++ = s->TxOctetsOK;
 467        *data++ = s->TxOctetsBad;
 468        *data++ = s->TxUnicastFramesOK;
 469        *data++ = s->TxMulticastFramesOK;
 470        *data++ = s->TxBroadcastFramesOK;
 471        *data++ = s->TxPauseFrames;
 472        *data++ = s->TxFramesWithDeferredXmissions;
 473        *data++ = s->TxLateCollisions;
 474        *data++ = s->TxTotalCollisions;
 475        *data++ = s->TxFramesAbortedDueToXSCollisions;
 476        *data++ = s->TxUnderrun;
 477        *data++ = s->TxLengthErrors;
 478        *data++ = s->TxInternalMACXmitError;
 479        *data++ = s->TxFramesWithExcessiveDeferral;
 480        *data++ = s->TxFCSErrors;
 481        *data++ = s->TxJumboFramesOK;
 482        *data++ = s->TxJumboOctetsOK;
 483
 484        *data++ = s->RxOctetsOK;
 485        *data++ = s->RxOctetsBad;
 486        *data++ = s->RxUnicastFramesOK;
 487        *data++ = s->RxMulticastFramesOK;
 488        *data++ = s->RxBroadcastFramesOK;
 489        *data++ = s->RxPauseFrames;
 490        *data++ = s->RxFCSErrors;
 491        *data++ = s->RxAlignErrors;
 492        *data++ = s->RxSymbolErrors;
 493        *data++ = s->RxDataErrors;
 494        *data++ = s->RxSequenceErrors;
 495        *data++ = s->RxRuntErrors;
 496        *data++ = s->RxJabberErrors;
 497        *data++ = s->RxInternalMACRcvError;
 498        *data++ = s->RxInRangeLengthErrors;
 499        *data++ = s->RxOutOfRangeLengthField;
 500        *data++ = s->RxFrameTooLongErrors;
 501        *data++ = s->RxJumboFramesOK;
 502        *data++ = s->RxJumboOctetsOK;
 503
 504        *data++ = ss.rx_cso_good;
 505        *data++ = ss.tx_cso;
 506        *data++ = ss.tx_tso;
 507        *data++ = ss.vlan_xtract;
 508        *data++ = ss.vlan_insert;
 509        *data++ = ss.tx_need_hdrroom;
  510
 511        *data++ = t->rx_drops;
 512        *data++ = t->pure_rsps;
 513        *data++ = t->unhandled_irqs;
 514        *data++ = t->respQ_empty;
 515        *data++ = t->respQ_overflow;
 516        *data++ = t->freelistQ_empty;
 517        *data++ = t->pkt_too_big;
 518        *data++ = t->pkt_mismatch;
 519        *data++ = t->cmdQ_full[0];
 520        *data++ = t->cmdQ_full[1];
 521
 522        if (adapter->espi) {
 523                const struct espi_intr_counts *e;
 524
 525                e = t1_espi_get_intr_counts(adapter->espi);
 526                *data++ = e->DIP2_parity_err;
 527                *data++ = e->DIP4_err;
 528                *data++ = e->rx_drops;
 529                *data++ = e->tx_drops;
 530                *data++ = e->rx_ovflw;
 531                *data++ = e->parity_err;
 532        }
 533}
 534
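/*
 * Copy the registers in the range [start, end] into the snapshot buffer,
 * placing each value at its own register offset so the dump mirrors the
 * chip's address map.
 */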
 535static inline void reg_block_dump(struct adapter *ap, void *buf,
 536                                  unsigned int start, unsigned int end)
 537{
 538        u32 *p = buf + start;
 539
 540        for ( ; start <= end; start += sizeof(u32))
 541                *p++ = readl(ap->regs + start);
 542}
 543
 544static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
 545                     void *buf)
 546{
 547        struct adapter *ap = dev->ml_priv;
 548
 549        /*
 550         * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
 551         */
 552        regs->version = 2;
 553
 554        memset(buf, 0, T2_REGMAP_SIZE);
 555        reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
 556        reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
 557        reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
 558        reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
 559        reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
 560        reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
 561        reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
 562        reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
 563        reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
 564        reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
 565}
 566
 567static int get_link_ksettings(struct net_device *dev,
 568                              struct ethtool_link_ksettings *cmd)
 569{
 570        struct adapter *adapter = dev->ml_priv;
 571        struct port_info *p = &adapter->port[dev->if_port];
 572        u32 supported, advertising;
 573
 574        supported = p->link_config.supported;
 575        advertising = p->link_config.advertising;
 576
 577        if (netif_carrier_ok(dev)) {
 578                cmd->base.speed = p->link_config.speed;
 579                cmd->base.duplex = p->link_config.duplex;
 580        } else {
 581                cmd->base.speed = SPEED_UNKNOWN;
 582                cmd->base.duplex = DUPLEX_UNKNOWN;
 583        }
 584
 585        cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
 586        cmd->base.phy_address = p->phy->mdio.prtad;
 587        cmd->base.autoneg = p->link_config.autoneg;
 588
 589        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
 590                                                supported);
 591        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
 592                                                advertising);
 593
 594        return 0;
 595}
 596
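/*
 * Translate a speed/duplex pair into the corresponding ethtool SUPPORTED_*
 * capability bit.  Note that 10Gbps is only available in full duplex.
 */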
 597static int speed_duplex_to_caps(int speed, int duplex)
 598{
 599        int cap = 0;
 600
 601        switch (speed) {
 602        case SPEED_10:
 603                if (duplex == DUPLEX_FULL)
 604                        cap = SUPPORTED_10baseT_Full;
 605                else
 606                        cap = SUPPORTED_10baseT_Half;
 607                break;
 608        case SPEED_100:
 609                if (duplex == DUPLEX_FULL)
 610                        cap = SUPPORTED_100baseT_Full;
 611                else
 612                        cap = SUPPORTED_100baseT_Half;
 613                break;
 614        case SPEED_1000:
 615                if (duplex == DUPLEX_FULL)
 616                        cap = SUPPORTED_1000baseT_Full;
 617                else
 618                        cap = SUPPORTED_1000baseT_Half;
 619                break;
 620        case SPEED_10000:
 621                if (duplex == DUPLEX_FULL)
 622                        cap = SUPPORTED_10000baseT_Full;
 623        }
 624        return cap;
 625}
 626
 627#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
 628                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
 629                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
 630                      ADVERTISED_10000baseT_Full)
 631
 632static int set_link_ksettings(struct net_device *dev,
 633                              const struct ethtool_link_ksettings *cmd)
 634{
 635        struct adapter *adapter = dev->ml_priv;
 636        struct port_info *p = &adapter->port[dev->if_port];
 637        struct link_config *lc = &p->link_config;
 638        u32 advertising;
 639
 640        ethtool_convert_link_mode_to_legacy_u32(&advertising,
 641                                                cmd->link_modes.advertising);
 642
 643        if (!(lc->supported & SUPPORTED_Autoneg))
 644                return -EOPNOTSUPP;             /* can't change speed/duplex */
 645
 646        if (cmd->base.autoneg == AUTONEG_DISABLE) {
 647                u32 speed = cmd->base.speed;
 648                int cap = speed_duplex_to_caps(speed, cmd->base.duplex);
 649
 650                if (!(lc->supported & cap) || (speed == SPEED_1000))
 651                        return -EINVAL;
 652                lc->requested_speed = speed;
 653                lc->requested_duplex = cmd->base.duplex;
 654                lc->advertising = 0;
 655        } else {
 656                advertising &= ADVERTISED_MASK;
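                /*
                 * More than one mode requested: fall back to advertising
                 * everything the port supports.
                 */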
 657                if (advertising & (advertising - 1))
 658                        advertising = lc->supported;
 659                advertising &= lc->supported;
 660                if (!advertising)
 661                        return -EINVAL;
 662                lc->requested_speed = SPEED_INVALID;
 663                lc->requested_duplex = DUPLEX_INVALID;
 664                lc->advertising = advertising | ADVERTISED_Autoneg;
 665        }
 666        lc->autoneg = cmd->base.autoneg;
 667        if (netif_running(dev))
 668                t1_link_start(p->phy, p->mac, lc);
 669        return 0;
 670}
 671
 672static void get_pauseparam(struct net_device *dev,
 673                           struct ethtool_pauseparam *epause)
 674{
 675        struct adapter *adapter = dev->ml_priv;
 676        struct port_info *p = &adapter->port[dev->if_port];
 677
 678        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
 679        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
 680        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
 681}
 682
 683static int set_pauseparam(struct net_device *dev,
 684                          struct ethtool_pauseparam *epause)
 685{
 686        struct adapter *adapter = dev->ml_priv;
 687        struct port_info *p = &adapter->port[dev->if_port];
 688        struct link_config *lc = &p->link_config;
 689
 690        if (epause->autoneg == AUTONEG_DISABLE)
 691                lc->requested_fc = 0;
 692        else if (lc->supported & SUPPORTED_Autoneg)
 693                lc->requested_fc = PAUSE_AUTONEG;
 694        else
 695                return -EINVAL;
 696
 697        if (epause->rx_pause)
 698                lc->requested_fc |= PAUSE_RX;
 699        if (epause->tx_pause)
 700                lc->requested_fc |= PAUSE_TX;
 701        if (lc->autoneg == AUTONEG_ENABLE) {
 702                if (netif_running(dev))
 703                        t1_link_start(p->phy, p->mac, lc);
 704        } else {
 705                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
 706                if (netif_running(dev))
 707                        p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
 708                                                         lc->fc);
 709        }
 710        return 0;
 711}
 712
 713static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 714{
 715        struct adapter *adapter = dev->ml_priv;
 716        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
 717
 718        e->rx_max_pending = MAX_RX_BUFFERS;
 719        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
 720        e->tx_max_pending = MAX_CMDQ_ENTRIES;
 721
 722        e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
 723        e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
 724        e->tx_pending = adapter->params.sge.cmdQ_size[0];
 725}
 726
 727static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 728{
 729        struct adapter *adapter = dev->ml_priv;
 730        int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
 731
 732        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
 733            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
 734            e->tx_pending > MAX_CMDQ_ENTRIES ||
 735            e->rx_pending < MIN_FL_ENTRIES ||
 736            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
 737            e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
 738                return -EINVAL;
 739
 740        if (adapter->flags & FULL_INIT_DONE)
 741                return -EBUSY;
 742
 743        adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
 744        adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
 745        adapter->params.sge.cmdQ_size[0] = e->tx_pending;
 746        adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
 747                MAX_CMDQ1_ENTRIES : e->tx_pending;
 748        return 0;
 749}
 750
 751static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
 752                        struct kernel_ethtool_coalesce *kernel_coal,
 753                        struct netlink_ext_ack *extack)
 754{
 755        struct adapter *adapter = dev->ml_priv;
 756
 757        adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
 758        adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
 759        adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
 760        t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
 761        return 0;
 762}
 763
 764static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
 765                        struct kernel_ethtool_coalesce *kernel_coal,
 766                        struct netlink_ext_ack *extack)
 767{
 768        struct adapter *adapter = dev->ml_priv;
 769
 770        c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
 771        c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
 772        c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
 773        return 0;
 774}
 775
 776static int get_eeprom_len(struct net_device *dev)
 777{
 778        struct adapter *adapter = dev->ml_priv;
 779
 780        return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
 781}
 782
 783#define EEPROM_MAGIC(ap) \
 784        (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
 785
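/*
 * Read the serial EEPROM.  Reads are done in aligned 32-bit words into a
 * local bounce buffer and the requested byte range is then copied out.
 */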
 786static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
 787                      u8 *data)
 788{
 789        int i;
 790        u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
 791        struct adapter *adapter = dev->ml_priv;
 792
 793        e->magic = EEPROM_MAGIC(adapter);
 794        for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
 795                t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
 796        memcpy(data, buf + e->offset, e->len);
 797        return 0;
 798}
 799
 800static const struct ethtool_ops t1_ethtool_ops = {
 801        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
 802                                     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
 803                                     ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL,
 804        .get_drvinfo       = get_drvinfo,
 805        .get_msglevel      = get_msglevel,
 806        .set_msglevel      = set_msglevel,
 807        .get_ringparam     = get_sge_param,
 808        .set_ringparam     = set_sge_param,
 809        .get_coalesce      = get_coalesce,
 810        .set_coalesce      = set_coalesce,
 811        .get_eeprom_len    = get_eeprom_len,
 812        .get_eeprom        = get_eeprom,
 813        .get_pauseparam    = get_pauseparam,
 814        .set_pauseparam    = set_pauseparam,
 815        .get_link          = ethtool_op_get_link,
 816        .get_strings       = get_strings,
 817        .get_sset_count    = get_sset_count,
 818        .get_ethtool_stats = get_stats,
 819        .get_regs_len      = get_regs_len,
 820        .get_regs          = get_regs,
 821        .get_link_ksettings = get_link_ksettings,
 822        .set_link_ksettings = set_link_ksettings,
 823};
 824
 825static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 826{
 827        struct adapter *adapter = dev->ml_priv;
 828        struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
 829
 830        return mdio_mii_ioctl(mdio, if_mii(req), cmd);
 831}
 832
 833static int t1_change_mtu(struct net_device *dev, int new_mtu)
 834{
 835        int ret;
 836        struct adapter *adapter = dev->ml_priv;
 837        struct cmac *mac = adapter->port[dev->if_port].mac;
 838
 839        if (!mac->ops->set_mtu)
 840                return -EOPNOTSUPP;
 841        if ((ret = mac->ops->set_mtu(mac, new_mtu)))
 842                return ret;
 843        dev->mtu = new_mtu;
 844        return 0;
 845}
 846
 847static int t1_set_mac_addr(struct net_device *dev, void *p)
 848{
 849        struct adapter *adapter = dev->ml_priv;
 850        struct cmac *mac = adapter->port[dev->if_port].mac;
 851        struct sockaddr *addr = p;
 852
 853        if (!mac->ops->macaddress_set)
 854                return -EOPNOTSUPP;
 855
 856        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 857        mac->ops->macaddress_set(mac, dev->dev_addr);
 858        return 0;
 859}
 860
 861static netdev_features_t t1_fix_features(struct net_device *dev,
 862        netdev_features_t features)
 863{
 864        /*
 865         * Since there is no support for separate rx/tx vlan accel
  866         * enable/disable, make sure the tx flag is always in the same state as rx.
 867         */
 868        if (features & NETIF_F_HW_VLAN_CTAG_RX)
 869                features |= NETIF_F_HW_VLAN_CTAG_TX;
 870        else
 871                features &= ~NETIF_F_HW_VLAN_CTAG_TX;
 872
 873        return features;
 874}
 875
 876static int t1_set_features(struct net_device *dev, netdev_features_t features)
 877{
 878        netdev_features_t changed = dev->features ^ features;
 879        struct adapter *adapter = dev->ml_priv;
 880
 881        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
 882                t1_vlan_mode(adapter, features);
 883
 884        return 0;
 885}
 886#ifdef CONFIG_NET_POLL_CONTROLLER
 887static void t1_netpoll(struct net_device *dev)
 888{
 889        unsigned long flags;
 890        struct adapter *adapter = dev->ml_priv;
 891
 892        local_irq_save(flags);
 893        t1_interrupt(adapter->pdev->irq, adapter);
 894        local_irq_restore(flags);
 895}
 896#endif
 897
 898/*
 899 * Periodic accumulation of MAC statistics.  This is used only if the MAC
 900 * does not have any other way to prevent stats counter overflow.
 901 */
 902static void mac_stats_task(struct work_struct *work)
 903{
 904        int i;
 905        struct adapter *adapter =
 906                container_of(work, struct adapter, stats_update_task.work);
 907
 908        for_each_port(adapter, i) {
 909                struct port_info *p = &adapter->port[i];
 910
 911                if (netif_running(p->dev))
 912                        p->mac->ops->statistics_update(p->mac,
 913                                                       MAC_STATS_UPDATE_FAST);
 914        }
 915
 916        /* Schedule the next statistics update if any port is active. */
 917        spin_lock(&adapter->work_lock);
 918        if (adapter->open_device_map & PORT_MASK)
 919                schedule_mac_stats_update(adapter,
 920                                          adapter->params.stats_update_period);
 921        spin_unlock(&adapter->work_lock);
 922}
 923
 924static const struct net_device_ops cxgb_netdev_ops = {
 925        .ndo_open               = cxgb_open,
 926        .ndo_stop               = cxgb_close,
 927        .ndo_start_xmit         = t1_start_xmit,
 928        .ndo_get_stats          = t1_get_stats,
 929        .ndo_validate_addr      = eth_validate_addr,
 930        .ndo_set_rx_mode        = t1_set_rxmode,
 931        .ndo_eth_ioctl          = t1_ioctl,
 932        .ndo_change_mtu         = t1_change_mtu,
 933        .ndo_set_mac_address    = t1_set_mac_addr,
 934        .ndo_fix_features       = t1_fix_features,
 935        .ndo_set_features       = t1_set_features,
 936#ifdef CONFIG_NET_POLL_CONTROLLER
 937        .ndo_poll_controller    = t1_netpoll,
 938#endif
 939};
 940
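/*
 * PCI probe.  Enables the device, sets up the DMA masks, maps BAR0 and
 * allocates one net_device per port, with the adapter state embedded in the
 * private area of the first one.  Ports that fail to register are skipped
 * as long as at least one net_device registers successfully.
 */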
 941static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 942{
 943        int i, err, pci_using_dac = 0;
 944        unsigned long mmio_start, mmio_len;
 945        const struct board_info *bi;
 946        struct adapter *adapter = NULL;
 947        struct port_info *pi;
 948
 949        err = pci_enable_device(pdev);
 950        if (err)
 951                return err;
 952
 953        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 954                pr_err("%s: cannot find PCI device memory base address\n",
 955                       pci_name(pdev));
 956                err = -ENODEV;
 957                goto out_disable_pdev;
 958        }
 959
 960        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 961                pci_using_dac = 1;
 962
 963                if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
 964                        pr_err("%s: unable to obtain 64-bit DMA for coherent allocations\n",
 965                               pci_name(pdev));
 966                        err = -ENODEV;
 967                        goto out_disable_pdev;
 968                }
 969
 970        } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
 971                pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
 972                goto out_disable_pdev;
 973        }
 974
 975        err = pci_request_regions(pdev, DRV_NAME);
 976        if (err) {
 977                pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
 978                goto out_disable_pdev;
 979        }
 980
 981        pci_set_master(pdev);
 982
 983        mmio_start = pci_resource_start(pdev, 0);
 984        mmio_len = pci_resource_len(pdev, 0);
 985        bi = t1_get_board_info(ent->driver_data);
 986
 987        for (i = 0; i < bi->port_number; ++i) {
 988                struct net_device *netdev;
 989
 990                netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
 991                if (!netdev) {
 992                        err = -ENOMEM;
 993                        goto out_free_dev;
 994                }
 995
 996                SET_NETDEV_DEV(netdev, &pdev->dev);
 997
 998                if (!adapter) {
 999                        adapter = netdev_priv(netdev);
1000                        adapter->pdev = pdev;
1001                        adapter->port[0].dev = netdev;  /* so we don't leak it */
1002
1003                        adapter->regs = ioremap(mmio_start, mmio_len);
1004                        if (!adapter->regs) {
1005                                pr_err("%s: cannot map device registers\n",
1006                                       pci_name(pdev));
1007                                err = -ENOMEM;
1008                                goto out_free_dev;
1009                        }
1010
1011                        if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1012                                err = -ENODEV;    /* Can't handle this chip rev */
1013                                goto out_free_dev;
1014                        }
1015
1016                        adapter->name = pci_name(pdev);
1017                        adapter->msg_enable = dflt_msg_enable;
1018                        adapter->mmio_len = mmio_len;
1019
1020                        spin_lock_init(&adapter->tpi_lock);
1021                        spin_lock_init(&adapter->work_lock);
1022                        spin_lock_init(&adapter->async_lock);
1023                        spin_lock_init(&adapter->mac_lock);
1024
1025                        INIT_DELAYED_WORK(&adapter->stats_update_task,
1026                                          mac_stats_task);
1027
1028                        pci_set_drvdata(pdev, netdev);
1029                }
1030
1031                pi = &adapter->port[i];
1032                pi->dev = netdev;
1033                netif_carrier_off(netdev);
1034                netdev->irq = pdev->irq;
1035                netdev->if_port = i;
1036                netdev->mem_start = mmio_start;
1037                netdev->mem_end = mmio_start + mmio_len - 1;
1038                netdev->ml_priv = adapter;
1039                netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
1040                        NETIF_F_RXCSUM;
1041                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
1042                        NETIF_F_RXCSUM | NETIF_F_LLTX;
1043
1044                if (pci_using_dac)
1045                        netdev->features |= NETIF_F_HIGHDMA;
1046                if (vlan_tso_capable(adapter)) {
1047                        netdev->features |=
1048                                NETIF_F_HW_VLAN_CTAG_TX |
1049                                NETIF_F_HW_VLAN_CTAG_RX;
1050                        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1051
1052                        /* T204: disable TSO */
1053                        if (!(is_T2(adapter)) || bi->port_number != 4) {
1054                                netdev->hw_features |= NETIF_F_TSO;
1055                                netdev->features |= NETIF_F_TSO;
1056                        }
1057                }
1058
1059                netdev->netdev_ops = &cxgb_netdev_ops;
1060                netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
1061                        sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
1062
1063                netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
1064
1065                netdev->ethtool_ops = &t1_ethtool_ops;
1066
1067                switch (bi->board) {
1068                case CHBT_BOARD_CHT110:
1069                case CHBT_BOARD_N110:
1070                case CHBT_BOARD_N210:
1071                case CHBT_BOARD_CHT210:
1072                        netdev->max_mtu = PM3393_MAX_FRAME_SIZE -
1073                                          (ETH_HLEN + ETH_FCS_LEN);
1074                        break;
1075                case CHBT_BOARD_CHN204:
1076                        netdev->max_mtu = VSC7326_MAX_MTU;
1077                        break;
1078                default:
1079                        netdev->max_mtu = ETH_DATA_LEN;
1080                        break;
1081                }
1082        }
1083
1084        if (t1_init_sw_modules(adapter, bi) < 0) {
1085                err = -ENODEV;
1086                goto out_free_dev;
1087        }
1088
1089        /*
1090         * The card is now ready to go.  If any errors occur during device
1091         * registration we do not fail the whole card but rather proceed only
1092         * with the ports we manage to register successfully.  However we must
1093         * register at least one net device.
1094         */
1095        for (i = 0; i < bi->port_number; ++i) {
1096                err = register_netdev(adapter->port[i].dev);
1097                if (err)
1098                        pr_warn("%s: cannot register net device %s, skipping\n",
1099                                pci_name(pdev), adapter->port[i].dev->name);
1100                else {
1101                        /*
1102                         * Change the name we use for messages to the name of
1103                         * the first successfully registered interface.
1104                         */
1105                        if (!adapter->registered_device_map)
1106                                adapter->name = adapter->port[i].dev->name;
1107
1108                        __set_bit(i, &adapter->registered_device_map);
1109                }
1110        }
1111        if (!adapter->registered_device_map) {
1112                pr_err("%s: could not register any net devices\n",
1113                       pci_name(pdev));
1114                err = -EINVAL;
1115                goto out_release_adapter_res;
1116        }
1117
1118        pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
1119                adapter->name, bi->desc, adapter->params.chip_revision,
1120                adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1121                adapter->params.pci.speed, adapter->params.pci.width);
1122
1123        /*
1124         * Set the T1B ASIC and memory clocks.
1125         */
1126        if (t1powersave)
1127                adapter->t1powersave = LCLOCK;  /* HW default is powersave mode. */
1128        else
1129                adapter->t1powersave = HCLOCK;
1130        if (t1_is_T1B(adapter))
1131                t1_clock(adapter, t1powersave);
1132
1133        return 0;
1134
1135out_release_adapter_res:
1136        t1_free_sw_modules(adapter);
1137out_free_dev:
1138        if (adapter) {
1139                if (adapter->regs)
1140                        iounmap(adapter->regs);
1141                for (i = bi->port_number - 1; i >= 0; --i)
1142                        if (adapter->port[i].dev)
1143                                free_netdev(adapter->port[i].dev);
1144        }
1145        pci_release_regions(pdev);
1146out_disable_pdev:
1147        pci_disable_device(pdev);
1148        return err;
1149}
1150
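/*
 * Clock @nbits bits of @bitdata, most significant bit first, out of the
 * ELMER0 GPO data line, pulsing the clock line low and then high for each
 * bit.  Used below to serially program the clock synthesizers.
 */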
1151static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1152{
1153        int data;
1154        int i;
1155        u32 val;
1156
1157        enum {
1158                S_CLOCK = 1 << 3,
1159                S_DATA = 1 << 4
1160        };
1161
1162        for (i = (nbits - 1); i > -1; i--) {
1163
1164                udelay(50);
1165
1166                data = ((bitdata >> i) & 0x1);
1167                __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1168
1169                if (data)
1170                        val |= S_DATA;
1171                else
1172                        val &= ~S_DATA;
1173
1174                udelay(50);
1175
1176                /* Set SCLOCK low */
1177                val &= ~S_CLOCK;
1178                __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1179
1180                udelay(50);
1181
1182                /* Write SCLOCK high */
1183                val |= S_CLOCK;
1184                __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1185
1186        }
1187}
1188
1189static int t1_clock(struct adapter *adapter, int mode)
1190{
1191        u32 val;
1192        int M_CORE_VAL;
1193        int M_MEM_VAL;
1194
1195        enum {
1196                M_CORE_BITS     = 9,
1197                T_CORE_VAL      = 0,
1198                T_CORE_BITS     = 2,
1199                N_CORE_VAL      = 0,
1200                N_CORE_BITS     = 2,
1201                M_MEM_BITS      = 9,
1202                T_MEM_VAL       = 0,
1203                T_MEM_BITS      = 2,
1204                N_MEM_VAL       = 0,
1205                N_MEM_BITS      = 2,
1206                NP_LOAD         = 1 << 17,
1207                S_LOAD_MEM      = 1 << 5,
1208                S_LOAD_CORE     = 1 << 6,
1209                S_CLOCK         = 1 << 3
1210        };
1211
1212        if (!t1_is_T1B(adapter))
1213                return -ENODEV; /* Can't re-clock this chip. */
1214
1215        if (mode & 2)
1216                return 0;       /* show current mode. */
1217
1218        if ((adapter->t1powersave & 1) == (mode & 1))
1219                return -EALREADY;       /* ASIC already running in mode. */
1220
1221        if ((mode & 1) == HCLOCK) {
1222                M_CORE_VAL = 0x14;
1223                M_MEM_VAL = 0x18;
1224                adapter->t1powersave = HCLOCK;  /* overclock */
1225        } else {
1226                M_CORE_VAL = 0xe;
1227                M_MEM_VAL = 0x10;
1228                adapter->t1powersave = LCLOCK;  /* underclock */
1229        }
1230
1231        /* Don't interrupt this serial stream! */
1232        spin_lock(&adapter->tpi_lock);
1233
1234        /* Initialize for ASIC core */
1235        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1236        val |= NP_LOAD;
1237        udelay(50);
1238        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1239        udelay(50);
1240        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1241        val &= ~S_LOAD_CORE;
1242        val &= ~S_CLOCK;
1243        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1244        udelay(50);
1245
1246        /* Serial program the ASIC clock synthesizer */
1247        bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
1248        bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
1249        bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
1250        udelay(50);
1251
1252        /* Finish ASIC core */
1253        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1254        val |= S_LOAD_CORE;
1255        udelay(50);
1256        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1257        udelay(50);
1258        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1259        val &= ~S_LOAD_CORE;
1260        udelay(50);
1261        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1262        udelay(50);
1263
1264        /* Initialize for memory */
1265        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1266        val |= NP_LOAD;
1267        udelay(50);
1268        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1269        udelay(50);
1270        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1271        val &= ~S_LOAD_MEM;
1272        val &= ~S_CLOCK;
1273        udelay(50);
1274        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1275        udelay(50);
1276
1277        /* Serial program the memory clock synthesizer */
1278        bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
1279        bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
1280        bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
1281        udelay(50);
1282
1283        /* Finish memory */
1284        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1285        val |= S_LOAD_MEM;
1286        udelay(50);
1287        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1288        udelay(50);
1289        __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1290        val &= ~S_LOAD_MEM;
1291        udelay(50);
1292        __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1293
1294        spin_unlock(&adapter->tpi_lock);
1295
1296        return 0;
1297}
1298
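/*
 * Software-reset the chip by bouncing it through PCI power state D3hot
 * (value 3 in the PM control/status register) and back to D0.
 */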
1299static inline void t1_sw_reset(struct pci_dev *pdev)
1300{
1301        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1302        pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1303}
1304
1305static void remove_one(struct pci_dev *pdev)
1306{
1307        struct net_device *dev = pci_get_drvdata(pdev);
1308        struct adapter *adapter = dev->ml_priv;
1309        int i;
1310
1311        for_each_port(adapter, i) {
1312                if (test_bit(i, &adapter->registered_device_map))
1313                        unregister_netdev(adapter->port[i].dev);
1314        }
1315
1316        t1_free_sw_modules(adapter);
1317        iounmap(adapter->regs);
1318
1319        while (--i >= 0) {
1320                if (adapter->port[i].dev)
1321                        free_netdev(adapter->port[i].dev);
1322        }
1323
1324        pci_release_regions(pdev);
1325        pci_disable_device(pdev);
1326        t1_sw_reset(pdev);
1327}
1328
1329static struct pci_driver cxgb_pci_driver = {
1330        .name     = DRV_NAME,
1331        .id_table = t1_pci_tbl,
1332        .probe    = init_one,
1333        .remove   = remove_one,
1334};
1335
1336module_pci_driver(cxgb_pci_driver);
1337