linux/drivers/net/b44.c
/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de>
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>


#include "b44.h"

#define DRV_MODULE_NAME         "b44"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "2.0"

#define B44_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     60
#define B44_MAX_MTU                     1500

#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))
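
/* Worked example of the ring math: with tx_pending = 511, tx_cons = 3
 * and tx_prod = 10, TX_BUFFS_AVAIL() yields 3 + 511 - 10 = 504 free
 * descriptors; once tx_prod wraps below tx_cons the second branch
 * subtracts the unusable TX_RING_GAP() instead.  NEXT_TX() relies on
 * B44_TX_RING_SIZE being a power of two, so the AND mask wraps the
 * index without a modulo.
 */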

#define RX_PKT_OFFSET           30
#define RX_PKT_BUF_SZ           (1536 + RX_PKT_OFFSET + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE        0x400
#define B44_PATTERN_SIZE        0x80
#define B44_PMASK_BASE          0x600
#define B44_PMASK_SIZE          0x10
#define B44_MAX_PATTERNS        16
#define B44_ETHIPV6UDP_HLEN     62
#define B44_ETHIPV4UDP_HLEN     42

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");


#ifdef CONFIG_B44_PCI
static const struct pci_device_id b44_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
        { 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
        SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET          1
#define B44_FULL_RESET_SKIP_PHY 2
#define B44_PARTIAL_RESET       3

static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;
static int instance;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)      # x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(sdev->dev, dma_base,
                                         offset & dma_desc_align_mask,
                                         dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(sdev->dev, dma_base,
                                      offset & dma_desc_align_mask,
                                      dma_desc_sync_size, dir);
}
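
/* These helpers back the B44_FLAG_*_RING_HACK fallback set up in
 * b44_alloc_consistent() below: when a descriptor ring could not come
 * from dma_alloc_coherent() it is kzalloc()ed and streaming-mapped
 * instead, so every descriptor update must be followed by an explicit
 * sync.  The offset is rounded down with dma_desc_align_mask and
 * dma_desc_sync_size bytes are synced; presumably both are sized to
 * the platform's cache line at probe time (they are assigned outside
 * this excerpt).
 */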

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        ssb_write32(bp->sdev, reg, val);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
                       "%lx to %s.\n",
                       bp->dev->name,
                       bit, reg,
                       (clear ? "clear" : "set"));
                return -ENODEV;
        }
        return 0;
}

static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
                            (index << CAM_CTRL_INDEX_SHIFT)));

        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

        val = br32(bp, B44_CAM_DATA_LO);

        data[2] = (val >> 24) & 0xFF;
        data[3] = (val >> 16) & 0xFF;
        data[4] = (val >> 8) & 0xFF;
        data[5] = (val >> 0) & 0xFF;

        val = br32(bp, B44_CAM_DATA_HI);

        data[0] = (val >> 8) & 0xFF;
        data[1] = (val >> 0) & 0xFF;
}

static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) <<  8;
        val |= ((u32) data[5]) <<  0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                            (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
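
/* CAM layout, as the two accessors above encode it: DATA_HI carries
 * the two most significant address octets plus the valid bit, DATA_LO
 * the remaining four.  For the (made-up) MAC 00:11:22:33:44:55 a
 * write would issue
 *
 *      B44_CAM_DATA_LO = 0x22334455
 *      B44_CAM_DATA_HI = CAM_DATA_HI_VALID | 0x0011
 *
 * followed by CAM_CTRL_WRITE with the target index, then a busy-wait
 * on CAM_CTRL_BUSY.
 */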

static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                             (phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                             (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                             (phy_addr << MDIO_DATA_PMD_SHIFT) |
                             (reg << MDIO_DATA_RA_SHIFT) |
                             (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                             (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
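
/* Both MDIO accessors build a single clause-22 style management frame
 * in the B44_MDIO_DATA register: the start pattern
 * (MDIO_DATA_SB_START), a read/write opcode, the PHY address (PMD),
 * the register address (RA), the turnaround bits (MDIO_TA_VALID) and,
 * for writes, the 16-bit payload.  Completion is signalled by
 * EMAC_INT_MII in B44_EMAC_ISTAT, which is acked up front and then
 * polled via b44_wait_bit() for up to 100 iterations of 10us each.
 */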

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;

        return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;

        return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = __b44_readphy(bp, phy_id, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                         int val)
{
        struct b44 *bp = netdev_priv(dev);
        __b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;
        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
                               bp->dev->name);
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
           the b44 mac tx pause mechanism generates excessive
           pause frames.
           Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)) {
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}
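
/* The resulting policy: RX pause is enabled only when we advertise
 * both symmetric and asymmetric pause (ADVERTISE_PAUSE_CAP |
 * ADVERTISE_PAUSE_ASYM) and the partner advertises asymmetric pause
 * without symmetric pause (LPA_PAUSE_ASYM set, LPA_PAUSE_CAP clear),
 * i.e. the partner can honor pause frames in one direction only.
 * Every other combination leaves both pause directions off unless the
 * user forces TX pause through ethtool.
 */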

#ifdef SSB_DRIVER_MIPS
extern char *nvram_get(char *name);
static void b44_wap54g10_workaround(struct b44 *bp)
{
        const char *str;
        u32 val;
        int err;

        /*
         * workaround for bad hardware design in Linksys WAP54G v1.0
         * see https://dev.openwrt.org/ticket/146
         * check and reset bit "isolate"
         */
        str = nvram_get("boardnum");
        if (!str)
                return;
        if (simple_strtoul(str, NULL, 0) == 2) {
                err = __b44_readphy(bp, 0, MII_BMCR, &val);
                if (err)
                        goto error;
                if (!(val & BMCR_ISOLATE))
                        return;
                val &= ~BMCR_ISOLATE;
                err = __b44_writephy(bp, 0, MII_BMCR, val);
                if (err)
                        goto error;
        }
        return;
error:
        printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        b44_wap54g10_workaround(bp);

        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;
        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}

static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u32 *val;

        val = &bp->hw_stats.tx_good_octets;
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        /* Pad */
        reg += 8*4UL;

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }
}

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
                bp->flags |= B44_FLAG_100_BASE_T;
                bp->flags |= B44_FLAG_FULL_DUPLEX;
                if (!netif_carrier_ok(bp->dev)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        val |= TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                }
                return;
        }

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                               bp->dev->name);
                if (bmsr & BMSR_JCD)
                        printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                               bp->dev->name);
        }
}

static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                dma_unmap_single(bp->sdev->dev,
                                 rp->mapping,
                                 skb->len,
                                 DMA_TO_DEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
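/* Resulting buffer layout (offsets relative to the mapped skb data):
 *
 *      0                    RX_PKT_OFFSET (30)
 *      +--------------------+------------------------------+
 *      | struct rx_header   | packet data written by chip  |
 *      +--------------------+------------------------------+
 *
 * The descriptor address is mapping + RX_PKT_OFFSET, so the chip's
 * "header goes 30 bytes before the DMA address" rule lands the
 * rx_header exactly at the start of the buffer.
 */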
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = dma_map_single(bp->sdev->dev, skb->data,
                                 RX_PKT_BUF_SZ,
                                 DMA_FROM_DEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
        if (dma_mapping_error(mapping) ||
                mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                /* Sigh... */
                if (!dma_mapping_error(mapping))
                        dma_unmap_single(bp->sdev->dev, mapping,
                                        RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = dma_map_single(bp->sdev->dev, skb->data,
                                         RX_PKT_BUF_SZ,
                                         DMA_FROM_DEVICE);
                if (dma_mapping_error(mapping) ||
                        mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                        if (!dma_mapping_error(mapping))
                                dma_unmap_single(bp->sdev->dev, mapping,
                                                 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }

        rh = (struct rx_header *) skb->data;
        skb_reserve(skb, RX_PKT_OFFSET);

        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        map->mapping = mapping;

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dp),
                                             DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        __le32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        dest_map->mapping = src_map->mapping;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
                                          src_idx * sizeof(*src_desc),
                                          DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr),
                                   RX_PKT_BUF_SZ,
                                   DMA_FROM_DEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = rp->mapping;
                struct rx_header *rh;
                u16 len;

                dma_sync_single_for_cpu(bp->sdev->dev, map,
                                        RX_PKT_BUF_SZ,
                                        DMA_FROM_DEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        dma_unmap_single(bp->sdev->dev, map,
                                         skb_size, DMA_FROM_DEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + RX_PKT_OFFSET);
                        skb_pull(skb, RX_PKT_OFFSET);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
                                                         copy_skb->data, len);
                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                bp->dev->last_rx = jiffies;
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}
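
/* Two delivery paths fall out of the loop above: packets larger than
 * RX_COPY_THRESHOLD keep their original skb (a replacement buffer is
 * allocated first, then the rx_header is trimmed off with skb_put() +
 * skb_pull()), while small packets are copied into a freshly
 * allocated skb so the original buffer can be recycled in place,
 * avoiding an unmap/remap cycle for the common small-frame case.
 */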

static int b44_poll(struct napi_struct *napi, int budget)
{
        struct b44 *bp = container_of(napi, struct b44, napi);
        struct net_device *netdev = bp->dev;
        int work_done;

        spin_lock_irq(&bp->lock);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        spin_unlock_irq(&bp->lock);

        work_done = 0;
        if (bp->istat & ISTAT_RX)
                work_done += b44_rx(bp, budget);

        if (bp->istat & ISTAT_ERRORS) {
                unsigned long flags;

                spin_lock_irqsave(&bp->lock, flags);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                work_done = 0;
        }

        if (work_done < budget) {
                netif_rx_complete(netdev, napi);
                b44_enable_ints(bp);
        }

        return work_done;
}
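
/* NAPI hand-off: b44_interrupt() below latches the active status bits
 * in bp->istat, masks the chip with __b44_disable_ints() and schedules
 * this poll routine.  TX completion and error recovery run under the
 * device lock, RX runs against the budget, and interrupts are
 * re-enabled only once a poll finishes with work to spare.
 */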

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by hw/firmware,
         * but doesn't mask off the bits.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        printk(KERN_INFO "%s: late interrupt.\n", dev->name);
                        goto irq_ack;
                }

                if (netif_rx_schedule_prep(dev, &bp->napi)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(dev, &bp->napi);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                goto err_out;
        }

        mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                struct sk_buff *bounce_skb;

                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!dma_mapping_error(mapping))
                        dma_unmap_single(bp->sdev->dev, mapping, len,
                                        DMA_TO_DEVICE);

                bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = dma_map_single(bp->sdev->dev, bounce_skb->data,
                                         len, DMA_TO_DEVICE);
                if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
                        if (!dma_mapping_error(mapping))
                                dma_unmap_single(bp->sdev->dev, mapping,
                                                 len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        bp->tx_buffers[entry].mapping = mapping;

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
                                             entry * sizeof(bp->tx_ring[0]),
                                             DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

out_unlock:
        spin_unlock_irq(&bp->lock);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}
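
/* Each transmit uses exactly one descriptor (no NETIF_F_SG yet): the
 * control word packs the length with DESC_CTRL_SOF/EOF for a single-
 * fragment frame, DESC_CTRL_IOC to request a completion interrupt and
 * DESC_CTRL_EOT on the ring's last slot.  Buffers that map above the
 * 30-bit DMA limit are copied into a GFP_DMA bounce skb first, the
 * same workaround b44_alloc_rx_skb() applies on the receive side.
 */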

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                dma_unmap_single(bp->sdev->dev, rp->mapping, RX_PKT_BUF_SZ,
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                dma_unmap_single(bp->sdev->dev, rp->mapping, rp->skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma,
                                           DMA_TABLE_BYTES,
                                           DMA_TO_DEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
                                          bp->rx_ring, bp->rx_ring_dma);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma,
                                         DMA_TABLE_BYTES,
                                         DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
                                          bp->tx_ring, bp->tx_ring_dma);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
        int size;

        size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kzalloc(size, gfp);
        if (!bp->rx_buffers)
                goto out_err;

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kzalloc(size, gfp);
        if (!bp->tx_buffers)
                goto out_err;

        size = DMA_TABLE_BYTES;
        bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, gfp);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to dma_alloc_coherent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(size, gfp);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);

                if (dma_mapping_error(rx_ring_dma) ||
                        rx_ring_dma + size > DMA_30BIT_MASK) {
                        kfree(rx_ring);
                        goto out_err;
                }

                bp->rx_ring = rx_ring;
                bp->rx_ring_dma = rx_ring_dma;
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }

        bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, gfp);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to dma_alloc_coherent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *tx_ring;
                dma_addr_t tx_ring_dma;

                tx_ring = kzalloc(size, gfp);
                if (!tx_ring)
                        goto out_err;

                tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring,
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);

                if (dma_mapping_error(tx_ring_dma) ||
                        tx_ring_dma + size > DMA_30BIT_MASK) {
                        kfree(tx_ring);
                        goto out_err;
                }

                bp->tx_ring = tx_ring;
                bp->tx_ring_dma = tx_ring_dma;
                bp->flags |= B44_FLAG_TX_RING_HACK;
        }

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}
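
/* The *_RING_HACK fallback above covers the case where
 * dma_alloc_coherent() fails outright (its comments note it can be
 * needlessly restricted to GFP_DMA): the ring is then plain kzalloc()
 * memory under a streaming mapping, checked against the 30-bit limit
 * like any other buffer, and b44_init_rings() plus the descriptor
 * writers must sync it explicitly instead of relying on coherent
 * semantics.
 */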

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
        struct ssb_device *sdev = bp->sdev;

        if (ssb_device_is_enabled(bp->sdev)) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        } else
                ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

        ssb_device_enable(bp->sdev, 0);
        b44_clear_stats(bp);

        switch (sdev->bus->bustype) {
        case SSB_BUSTYPE_SSB:
                bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                     (((ssb_clockspeed(sdev->bus) + (B44_MDC_RATIO / 2)) / B44_MDC_RATIO)
                     & MDIO_CTRL_MAXF_MASK)));
                break;
        case SSB_BUSTYPE_PCI:
        case SSB_BUSTYPE_PCMCIA:
                bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                     (0x0d & MDIO_CTRL_MAXF_MASK)));
                break;
        }

        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        b44_chip_reset(bp);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;
        u32 val;

        if (netif_running(dev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        spin_lock_irq(&bp->lock);

        val = br32(bp, B44_RXCONFIG);
        if (!(val & RXCONFIG_CAM_ABSENT))
                __b44_set_mac_addr(bp);

        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
        u32 val;

        b44_chip_reset(bp);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp, GFP_KERNEL);
        if (err)
                goto out;

        napi_enable(&bp->napi);

        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                napi_disable(&bp->napi);
                b44_chip_reset(bp);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + HZ;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = b44_timer;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        netif_start_queue(dev);
out:
        return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
        u32 i;
        u32 *pattern = (u32 *) pp;

        for (i = 0; i < bytes; i += sizeof(u32)) {
                bw32(bp, B44_FILT_ADDR, table_offset + i);
                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
        }
}

static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
        int magicsync = 6;
        int k, j, len = offset;
        int ethaddr_bytes = ETH_ALEN;

        memset(ppattern + offset, 0xff, magicsync);
        for (j = 0; j < magicsync; j++)
                set_bit(len++, (unsigned long *) pmask);

        for (j = 0; j < B44_MAX_PATTERNS; j++) {
                if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
                        ethaddr_bytes = ETH_ALEN;
                else
                        ethaddr_bytes = B44_PATTERN_SIZE - len;
                if (ethaddr_bytes <= 0)
                        break;
                for (k = 0; k < ethaddr_bytes; k++) {
                        ppattern[offset + magicsync +
                                (j * ETH_ALEN) + k] = macaddr[k];
                        len++;
                        set_bit(len, (unsigned long *) pmask);
                }
        }
        return len - 1;
}
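
/* A magic packet is six 0xff synchronisation bytes followed by the
 * station MAC repeated sixteen times.  b44_magic_pattern() writes that
 * sequence at the given offset (the L2/L3 header length for the raw,
 * IPv4 and IPv6 variants below) and sets the corresponding bits in
 * the byte mask, truncating the MAC repetitions once the 0x80-byte
 * pattern buffer (B44_PATTERN_SIZE) runs out.
 */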
1493
1494/* Setup magic packet patterns in the b44 WOL
1495 * pattern matching filter.
1496 */
1497static void b44_setup_pseudo_magicp(struct b44 *bp)
1498{
1499
1500        u32 val;
1501        int plen0, plen1, plen2;
1502        u8 *pwol_pattern;
1503        u8 pwol_mask[B44_PMASK_SIZE];
1504
1505        pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1506        if (!pwol_pattern) {
1507                printk(KERN_ERR PFX "Memory not available for WOL\n");
1508                return;
1509        }
1510
1511        /* Ipv4 magic packet pattern - pattern 0.*/
1512        memset(pwol_mask, 0, B44_PMASK_SIZE);
1513        plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1514                                  B44_ETHIPV4UDP_HLEN);
1515
1516        bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1517        bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1518
1519        /* Raw ethernet II magic packet pattern - pattern 1 */
1520        memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1521        memset(pwol_mask, 0, B44_PMASK_SIZE);
1522        plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1523                                  ETH_HLEN);
1524
1525        bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1526                       B44_PATTERN_BASE + B44_PATTERN_SIZE);
1527        bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1528                       B44_PMASK_BASE + B44_PMASK_SIZE);
1529
1530        /* IPv6 magic packet pattern - pattern 2 */
1531        memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1532        memset(pwol_mask, 0, B44_PMASK_SIZE);
1533        plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1534                                  B44_ETHIPV6UDP_HLEN);
1535
1536        bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1537                       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1538        bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1539                       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1540
1541        kfree(pwol_pattern);
1542
1543        /* set these patterns' lengths: one less than each real length */
1544        val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1545        bw32(bp, B44_WKUP_LEN, val);
1546
1547        /* enable wakeup pattern matching */
1548        val = br32(bp, B44_DEVCTRL);
1549        bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
1550
1551}
1552
1553#ifdef CONFIG_B44_PCI
1554static void b44_setup_wol_pci(struct b44 *bp)
1555{
1556        u16 val;
1557
1558        if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
1559                bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
1560                pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
1561                pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
1562        }
1563}
1564#else
1565static inline void b44_setup_wol_pci(struct b44 *bp) { }
1566#endif /* CONFIG_B44_PCI */
1567
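/* Arm wake-on-LAN.  B0 and later cores match magic packets in hardware
 * (DEVCTRL_MPM) once the station address is programmed into
 * B44_ADDR_LO/HI; older cores emulate it with the pseudo magic patterns
 * built above.
 */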
1568static void b44_setup_wol(struct b44 *bp)
1569{
1570        u32 val;
1571
1572        bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1573
1574        if (bp->flags & B44_FLAG_B0_ANDLATER) {
1575
1576                bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1577
1578                val = bp->dev->dev_addr[2] << 24 |
1579                        bp->dev->dev_addr[3] << 16 |
1580                        bp->dev->dev_addr[4] << 8 |
1581                        bp->dev->dev_addr[5];
1582                bw32(bp, B44_ADDR_LO, val);
1583
1584                val = bp->dev->dev_addr[0] << 8 |
1585                        bp->dev->dev_addr[1];
1586                bw32(bp, B44_ADDR_HI, val);
1587
1588                val = br32(bp, B44_DEVCTRL);
1589                bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1590
1591        } else {
1592                b44_setup_pseudo_magicp(bp);
1593        }
1594        b44_setup_wol_pci(bp);
1595}
1596
1597static int b44_close(struct net_device *dev)
1598{
1599        struct b44 *bp = netdev_priv(dev);
1600
1601        netif_stop_queue(dev);
1602
1603        napi_disable(&bp->napi);
1604
1605        del_timer_sync(&bp->timer);
1606
1607        spin_lock_irq(&bp->lock);
1608
1609        b44_halt(bp);
1610        b44_free_rings(bp);
1611        netif_carrier_off(dev);
1612
1613        spin_unlock_irq(&bp->lock);
1614
1615        free_irq(dev->irq, dev);
1616
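        /* Re-arm the chip for wake-on-LAN before it is left idle. */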
1617        if (bp->flags & B44_FLAG_WOL_ENABLE) {
1618                b44_init_hw(bp, B44_PARTIAL_RESET);
1619                b44_setup_wol(bp);
1620        }
1621
1622        b44_free_consistent(bp);
1623
1624        return 0;
1625}
1626
1627static struct net_device_stats *b44_get_stats(struct net_device *dev)
1628{
1629        struct b44 *bp = netdev_priv(dev);
1630        struct net_device_stats *nstat = &bp->stats;
1631        struct b44_hw_stats *hwstat = &bp->hw_stats;
1632
1633        /* Convert HW stats into netdevice stats. */
1634        nstat->rx_packets = hwstat->rx_pkts;
1635        nstat->tx_packets = hwstat->tx_pkts;
1636        nstat->rx_bytes   = hwstat->rx_octets;
1637        nstat->tx_bytes   = hwstat->tx_octets;
1638        nstat->tx_errors  = (hwstat->tx_jabber_pkts +
1639                             hwstat->tx_oversize_pkts +
1640                             hwstat->tx_underruns +
1641                             hwstat->tx_excessive_cols +
1642                             hwstat->tx_late_cols);
1643        nstat->multicast  = hwstat->rx_multicast_pkts;
1644        nstat->collisions = hwstat->tx_total_cols;
1645
1646        nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1647                                   hwstat->rx_undersize);
1648        nstat->rx_over_errors   = hwstat->rx_missed_pkts;
1649        nstat->rx_frame_errors  = hwstat->rx_align_errs;
1650        nstat->rx_crc_errors    = hwstat->rx_crc_errs;
1651        nstat->rx_errors        = (hwstat->rx_jabber_pkts +
1652                                   hwstat->rx_oversize_pkts +
1653                                   hwstat->rx_missed_pkts +
1654                                   hwstat->rx_crc_align_errs +
1655                                   hwstat->rx_undersize +
1656                                   hwstat->rx_crc_errs +
1657                                   hwstat->rx_align_errs +
1658                                   hwstat->rx_symbol_errs);
1659
1660        nstat->tx_aborted_errors = hwstat->tx_underruns;
1661#if 0
1662        /* Carrier lost counter seems to be broken for some devices */
1663        nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1664#endif
1665
1666        return nstat;
1667}
1668
1669static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1670{
1671        struct dev_mc_list *mclist;
1672        int i, num_ents;
1673
1674        num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1675        mclist = dev->mc_list;
1676        for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1677                __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1678        }
1679        return i + 1;
1680}
1681
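/* Program the receive filter.  CAM entry 0 holds the unicast station
 * address, entries from 1 up hold the multicast list, and the remaining
 * ones (64 in total) are zeroed; promiscuous mode and CAM-less chips
 * fall back to RXCONFIG_PROMISC.  Caller must hold bp->lock.
 */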
1682static void __b44_set_rx_mode(struct net_device *dev)
1683{
1684        struct b44 *bp = netdev_priv(dev);
1685        u32 val;
1686
1687        val = br32(bp, B44_RXCONFIG);
1688        val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1689        if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
1690                val |= RXCONFIG_PROMISC;
1691                bw32(bp, B44_RXCONFIG, val);
1692        } else {
1693                unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1694                int i = 1;
1695
1696                __b44_set_mac_addr(bp);
1697
1698                if ((dev->flags & IFF_ALLMULTI) ||
1699                    (dev->mc_count > B44_MCAST_TABLE_SIZE))
1700                        val |= RXCONFIG_ALLMULTI;
1701                else
1702                        i = __b44_load_mcast(bp, dev);
1703
1704                for (; i < 64; i++)
1705                        __b44_cam_write(bp, zero, i);
1706
1707                bw32(bp, B44_RXCONFIG, val);
1708                val = br32(bp, B44_CAM_CTRL);
1709                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1710        }
1711}
1712
1713static void b44_set_rx_mode(struct net_device *dev)
1714{
1715        struct b44 *bp = netdev_priv(dev);
1716
1717        spin_lock_irq(&bp->lock);
1718        __b44_set_rx_mode(dev);
1719        spin_unlock_irq(&bp->lock);
1720}
1721
1722static u32 b44_get_msglevel(struct net_device *dev)
1723{
1724        struct b44 *bp = netdev_priv(dev);
1725        return bp->msg_enable;
1726}
1727
1728static void b44_set_msglevel(struct net_device *dev, u32 value)
1729{
1730        struct b44 *bp = netdev_priv(dev);
1731        bp->msg_enable = value;
1732}
1733
1734static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1735{
1736        struct b44 *bp = netdev_priv(dev);
1737        struct ssb_bus *bus = bp->sdev->bus;
1738
1739        strncpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
1740        strncpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1741        switch (bus->bustype) {
1742        case SSB_BUSTYPE_PCI:
1743                strncpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
1744                break;
1745        case SSB_BUSTYPE_PCMCIA:
1746        case SSB_BUSTYPE_SSB:
1747                strncpy(info->bus_info, "SSB", sizeof(info->bus_info));
1748                break;
1749        }
1750}
1751
1752static int b44_nway_reset(struct net_device *dev)
1753{
1754        struct b44 *bp = netdev_priv(dev);
1755        u32 bmcr;
1756        int r;
1757
1758        spin_lock_irq(&bp->lock);
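        /* Read BMCR twice: the first read may return a stale value. */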
1759        b44_readphy(bp, MII_BMCR, &bmcr);
1760        b44_readphy(bp, MII_BMCR, &bmcr);
1761        r = -EINVAL;
1762        if (bmcr & BMCR_ANENABLE) {
1763                b44_writephy(bp, MII_BMCR,
1764                             bmcr | BMCR_ANRESTART);
1765                r = 0;
1766        }
1767        spin_unlock_irq(&bp->lock);
1768
1769        return r;
1770}
1771
1772static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1773{
1774        struct b44 *bp = netdev_priv(dev);
1775
1776        cmd->supported = (SUPPORTED_Autoneg);
1777        cmd->supported |= (SUPPORTED_100baseT_Half |
1778                          SUPPORTED_100baseT_Full |
1779                          SUPPORTED_10baseT_Half |
1780                          SUPPORTED_10baseT_Full |
1781                          SUPPORTED_MII);
1782
1783        cmd->advertising = 0;
1784        if (bp->flags & B44_FLAG_ADV_10HALF)
1785                cmd->advertising |= ADVERTISED_10baseT_Half;
1786        if (bp->flags & B44_FLAG_ADV_10FULL)
1787                cmd->advertising |= ADVERTISED_10baseT_Full;
1788        if (bp->flags & B44_FLAG_ADV_100HALF)
1789                cmd->advertising |= ADVERTISED_100baseT_Half;
1790        if (bp->flags & B44_FLAG_ADV_100FULL)
1791                cmd->advertising |= ADVERTISED_100baseT_Full;
1792        cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1793        cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1794                SPEED_100 : SPEED_10;
1795        cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1796                DUPLEX_FULL : DUPLEX_HALF;
1797        cmd->port = 0;
1798        cmd->phy_address = bp->phy_addr;
1799        cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1800                XCVR_INTERNAL : XCVR_EXTERNAL;
1801        cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1802                AUTONEG_DISABLE : AUTONEG_ENABLE;
1803        if (cmd->autoneg == AUTONEG_ENABLE)
1804                cmd->advertising |= ADVERTISED_Autoneg;
1805        if (!netif_running(dev)) {
1806                cmd->speed = 0;
1807                cmd->duplex = 0xff;
1808        }
1809        cmd->maxtxpkt = 0;
1810        cmd->maxrxpkt = 0;
1811        return 0;
1812}
1813
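/* Driven from user space via ethtool, e.g. (interface name arbitrary):
 *   ethtool -s eth0 speed 100 duplex full autoneg off
 *   ethtool -s eth0 autoneg on
 */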
1814static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1815{
1816        struct b44 *bp = netdev_priv(dev);
1817
1818        /* We do not support gigabit. */
1819        if (cmd->autoneg == AUTONEG_ENABLE) {
1820                if (cmd->advertising &
1821                    (ADVERTISED_1000baseT_Half |
1822                     ADVERTISED_1000baseT_Full))
1823                        return -EINVAL;
1824        } else if ((cmd->speed != SPEED_100 &&
1825                    cmd->speed != SPEED_10) ||
1826                   (cmd->duplex != DUPLEX_HALF &&
1827                    cmd->duplex != DUPLEX_FULL)) {
1828                return -EINVAL;
1829        }
1830
1831        spin_lock_irq(&bp->lock);
1832
1833        if (cmd->autoneg == AUTONEG_ENABLE) {
1834                bp->flags &= ~(B44_FLAG_FORCE_LINK |
1835                               B44_FLAG_100_BASE_T |
1836                               B44_FLAG_FULL_DUPLEX |
1837                               B44_FLAG_ADV_10HALF |
1838                               B44_FLAG_ADV_10FULL |
1839                               B44_FLAG_ADV_100HALF |
1840                               B44_FLAG_ADV_100FULL);
1841                if (cmd->advertising == 0) {
1842                        bp->flags |= (B44_FLAG_ADV_10HALF |
1843                                      B44_FLAG_ADV_10FULL |
1844                                      B44_FLAG_ADV_100HALF |
1845                                      B44_FLAG_ADV_100FULL);
1846                } else {
1847                        if (cmd->advertising & ADVERTISED_10baseT_Half)
1848                                bp->flags |= B44_FLAG_ADV_10HALF;
1849                        if (cmd->advertising & ADVERTISED_10baseT_Full)
1850                                bp->flags |= B44_FLAG_ADV_10FULL;
1851                        if (cmd->advertising & ADVERTISED_100baseT_Half)
1852                                bp->flags |= B44_FLAG_ADV_100HALF;
1853                        if (cmd->advertising & ADVERTISED_100baseT_Full)
1854                                bp->flags |= B44_FLAG_ADV_100FULL;
1855                }
1856        } else {
1857                bp->flags |= B44_FLAG_FORCE_LINK;
1858                bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1859                if (cmd->speed == SPEED_100)
1860                        bp->flags |= B44_FLAG_100_BASE_T;
1861                if (cmd->duplex == DUPLEX_FULL)
1862                        bp->flags |= B44_FLAG_FULL_DUPLEX;
1863        }
1864
1865        if (netif_running(dev))
1866                b44_setup_phy(bp);
1867
1868        spin_unlock_irq(&bp->lock);
1869
1870        return 0;
1871}
1872
1873static void b44_get_ringparam(struct net_device *dev,
1874                              struct ethtool_ringparam *ering)
1875{
1876        struct b44 *bp = netdev_priv(dev);
1877
1878        ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1879        ering->rx_pending = bp->rx_pending;
1880
1881        /* XXX ethtool lacks a tx_max_pending, oops... */
1882}
1883
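/* Changing the ring sizes requires a full halt and re-init of the chip,
 * so any frames still sitting in the old rings are dropped.
 */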
1884static int b44_set_ringparam(struct net_device *dev,
1885                             struct ethtool_ringparam *ering)
1886{
1887        struct b44 *bp = netdev_priv(dev);
1888
1889        if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1890            (ering->rx_mini_pending != 0) ||
1891            (ering->rx_jumbo_pending != 0) ||
1892            (ering->tx_pending > B44_TX_RING_SIZE - 1))
1893                return -EINVAL;
1894
1895        spin_lock_irq(&bp->lock);
1896
1897        bp->rx_pending = ering->rx_pending;
1898        bp->tx_pending = ering->tx_pending;
1899
1900        b44_halt(bp);
1901        b44_init_rings(bp);
1902        b44_init_hw(bp, B44_FULL_RESET);
1903        netif_wake_queue(bp->dev);
1904        spin_unlock_irq(&bp->lock);
1905
1906        b44_enable_ints(bp);
1907
1908        return 0;
1909}
1910
1911static void b44_get_pauseparam(struct net_device *dev,
1912                                struct ethtool_pauseparam *epause)
1913{
1914        struct b44 *bp = netdev_priv(dev);
1915
1916        epause->autoneg =
1917                (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1918        epause->rx_pause =
1919                (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1920        epause->tx_pause =
1921                (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1922}
1923
1924static int b44_set_pauseparam(struct net_device *dev,
1925                                struct ethtool_pauseparam *epause)
1926{
1927        struct b44 *bp = netdev_priv(dev);
1928
1929        spin_lock_irq(&bp->lock);
1930        if (epause->autoneg)
1931                bp->flags |= B44_FLAG_PAUSE_AUTO;
1932        else
1933                bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1934        if (epause->rx_pause)
1935                bp->flags |= B44_FLAG_RX_PAUSE;
1936        else
1937                bp->flags &= ~B44_FLAG_RX_PAUSE;
1938        if (epause->tx_pause)
1939                bp->flags |= B44_FLAG_TX_PAUSE;
1940        else
1941                bp->flags &= ~B44_FLAG_TX_PAUSE;
1942        if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1943                b44_halt(bp);
1944                b44_init_rings(bp);
1945                b44_init_hw(bp, B44_FULL_RESET);
1946        } else {
1947                __b44_set_flow_ctrl(bp, bp->flags);
1948        }
1949        spin_unlock_irq(&bp->lock);
1950
1951        b44_enable_ints(bp);
1952
1953        return 0;
1954}
1955
1956static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1957{
1958        switch (stringset) {
1959        case ETH_SS_STATS:
1960                memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1961                break;
1962        }
1963}
1964
1965static int b44_get_sset_count(struct net_device *dev, int sset)
1966{
1967        switch (sset) {
1968        case ETH_SS_STATS:
1969                return ARRAY_SIZE(b44_gstrings);
1970        default:
1971                return -EOPNOTSUPP;
1972        }
1973}
1974
1975static void b44_get_ethtool_stats(struct net_device *dev,
1976                                  struct ethtool_stats *stats, u64 *data)
1977{
1978        struct b44 *bp = netdev_priv(dev);
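        /* b44_hw_stats is a block of contiguous u32 counters laid out in
         * the same order as b44_gstrings, so they can be copied out by
         * walking a single pointer.
         */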
1979        u32 *val = &bp->hw_stats.tx_good_octets;
1980        u32 i;
1981
1982        spin_lock_irq(&bp->lock);
1983
1984        b44_stats_update(bp);
1985
1986        for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1987                *data++ = *val++;
1988
1989        spin_unlock_irq(&bp->lock);
1990}
1991
1992static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1993{
1994        struct b44 *bp = netdev_priv(dev);
1995
1996        wol->supported = WAKE_MAGIC;
1997        if (bp->flags & B44_FLAG_WOL_ENABLE)
1998                wol->wolopts = WAKE_MAGIC;
1999        else
2000                wol->wolopts = 0;
2001        memset(&wol->sopass, 0, sizeof(wol->sopass));
2002}
2003
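/* Toggled from user space with ethtool: "ethtool -s eth0 wol g" enables
 * magic-packet wake, "ethtool -s eth0 wol d" disables it (interface
 * name arbitrary).
 */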
2004static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2005{
2006        struct b44 *bp = netdev_priv(dev);
2007
2008        spin_lock_irq(&bp->lock);
2009        if (wol->wolopts & WAKE_MAGIC)
2010                bp->flags |= B44_FLAG_WOL_ENABLE;
2011        else
2012                bp->flags &= ~B44_FLAG_WOL_ENABLE;
2013        spin_unlock_irq(&bp->lock);
2014
2015        return 0;
2016}
2017
2018static const struct ethtool_ops b44_ethtool_ops = {
2019        .get_drvinfo            = b44_get_drvinfo,
2020        .get_settings           = b44_get_settings,
2021        .set_settings           = b44_set_settings,
2022        .nway_reset             = b44_nway_reset,
2023        .get_link               = ethtool_op_get_link,
2024        .get_wol                = b44_get_wol,
2025        .set_wol                = b44_set_wol,
2026        .get_ringparam          = b44_get_ringparam,
2027        .set_ringparam          = b44_set_ringparam,
2028        .get_pauseparam         = b44_get_pauseparam,
2029        .set_pauseparam         = b44_set_pauseparam,
2030        .get_msglevel           = b44_get_msglevel,
2031        .set_msglevel           = b44_set_msglevel,
2032        .get_strings            = b44_get_strings,
2033        .get_sset_count         = b44_get_sset_count,
2034        .get_ethtool_stats      = b44_get_ethtool_stats,
2035};
2036
2037static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2038{
2039        struct mii_ioctl_data *data = if_mii(ifr);
2040        struct b44 *bp = netdev_priv(dev);
2041        int err = -EINVAL;
2042
2043        if (!netif_running(dev))
2044                goto out;
2045
2046        spin_lock_irq(&bp->lock);
2047        err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2048        spin_unlock_irq(&bp->lock);
2049out:
2050        return err;
2051}
2052
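/* Fetch board-specific parameters from the SPROM: on native SSB boards
 * carrying more than one Ethernet core, the second and later cores use
 * the et1 MAC and PHY addresses; everything else uses et0.
 */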
2053static int __devinit b44_get_invariants(struct b44 *bp)
2054{
2055        struct ssb_device *sdev = bp->sdev;
2056        int err = 0;
2057        u8 *addr;
2058
2059        bp->dma_offset = ssb_dma_translation(sdev);
2060
2061        if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2062            instance > 1) {
2063                addr = sdev->bus->sprom.r1.et1mac;
2064                bp->phy_addr = sdev->bus->sprom.r1.et1phyaddr;
2065        } else {
2066                addr = sdev->bus->sprom.r1.et0mac;
2067                bp->phy_addr = sdev->bus->sprom.r1.et0phyaddr;
2068        }
2069        memcpy(bp->dev->dev_addr, addr, 6);
2070
2071        if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
2072                printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2073                return -EINVAL;
2074        }
2075
2076        memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2077
2078        bp->imask = IMASK_DEF;
2079
2080        /* XXX - really required?
2081           bp->flags |= B44_FLAG_BUGGY_TXPTR;
2082        */
2083
2084        if (bp->sdev->id.revision >= 7)
2085                bp->flags |= B44_FLAG_B0_ANDLATER;
2086
2087        return err;
2088}
2089
2090static int __devinit b44_init_one(struct ssb_device *sdev,
2091                                  const struct ssb_device_id *ent)
2092{
2093        static int b44_version_printed = 0;
2094        struct net_device *dev;
2095        struct b44 *bp;
2096        int err;
2097        DECLARE_MAC_BUF(mac);
2098
2099        instance++;
2100
2101        if (b44_version_printed++ == 0)
2102                printk(KERN_INFO "%s", version);
2103
2104
2105        dev = alloc_etherdev(sizeof(*bp));
2106        if (!dev) {
2107                dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n");
2108                err = -ENOMEM;
2109                goto out;
2110        }
2111
2112        SET_NETDEV_DEV(dev, sdev->dev);
2113
2114        /* No interesting netdevice features in this card... */
2115        dev->features |= 0;
2116
2117        bp = netdev_priv(dev);
2118        bp->sdev = sdev;
2119        bp->dev = dev;
2120
2121        bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2122
2123        spin_lock_init(&bp->lock);
2124
2125        bp->rx_pending = B44_DEF_RX_RING_PENDING;
2126        bp->tx_pending = B44_DEF_TX_RING_PENDING;
2127
2128        dev->open = b44_open;
2129        dev->stop = b44_close;
2130        dev->hard_start_xmit = b44_start_xmit;
2131        dev->get_stats = b44_get_stats;
2132        dev->set_multicast_list = b44_set_rx_mode;
2133        dev->set_mac_address = b44_set_mac_addr;
2134        dev->do_ioctl = b44_ioctl;
2135        dev->tx_timeout = b44_tx_timeout;
2136        netif_napi_add(dev, &bp->napi, b44_poll, 64);
2137        dev->watchdog_timeo = B44_TX_TIMEOUT;
2138#ifdef CONFIG_NET_POLL_CONTROLLER
2139        dev->poll_controller = b44_poll_controller;
2140#endif
2141        dev->change_mtu = b44_change_mtu;
2142        dev->irq = sdev->irq;
2143        SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2144
2145        netif_carrier_off(dev);
2146
2147        err = ssb_bus_powerup(sdev->bus, 0);
2148        if (err) {
2149                dev_err(sdev->dev,
2150                        "Failed to powerup the bus\n");
2151                goto err_out_free_dev;
2152        }
2153        err = ssb_dma_set_mask(sdev, DMA_30BIT_MASK);
2154        if (err) {
2155                dev_err(sdev->dev,
2156                        "Required 30BIT DMA mask unsupported by the system.\n");
2157                goto err_out_powerdown;
2158        }
2159        err = b44_get_invariants(bp);
2160        if (err) {
2161                dev_err(sdev->dev,
2162                        "Problem fetching invariants of chip, aborting.\n");
2163                goto err_out_powerdown;
2164        }
2165
2166        bp->mii_if.dev = dev;
2167        bp->mii_if.mdio_read = b44_mii_read;
2168        bp->mii_if.mdio_write = b44_mii_write;
2169        bp->mii_if.phy_id = bp->phy_addr;
2170        bp->mii_if.phy_id_mask = 0x1f;
2171        bp->mii_if.reg_num_mask = 0x1f;
2172
2173        /* By default, advertise all speed/duplex settings. */
2174        bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2175                      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2176
2177        /* By default, auto-negotiate PAUSE. */
2178        bp->flags |= B44_FLAG_PAUSE_AUTO;
2179
2180        err = register_netdev(dev);
2181        if (err) {
2182                dev_err(sdev->dev, "Cannot register net device, aborting.\n");
2183                goto err_out_powerdown;
2184        }
2185
2186        ssb_set_drvdata(sdev, dev);
2187
2188        /* Chip reset provides power to the b44 MAC & PCI cores, which
2189         * is necessary for MAC register access.
2190         */
2191        b44_chip_reset(bp);
2192
2193        printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %s\n",
2194               dev->name, print_mac(mac, dev->dev_addr));
2195
2196        return 0;
2197
2198err_out_powerdown:
2199        ssb_bus_may_powerdown(sdev->bus);
2200
2201err_out_free_dev:
2202        free_netdev(dev);
2203
2204out:
2205        return err;
2206}
2207
2208static void __devexit b44_remove_one(struct ssb_device *sdev)
2209{
2210        struct net_device *dev = ssb_get_drvdata(sdev);
2211
2212        unregister_netdev(dev);
2213        ssb_bus_may_powerdown(sdev->bus);
2214        free_netdev(dev);
2215        ssb_set_drvdata(sdev, NULL);
2216}
2217
2218static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
2219{
2220        struct net_device *dev = ssb_get_drvdata(sdev);
2221        struct b44 *bp = netdev_priv(dev);
2222
2223        if (!netif_running(dev))
2224                return 0;
2225
2226        del_timer_sync(&bp->timer);
2227
2228        spin_lock_irq(&bp->lock);
2229
2230        b44_halt(bp);
2231        netif_carrier_off(bp->dev);
2232        netif_device_detach(bp->dev);
2233        b44_free_rings(bp);
2234
2235        spin_unlock_irq(&bp->lock);
2236
2237        free_irq(dev->irq, dev);
2238        if (bp->flags & B44_FLAG_WOL_ENABLE) {
2239                b44_init_hw(bp, B44_PARTIAL_RESET);
2240                b44_setup_wol(bp);
2241        }
2242
2243        return 0;
2244}
2245
2246static int b44_resume(struct ssb_device *sdev)
2247{
2248        struct net_device *dev = ssb_get_drvdata(sdev);
2249        struct b44 *bp = netdev_priv(dev);
2250        int rc = 0;
2251
2252        rc = ssb_bus_powerup(sdev->bus, 0);
2253        if (rc) {
2254                dev_err(sdev->dev,
2255                        "Failed to powerup the bus\n");
2256                return rc;
2257        }
2258
2259        if (!netif_running(dev))
2260                return 0;
2261
2262        rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2263        if (rc) {
2264                printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2265                return rc;
2266        }
2267
2268        spin_lock_irq(&bp->lock);
2269
2270        b44_init_rings(bp);
2271        b44_init_hw(bp, B44_FULL_RESET);
2272        netif_device_attach(bp->dev);
2273        spin_unlock_irq(&bp->lock);
2274
2275        b44_enable_ints(bp);
2276        netif_wake_queue(dev);
2277
2278        mod_timer(&bp->timer, jiffies + 1);
2279
2280        return 0;
2281}
2282
2283static struct ssb_driver b44_ssb_driver = {
2284        .name           = DRV_MODULE_NAME,
2285        .id_table       = b44_ssb_tbl,
2286        .probe          = b44_init_one,
2287        .remove         = __devexit_p(b44_remove_one),
2288        .suspend        = b44_suspend,
2289        .resume         = b44_resume,
2290};
2291
2292static inline int b44_pci_init(void)
2293{
2294        int err = 0;
2295#ifdef CONFIG_B44_PCI
2296        err = ssb_pcihost_register(&b44_pci_driver);
2297#endif
2298        return err;
2299}
2300
2301static inline void b44_pci_exit(void)
2302{
2303#ifdef CONFIG_B44_PCI
2304        ssb_pcihost_unregister(&b44_pci_driver);
2305#endif
2306}
2307
2308static int __init b44_init(void)
2309{
2310        unsigned int dma_desc_align_size = dma_get_cache_alignment();
2311        int err;
2312
2313        /* Setup parameters for syncing RX/TX DMA descriptors */
2314        dma_desc_align_mask = ~(dma_desc_align_size - 1);
2315        dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
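        /* For example, a 32-byte cache line gives dma_desc_align_mask =
         * ~31 = 0xffffffe0, and with the 8-byte struct dma_desc a sync
         * size of 32: descriptors are synced in cache-line-aligned
         * chunks at least one descriptor long.
         */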
2316
2317        err = b44_pci_init();
2318        if (err)
2319                return err;
2320        err = ssb_driver_register(&b44_ssb_driver);
2321        if (err)
2322                b44_pci_exit();
2323        return err;
2324}
2325
2326static void __exit b44_cleanup(void)
2327{
2328        ssb_driver_unregister(&b44_ssb_driver);
2329        b44_pci_exit();
2330}
2331
2332module_init(b44_init);
2333module_exit(b44_cleanup);
2334
2335