/* Source: linux/drivers/net/ethernet/broadcom/b44.c */
   1/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
   2 *
   3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
   4 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
   5 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
   6 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
   7 * Copyright (C) 2006 Broadcom Corporation.
   8 * Copyright (C) 2007 Michael Buesch <m@bues.ch>
   9 * Copyright (C) 2013 Hauke Mehrtens <hauke@hauke-m.de>
  10 *
  11 * Distribute under GPL.
  12 */
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/kernel.h>
  17#include <linux/module.h>
  18#include <linux/moduleparam.h>
  19#include <linux/types.h>
  20#include <linux/netdevice.h>
  21#include <linux/ethtool.h>
  22#include <linux/mii.h>
  23#include <linux/if_ether.h>
  24#include <linux/if_vlan.h>
  25#include <linux/etherdevice.h>
  26#include <linux/pci.h>
  27#include <linux/delay.h>
  28#include <linux/init.h>
  29#include <linux/interrupt.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/ssb/ssb.h>
  32#include <linux/slab.h>
  33#include <linux/phy.h>
  34
  35#include <linux/uaccess.h>
  36#include <asm/io.h>
  37#include <asm/irq.h>
  38
  39
  40#include "b44.h"
  41
  42#define DRV_MODULE_NAME         "b44"
  43#define DRV_MODULE_VERSION      "2.0"
  44#define DRV_DESCRIPTION         "Broadcom 44xx/47xx 10/100 PCI ethernet driver"
  45
  46#define B44_DEF_MSG_ENABLE        \
  47        (NETIF_MSG_DRV          | \
  48         NETIF_MSG_PROBE        | \
  49         NETIF_MSG_LINK         | \
  50         NETIF_MSG_TIMER        | \
  51         NETIF_MSG_IFDOWN       | \
  52         NETIF_MSG_IFUP         | \
  53         NETIF_MSG_RX_ERR       | \
  54         NETIF_MSG_TX_ERR)
  55
  56/* length of time before we decide the hardware is borked,
  57 * and dev->tx_timeout() should be called to fix the problem
  58 */
  59#define B44_TX_TIMEOUT                  (5 * HZ)
  60
  61/* hardware minimum and maximum for a single frame's data payload */
  62#define B44_MIN_MTU                     ETH_ZLEN
  63#define B44_MAX_MTU                     ETH_DATA_LEN
  64
  65#define B44_RX_RING_SIZE                512
  66#define B44_DEF_RX_RING_PENDING         200
  67#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
  68                                 B44_RX_RING_SIZE)
  69#define B44_TX_RING_SIZE                512
  70#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
  71#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
  72                                 B44_TX_RING_SIZE)
  73
  74#define TX_RING_GAP(BP) \
  75        (B44_TX_RING_SIZE - (BP)->tx_pending)
  76#define TX_BUFFS_AVAIL(BP)                                              \
  77        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
  78          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
  79          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
  80#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))
  81
  82#define RX_PKT_OFFSET           (RX_HEADER_LEN + 2)
  83#define RX_PKT_BUF_SZ           (1536 + RX_PKT_OFFSET)
  84
  85/* minimum number of free TX descriptors required to wake up TX process */
  86#define B44_TX_WAKEUP_THRESH            (B44_TX_RING_SIZE / 4)
  87
  88/* b44 internal pattern match filter info */
  89#define B44_PATTERN_BASE        0x400
  90#define B44_PATTERN_SIZE        0x80
  91#define B44_PMASK_BASE          0x600
  92#define B44_PMASK_SIZE          0x10
  93#define B44_MAX_PATTERNS        16
  94#define B44_ETHIPV6UDP_HLEN     62
  95#define B44_ETHIPV4UDP_HLEN     42
  96
  97MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
  98MODULE_DESCRIPTION(DRV_DESCRIPTION);
  99MODULE_LICENSE("GPL");
 100MODULE_VERSION(DRV_MODULE_VERSION);
 101
 102static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
 103module_param(b44_debug, int, 0);
 104MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
 105
 106
 107#ifdef CONFIG_B44_PCI
 108static const struct pci_device_id b44_pci_tbl[] = {
 109        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
 110        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
 111        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
 112        { 0 } /* terminate list with empty entry */
 113};
 114MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
 115
 116static struct pci_driver b44_pci_driver = {
 117        .name           = DRV_MODULE_NAME,
 118        .id_table       = b44_pci_tbl,
 119};
 120#endif /* CONFIG_B44_PCI */
 121
 122static const struct ssb_device_id b44_ssb_tbl[] = {
 123        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
 124        {},
 125};
 126MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
 127
 128static void b44_halt(struct b44 *);
 129static void b44_init_rings(struct b44 *);
 130
 131#define B44_FULL_RESET          1
 132#define B44_FULL_RESET_SKIP_PHY 2
 133#define B44_PARTIAL_RESET       3
 134#define B44_CHIP_RESET_FULL     4
 135#define B44_CHIP_RESET_PARTIAL  5
 136
 137static void b44_init_hw(struct b44 *, int);
 138
 139static int dma_desc_sync_size;
 140static int instance;
 141
 142static const char b44_gstrings[][ETH_GSTRING_LEN] = {
 143#define _B44(x...)      # x,
 144B44_STAT_REG_DECLARE
 145#undef _B44
 146};
 147
/* Flush one DMA descriptor (dma_desc_sync_size bytes at dma_base + offset)
 * toward the device so it observes CPU-side writes.  Callers only invoke
 * this when the ring was set up with the B44_FLAG_*_RING_HACK coherency
 * workaround (see b44_alloc_rx_skb/b44_recycle_rx).
 */
static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
				   dma_desc_sync_size, dir);
}
 156
/* Counterpart of b44_sync_dma_desc_for_device: make one descriptor's
 * device-side contents visible to the CPU before it is read or modified.
 */
static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
				dma_desc_sync_size, dir);
}
 165
/* Read a 32-bit chip register through the SSB bus abstraction. */
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}
 170
/* Write a 32-bit chip register through the SSB bus abstraction.
 * Note: SSB writes may be posted; readers that need the write to have
 * landed follow up with a br32() flush (see b44_disable_ints).
 */
static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}
 176
 177static int b44_wait_bit(struct b44 *bp, unsigned long reg,
 178                        u32 bit, unsigned long timeout, const int clear)
 179{
 180        unsigned long i;
 181
 182        for (i = 0; i < timeout; i++) {
 183                u32 val = br32(bp, reg);
 184
 185                if (clear && !(val & bit))
 186                        break;
 187                if (!clear && (val & bit))
 188                        break;
 189                udelay(10);
 190        }
 191        if (i == timeout) {
 192                if (net_ratelimit())
 193                        netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
 194                                   bit, reg, clear ? "clear" : "set");
 195
 196                return -ENODEV;
 197        }
 198        return 0;
 199}
 200
/* Read one 6-byte MAC address out of CAM slot @index into @data.
 *
 * Triggers a CAM read, waits for CAM_CTRL_BUSY to clear, then unpacks
 * the address: DATA_LO holds bytes 2-5 (MSB first), DATA_HI holds
 * bytes 0-1.  Return value of the busy-wait is not checked; a timeout
 * would leave @data with whatever the data registers contained.
 */
static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
			    (index << CAM_CTRL_INDEX_SHIFT)));

	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

	val = br32(bp, B44_CAM_DATA_LO);

	data[2] = (val >> 24) & 0xFF;
	data[3] = (val >> 16) & 0xFF;
	data[4] = (val >> 8) & 0xFF;
	data[5] = (val >> 0) & 0xFF;

	val = br32(bp, B44_CAM_DATA_HI);

	data[0] = (val >> 8) & 0xFF;
	data[1] = (val >> 0) & 0xFF;
}
 222
/* Write the 6-byte MAC address @data into CAM slot @index.
 *
 * Packing mirrors __b44_cam_read: bytes 2-5 go to DATA_LO, bytes 0-1
 * plus the VALID flag to DATA_HI.  The data registers must be loaded
 * before the WRITE command is issued; the trailing busy-wait lets the
 * CAM commit before the caller touches it again.
 */
static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
 240
/* Mask all chip interrupts.  Does not flush the posted write; use
 * b44_disable_ints() when the masking must be complete before returning.
 */
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}
 245
/* Mask all chip interrupts and flush the posted write by reading the
 * mask register back, guaranteeing the hardware has seen the disable.
 */
static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}
 253
/* Re-enable the interrupt sources recorded in bp->imask. */
static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}
 258
/* Read MII register @reg of PHY @phy_addr over the chip's MDIO engine.
 *
 * Acks any pending MII interrupt, issues the read frame, then waits for
 * EMAC_INT_MII to signal completion.  *val is filled from the data
 * register unconditionally -- on timeout (non-zero return) it holds
 * whatever the register contained, so callers must check the result.
 */
static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}
 274
/* Write @val to MII register @reg of PHY @phy_addr over the MDIO engine.
 * Returns 0 on completion or -ENODEV if the MII interrupt never fired.
 */
static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
 286
 287static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
 288{
 289        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 290                return 0;
 291
 292        return __b44_readphy(bp, bp->phy_addr, reg, val);
 293}
 294
 295static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
 296{
 297        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 298                return 0;
 299
 300        return __b44_writephy(bp, bp->phy_addr, reg, val);
 301}
 302
 303/* miilib interface */
 304static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
 305{
 306        u32 val;
 307        struct b44 *bp = netdev_priv(dev);
 308        int rc = __b44_readphy(bp, phy_id, location, &val);
 309        if (rc)
 310                return 0xffffffff;
 311        return val;
 312}
 313
/* miilib write hook: best-effort MDIO write; errors are ignored because
 * the miilib interface has no way to report them.
 */
static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
			       int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}
 320
 321static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
 322{
 323        u32 val;
 324        struct b44 *bp = bus->priv;
 325        int rc = __b44_readphy(bp, phy_id, location, &val);
 326        if (rc)
 327                return 0xffffffff;
 328        return val;
 329}
 330
/* phylib MDIO-bus write hook: forwards to the raw MDIO write and
 * propagates its 0/-ENODEV result.
 */
static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
				 u16 val)
{
	struct b44 *bp = bus->priv;
	return __b44_writephy(bp, phy_id, location, val);
}
 337
 338static int b44_phy_reset(struct b44 *bp)
 339{
 340        u32 val;
 341        int err;
 342
 343        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 344                return 0;
 345        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
 346        if (err)
 347                return err;
 348        udelay(100);
 349        err = b44_readphy(bp, MII_BMCR, &val);
 350        if (!err) {
 351                if (val & BMCR_RESET) {
 352                        netdev_err(bp->dev, "PHY Reset would not complete\n");
 353                        err = -ENODEV;
 354                }
 355        }
 356
 357        return err;
 358}
 359
/* Program the MAC's pause-frame handling from @pause_flags
 * (B44_FLAG_RX_PAUSE / B44_FLAG_TX_PAUSE) and mirror the result into
 * bp->flags.  Both registers are updated read-modify-write so unrelated
 * bits are preserved.
 */
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	/* RX pause: honor pause frames received from the link partner. */
	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	/* TX pause: emit pause frames, with a 0xc0 RX-FIFO high-water
	 * mark controlling when they are generated.
	 */
	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}
 382
 383static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
 384{
 385        u32 pause_enab = 0;
 386
 387        /* The driver supports only rx pause by default because
 388           the b44 mac tx pause mechanism generates excessive
 389           pause frames.
 390           Use ethtool to turn on b44 tx pause if necessary.
 391         */
 392        if ((local & ADVERTISE_PAUSE_CAP) &&
 393            (local & ADVERTISE_PAUSE_ASYM)){
 394                if ((remote & LPA_PAUSE_ASYM) &&
 395                    !(remote & LPA_PAUSE_CAP))
 396                        pause_enab |= B44_FLAG_RX_PAUSE;
 397        }
 398
 399        __b44_set_flow_ctrl(bp, pause_enab);
 400}
 401
#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
/* Clear a wrongly-set MII "isolate" bit on PHY address 0.
 *
 * Only boards whose NVRAM reports boardnum == 2 (Linksys WAP54G v1.0)
 * are affected; everything else returns immediately.
 */
static void b44_wap54g10_workaround(struct b44 *bp)
{
	char buf[20];
	u32 val;
	int err;

	/*
	 * workaround for bad hardware design in Linksys WAP54G v1.0
	 * see https://dev.openwrt.org/ticket/146
	 * check and reset bit "isolate"
	 */
	if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
		return;
	if (simple_strtoul(buf, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
/* No-op stub when not building for BCM47xx boards. */
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif
 437
 438static int b44_setup_phy(struct b44 *bp)
 439{
 440        u32 val;
 441        int err;
 442
 443        b44_wap54g10_workaround(bp);
 444
 445        if (bp->flags & B44_FLAG_EXTERNAL_PHY)
 446                return 0;
 447        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
 448                goto out;
 449        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
 450                                val & MII_ALEDCTRL_ALLMSK)) != 0)
 451                goto out;
 452        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
 453                goto out;
 454        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
 455                                val | MII_TLEDCTRL_ENABLE)) != 0)
 456                goto out;
 457
 458        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
 459                u32 adv = ADVERTISE_CSMA;
 460
 461                if (bp->flags & B44_FLAG_ADV_10HALF)
 462                        adv |= ADVERTISE_10HALF;
 463                if (bp->flags & B44_FLAG_ADV_10FULL)
 464                        adv |= ADVERTISE_10FULL;
 465                if (bp->flags & B44_FLAG_ADV_100HALF)
 466                        adv |= ADVERTISE_100HALF;
 467                if (bp->flags & B44_FLAG_ADV_100FULL)
 468                        adv |= ADVERTISE_100FULL;
 469
 470                if (bp->flags & B44_FLAG_PAUSE_AUTO)
 471                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 472
 473                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
 474                        goto out;
 475                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
 476                                                       BMCR_ANRESTART))) != 0)
 477                        goto out;
 478        } else {
 479                u32 bmcr;
 480
 481                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
 482                        goto out;
 483                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
 484                if (bp->flags & B44_FLAG_100_BASE_T)
 485                        bmcr |= BMCR_SPEED100;
 486                if (bp->flags & B44_FLAG_FULL_DUPLEX)
 487                        bmcr |= BMCR_FULLDPLX;
 488                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
 489                        goto out;
 490
 491                /* Since we will not be negotiating there is no safe way
 492                 * to determine if the link partner supports flow control
 493                 * or not.  So just disable it completely in this case.
 494                 */
 495                b44_set_flow_ctrl(bp, 0, 0);
 496        }
 497
 498out:
 499        return err;
 500}
 501
/* Accumulate the chip's hardware MIB counters into bp->hw_stats.
 *
 * Depends on the u64 fields of hw_stats (starting at tx_good_octets)
 * being declared in exactly the order of the TX then RX counter
 * registers, so a single walking pointer can sweep both banks.  The
 * u64_stats sync pair protects 64-bit readers on 32-bit hosts.
 */
static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u64 *val;

	val = &bp->hw_stats.tx_good_octets;
	u64_stats_update_begin(&bp->hw_stats.syncp);

	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	/* Pad */
	/* NOTE(review): this increment is dead code -- reg is
	 * reinitialized by the following loop.  It only documents the
	 * 8-register gap between the TX and RX counter banks.
	 */
	reg += 8*4UL;

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	u64_stats_update_end(&bp->hw_stats.syncp);
}
 523
 524static void b44_link_report(struct b44 *bp)
 525{
 526        if (!netif_carrier_ok(bp->dev)) {
 527                netdev_info(bp->dev, "Link is down\n");
 528        } else {
 529                netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
 530                            (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
 531                            (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
 532
 533                netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
 534                            (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
 535                            (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
 536        }
 537}
 538
/* Periodic link supervision (called from b44_timer under bp->lock):
 * read PHY status, sync speed/duplex flags and the MAC's duplex bit,
 * and raise/drop the carrier with a log message on every transition.
 */
static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		/* External PHY: link state is phylib's job; just assume
		 * 100 Mbit, program the MAC duplex bit once, and force
		 * the carrier up.
		 */
		bp->flags |= B44_FLAG_100_BASE_T;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	/* bmsr == 0xffff means the PHY did not answer (all-ones read). */
	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		/* Mirror the PHY's resolved speed/duplex into bp->flags. */
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			/* Keep the MAC's duplex setting in step with the
			 * PHY before declaring the link up.
			 */
			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			/* Resolve pause settings from the negotiated
			 * advertisement unless the link is forced.
			 */
			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
		if (bmsr & BMSR_JCD)
			netdev_warn(bp->dev, "Jabber detected in PHY\n");
	}
}
 601
/* Once-a-second housekeeping timer: poll the PHY for link changes and
 * harvest the hardware counters, both under bp->lock, then re-arm on a
 * rounded jiffy to batch wakeups with other timers.
 */
static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}
 616
/* Reclaim completed TX descriptors (called with bp->lock held from
 * b44_poll): walk from tx_cons up to the hardware's current descriptor,
 * unmapping and freeing each skb, then credit the BQL queue and wake the
 * netdev queue once enough ring space is free again.
 */
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* Hardware reports the byte offset of the current descriptor;
	 * convert it to a ring index.
	 */
	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		dma_unmap_single(bp->sdev->dma_dev,
				 rp->mapping,
				 skb->len,
				 DMA_TO_DEVICE);
		rp->skb = NULL;

		bytes_compl += skb->len;
		pkts_compl++;

		dev_kfree_skb_irq(skb);
	}

	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}
 652
/* Works like this.  This chip writes a 'struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
/* Allocate and map a fresh RX skb into ring slot @dest_idx_unmasked
 * (masked to the ring size), optionally clearing the source slot
 * @src_idx it replaces.  Returns the buffer size (RX_PKT_BUF_SZ) on
 * success or -ENOMEM on failure.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
				 RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Sigh... retry once from the GFP_DMA (low-memory) zone,
		 * which should map below the 1GB limit.
		 */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping,
					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
					 RX_PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		/* From now on always copy on receive, so the constrained
		 * GFP_DMA buffers can be recycled in place.
		 */
		bp->force_copybreak = 1;
	}

	/* The chip will deposit the rx_header at the very start of the
	 * buffer; clear it so stale length/flags are never read.
	 */
	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;	/* last descriptor wraps the ring */

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					    dest_idx * sizeof(*dp),
					    DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}
 732
/* Reuse the skb/mapping currently in ring slot @src_idx by moving it to
 * slot @dest_idx_unmasked (masked): resets its rx_header, copies the
 * descriptor address, and fixes up the end-of-table bit for the new
 * position.  Avoids allocating a fresh buffer when the packet was
 * dropped or copied out.
 */
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	/* Clear the header so the chip's next write is unambiguous. */
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	/* Ensure the CPU sees the device's view of the source descriptor
	 * before copying its fields.
	 */
	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					 src_idx * sizeof(*src_desc),
					 DMA_BIDIRECTIONAL);

	/* EOT belongs to the last ring slot only; set or clear it for
	 * the destination position (ctrl is little-endian on the wire).
	 */
	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	/* Hand the data buffer back to the device. */
	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
				   RX_PKT_BUF_SZ,
				   DMA_FROM_DEVICE);
}
 778
/* NAPI receive: process up to @budget packets between the consumer index
 * and the hardware's producer position.  Large packets are handed up
 * directly and their ring slot refilled; small ones (or everything when
 * force_copybreak is set) are copied into a fresh skb so the DMA buffer
 * can be recycled in place.  Returns the number of packets delivered.
 */
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	/* Producer = byte offset of chip's current descriptor -> index. */
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
					RX_PKT_BUF_SZ,
					DMA_FROM_DEVICE);
		/* The chip prepends a rx_header with length and error
		 * flags to every packet.
		 */
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			/* The DMA engine may raise the interrupt before
			 * the header length lands in memory; poll briefly
			 * for it before giving up on the packet.
			 */
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			/* Big packet: refill the slot with a new buffer
			 * and pass the original skb up, stripping the
			 * rx_header prefix.
			 */
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			dma_unmap_single(bp->sdev->dma_dev, map,
					 skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			/* Small packet: recycle the DMA buffer and copy
			 * the payload into a right-sized skb.
			 */
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = napi_alloc_skb(&bp->napi, len);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	/* Tell the chip how far we have consumed. */
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
 865
/* NAPI poll callback.  Reaps completed TX descriptors, pulls up to
 * @budget packets off the RX ring, and performs the recovery actions
 * for conditions b44_interrupt() latched into bp->istat: an RX FIFO
 * overflow (ISTAT_RFO) gets a fast core re-enable plus ring/hw
 * re-init, other error bits get a full halt/re-init.  Chip interrupts
 * are re-enabled once less than @budget work was done.
 */
static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
		bp->istat &= ~ISTAT_RFO;
		b44_disable_ints(bp);
		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	/* RX runs without bp->lock; b44_rx() synchronizes via the ring
	 * indices and the DMARX_PTR doorbell.
	 */
	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		/* Report zero work so napi_complete_done() runs below
		 * and interrupts are re-enabled after the reset.
		 */
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		b44_enable_ints(bp);
	}

	return work_done;
}
 911
/* Hard interrupt handler (IRQF_SHARED).  Reads and acknowledges the
 * pending interrupt causes, stashes them in bp->istat for b44_poll(),
 * masks further chip interrupts and schedules NAPI.  Returns
 * IRQ_NONE when none of our enabled status bits are set, so a shared
 * line can be passed on.
 */
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			netdev_info(dev, "late interrupt\n");
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		}

irq_ack:
		/* Ack the causes we observed; the readback flushes the
		 * posted MMIO writes above.
		 */
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}
 953
/* dev->tx_timeout() hook: the stack saw no TX progress for
 * B44_TX_TIMEOUT jiffies, so halt the chip and rebuild rings/hw from
 * scratch.  Interrupts are re-enabled and the queue is woken only
 * after bp->lock has been dropped.
 */
static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}
 972
/* Queue one skb for transmission.  The chip can only DMA to/from the
 * low 1GB (30-bit addressing), so mappings above DMA_BIT_MASK(30) are
 * redone through a GFP_DMA bounce buffer.  Returns NETDEV_TX_OK on
 * success; NETDEV_TX_BUSY if the ring is full or mapping/bouncing
 * failed (the skb is then left with the stack for requeue).
 */
static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;
	unsigned long flags;

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_out;
	}

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
					     DMA_TO_DEVICE);

		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
					 len, DMA_TO_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						     len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		/* Hand the original skb back and transmit the bounce copy. */
		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	/* One descriptor per packet: interrupt-on-completion, and both
	 * start- and end-of-frame set (no scatter/gather).
	 */
	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;	/* last slot: wrap back to 0 */

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					    entry * sizeof(bp->tx_ring[0]),
					    DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	/* Descriptor must be visible before the doorbell write below. */
	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	netdev_sent_queue(dev, skb->len);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}
1062
/* ndo_change_mtu hook.  The chip enforces the frame size through the
 * B44_RXMAXLEN/B44_TXMAXLEN registers programmed in b44_init_hw(), so
 * an MTU change on a running interface requires a full halt and
 * re-init cycle; on a stopped interface just recording the value is
 * enough.  Always returns 0.
 */
static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
1086
1087/* Free up pending packets in all rx/tx rings.
1088 *
1089 * The chip has been shut down and the driver detached from
1090 * the networking, so no interrupts or new tx packets will
1091 * end up in the driver.  bp->lock is not held and we are not
1092 * in an interrupt context and thus may sleep.
1093 */
1094static void b44_free_rings(struct b44 *bp)
1095{
1096        struct ring_info *rp;
1097        int i;
1098
1099        for (i = 0; i < B44_RX_RING_SIZE; i++) {
1100                rp = &bp->rx_buffers[i];
1101
1102                if (rp->skb == NULL)
1103                        continue;
1104                dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
1105                                 DMA_FROM_DEVICE);
1106                dev_kfree_skb_any(rp->skb);
1107                rp->skb = NULL;
1108        }
1109
1110        /* XXX needs changes once NETIF_F_SG is set... */
1111        for (i = 0; i < B44_TX_RING_SIZE; i++) {
1112                rp = &bp->tx_buffers[i];
1113
1114                if (rp->skb == NULL)
1115                        continue;
1116                dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
1117                                 DMA_TO_DEVICE);
1118                dev_kfree_skb_any(rp->skb);
1119                rp->skb = NULL;
1120        }
1121}
1122
1123/* Initialize tx/rx rings for packet processing.
1124 *
1125 * The chip has been shut down and the driver detached from
1126 * the networking, so no interrupts or new tx packets will
1127 * end up in the driver.
1128 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	/* Drop anything still queued, then zero both descriptor tables. */
	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	/* When the rings live in streaming (kzalloc + dma_map_single)
	 * memory rather than coherent memory, the zeroed tables must be
	 * flushed to the device explicitly.
	 */
	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES, DMA_TO_DEVICE);

	/* Pre-fill the RX ring; stop early (without error) if buffer
	 * allocation runs dry.
	 */
	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}
1151
1152/*
1153 * Must not be invoked with interrupt sources disabled and
1154 * the hardware shutdown down.
1155 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	/* The *_RING_HACK flags record which allocator produced each
	 * ring (kzalloc + dma_map_single vs. dma_alloc_coherent), so
	 * teardown must use the matching release path.
	 */
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}
1185
1186/*
1187 * Must not be invoked with interrupt sources disabled and
1188 * the hardware shutdown down.  Can sleep.
1189 */
1190static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
1191{
1192        int size;
1193
1194        size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
1195        bp->rx_buffers = kzalloc(size, gfp);
1196        if (!bp->rx_buffers)
1197                goto out_err;
1198
1199        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1200        bp->tx_buffers = kzalloc(size, gfp);
1201        if (!bp->tx_buffers)
1202                goto out_err;
1203
1204        size = DMA_TABLE_BYTES;
1205        bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1206                                         &bp->rx_ring_dma, gfp);
1207        if (!bp->rx_ring) {
1208                /* Allocation may have failed due to pci_alloc_consistent
1209                   insisting on use of GFP_DMA, which is more restrictive
1210                   than necessary...  */
1211                struct dma_desc *rx_ring;
1212                dma_addr_t rx_ring_dma;
1213
1214                rx_ring = kzalloc(size, gfp);
1215                if (!rx_ring)
1216                        goto out_err;
1217
1218                rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
1219                                             DMA_TABLE_BYTES,
1220                                             DMA_BIDIRECTIONAL);
1221
1222                if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
1223                        rx_ring_dma + size > DMA_BIT_MASK(30)) {
1224                        kfree(rx_ring);
1225                        goto out_err;
1226                }
1227
1228                bp->rx_ring = rx_ring;
1229                bp->rx_ring_dma = rx_ring_dma;
1230                bp->flags |= B44_FLAG_RX_RING_HACK;
1231        }
1232
1233        bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
1234                                         &bp->tx_ring_dma, gfp);
1235        if (!bp->tx_ring) {
1236                /* Allocation may have failed due to ssb_dma_alloc_consistent
1237                   insisting on use of GFP_DMA, which is more restrictive
1238                   than necessary...  */
1239                struct dma_desc *tx_ring;
1240                dma_addr_t tx_ring_dma;
1241
1242                tx_ring = kzalloc(size, gfp);
1243                if (!tx_ring)
1244                        goto out_err;
1245
1246                tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
1247                                             DMA_TABLE_BYTES,
1248                                             DMA_TO_DEVICE);
1249
1250                if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
1251                        tx_ring_dma + size > DMA_BIT_MASK(30)) {
1252                        kfree(tx_ring);
1253                        goto out_err;
1254                }
1255
1256                bp->tx_ring = tx_ring;
1257                bp->tx_ring_dma = tx_ring_dma;
1258                bp->flags |= B44_FLAG_TX_RING_HACK;
1259        }
1260
1261        return 0;
1262
1263out_err:
1264        b44_free_consistent(bp);
1265        return -ENOMEM;
1266}
1267
1268/* bp->lock is held. */
1269static void b44_clear_stats(struct b44 *bp)
1270{
1271        unsigned long reg;
1272
1273        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1274        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1275                br32(bp, reg);
1276        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1277                br32(bp, reg);
1278}
1279
1280/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
	struct ssb_device *sdev = bp->sdev;
	bool was_enabled;

	was_enabled = ssb_device_is_enabled(bp->sdev);

	ssb_device_enable(bp->sdev, 0);
	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

	/* If the core was already running, quiesce the MAC and both DMA
	 * engines before touching anything else, and reset the software
	 * ring indices to match the hardware's post-reset state.
	 */
	if (was_enabled) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	}

	b44_clear_stats(bp);

	/*
	 * Don't enable PHY if we are doing a partial reset
	 * we are probably going to power down
	 */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	/* Program the MDIO clock divider: derived from the bus clock on
	 * native SSB, a fixed divider on PCI hosts.
	 */
	switch (sdev->bus->bustype) {
	case SSB_BUSTYPE_SSB:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
					B44_MDC_RATIO)
		     & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCI:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (0x0d & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}

	br32(bp, B44_MDIO_CTRL);	/* flush the posted write */

	/* DEVCTRL_IPP clear means an external PHY is attached. */
	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags |= B44_FLAG_EXTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		/* Take the internal PHY out of reset if it is held there. */
		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
	}
}
1348
1349/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	/* reset PHY */
	b44_phy_reset(bp);
	/* power down PHY */
	netdev_info(bp->dev, "powering down PHY\n");
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
	/* now reset the chip, but without enabling the MAC&PHY
	 * part of it. This has to be done _after_ we shut down the PHY */
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	else
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}
1365
1366/* bp->lock is held. */
1367static void __b44_set_mac_addr(struct b44 *bp)
1368{
1369        bw32(bp, B44_CAM_CTRL, 0);
1370        if (!(bp->dev->flags & IFF_PROMISC)) {
1371                u32 val;
1372
1373                __b44_cam_write(bp, bp->dev->dev_addr, 0);
1374                val = br32(bp, B44_CAM_CTRL);
1375                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1376        }
1377}
1378
1379static int b44_set_mac_addr(struct net_device *dev, void *p)
1380{
1381        struct b44 *bp = netdev_priv(dev);
1382        struct sockaddr *addr = p;
1383        u32 val;
1384
1385        if (netif_running(dev))
1386                return -EBUSY;
1387
1388        if (!is_valid_ether_addr(addr->sa_data))
1389                return -EINVAL;
1390
1391        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1392
1393        spin_lock_irq(&bp->lock);
1394
1395        val = br32(bp, B44_RXCONFIG);
1396        if (!(val & RXCONFIG_CAM_ABSENT))
1397                __b44_set_mac_addr(bp);
1398
1399        spin_unlock_irq(&bp->lock);
1400
1401        return 0;
1402}
1403
1404/* Called at device open time to get the chip ready for
1405 * packet processing.  Invoked with bp->lock held.
1406 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	/* B44_FULL_RESET_SKIP_PHY callers keep the current PHY setup. */
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	/* A partial reset (WOL shutdown path) only needs the RX engine;
	 * a full reset also programs both ring base addresses and the
	 * RX producer pointer, and arms clear-on-read MIB counters.
	 */
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));

	netdev_reset_queue(bp->dev);
}
1451
/* ndo_open hook: allocate rings, bring up the hardware, hook the
 * (shared) interrupt line, start the 1-second link timer and the TX
 * queue.  On request_irq() failure everything is torn down again and
 * the error is returned.
 */
static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp, GFP_KERNEL);
	if (err)
		goto out;

	napi_enable(&bp->napi);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		napi_disable(&bp->napi);
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	/* Periodic link/PHY poll, first fire in one second. */
	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		phy_start(dev->phydev);

	netif_start_queue(dev);
out:
	return err;
}
1492
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 * Runs the hard IRQ handler by hand with the line masked so it cannot
 * race a real interrupt.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
1505
1506static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1507{
1508        u32 i;
1509        u32 *pattern = (u32 *) pp;
1510
1511        for (i = 0; i < bytes; i += sizeof(u32)) {
1512                bw32(bp, B44_FILT_ADDR, table_offset + i);
1513                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1514        }
1515}
1516
1517static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1518{
1519        int magicsync = 6;
1520        int k, j, len = offset;
1521        int ethaddr_bytes = ETH_ALEN;
1522
1523        memset(ppattern + offset, 0xff, magicsync);
1524        for (j = 0; j < magicsync; j++)
1525                set_bit(len++, (unsigned long *) pmask);
1526
1527        for (j = 0; j < B44_MAX_PATTERNS; j++) {
1528                if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1529                        ethaddr_bytes = ETH_ALEN;
1530                else
1531                        ethaddr_bytes = B44_PATTERN_SIZE - len;
1532                if (ethaddr_bytes <=0)
1533                        break;
1534                for (k = 0; k< ethaddr_bytes; k++) {
1535                        ppattern[offset + magicsync +
1536                                (j * ETH_ALEN) + k] = macaddr[k];
1537                        set_bit(len++, (unsigned long *) pmask);
1538                }
1539        }
1540        return len - 1;
1541}
1542
1543/* Setup magic packet patterns in the b44 WOL
1544 * pattern matching filter.
1545 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{

	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern)
		return;	/* best effort: WOL patterns simply not installed */

	/* Ipv4 magic packet pattern - pattern 0.*/
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	/* Each pattern/mask pair goes one slot further into the tables. */
	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Ipv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these pattern's lengths: one less than each real length
	 * (b44_magic_pattern() already returns len - 1)
	 */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);

}
1599
#ifdef CONFIG_B44_PCI
/* Arm the PCI-host side of wake-on-LAN: nothing to do on a native SSB
 * bus; on PCI hosts set SSB_TMSLOW_PE in the core and SSB_PE in the
 * host's PMCSR config word.
 */
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 pmcsr;

	if (bp->sdev->bus->bustype == SSB_BUSTYPE_SSB)
		return;

	bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
	pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &pmcsr);
	pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR,
			      pmcsr | SSB_PE);
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */
1614
/* Program the chip for wake-on-LAN before power-down.  B0 and later
 * revisions have native magic-packet matching (MPM); older ones get
 * the pseudo-magic filter patterns instead.
 */
static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {

		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		/* MAC bytes 2..5 in ADDR_LO, bytes 0..1 in ADDR_HI. */
		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}
1643
/* ndo_stop hook: quiesce the queue, PHY, NAPI and timer, halt the
 * hardware under bp->lock, release the IRQ, optionally arm WOL, and
 * free ring memory.  Always returns 0.
 */
static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		phy_stop(dev->phydev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	/* Re-arm just enough of the chip for wake-on-LAN if requested. */
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}
1676
/* ndo_get_stats64 hook: translate the driver-maintained hardware MIB
 * counters into rtnl_link_stats64.  The u64_stats begin/retry loop
 * re-reads the whole snapshot if a writer updated hw_stats meanwhile,
 * so the reported values are internally consistent.
 */
static void b44_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *nstat)
{
	struct b44 *bp = netdev_priv(dev);
	struct b44_hw_stats *hwstat = &bp->hw_stats;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&hwstat->syncp);

		/* Convert HW stats into rtnl_link_stats64 stats. */
		nstat->rx_packets = hwstat->rx_pkts;
		nstat->tx_packets = hwstat->tx_pkts;
		nstat->rx_bytes   = hwstat->rx_octets;
		nstat->tx_bytes   = hwstat->tx_octets;
		nstat->tx_errors  = (hwstat->tx_jabber_pkts +
				     hwstat->tx_oversize_pkts +
				     hwstat->tx_underruns +
				     hwstat->tx_excessive_cols +
				     hwstat->tx_late_cols);
		nstat->multicast  = hwstat->rx_multicast_pkts;
		nstat->collisions = hwstat->tx_total_cols;

		nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
					   hwstat->rx_undersize);
		nstat->rx_over_errors   = hwstat->rx_missed_pkts;
		nstat->rx_frame_errors  = hwstat->rx_align_errs;
		nstat->rx_crc_errors    = hwstat->rx_crc_errs;
		nstat->rx_errors        = (hwstat->rx_jabber_pkts +
					   hwstat->rx_oversize_pkts +
					   hwstat->rx_missed_pkts +
					   hwstat->rx_crc_align_errs +
					   hwstat->rx_undersize +
					   hwstat->rx_crc_errs +
					   hwstat->rx_align_errs +
					   hwstat->rx_symbol_errs);

		nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
		/* Carrier lost counter seems to be broken for some devices */
		nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));

}
1722
1723static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1724{
1725        struct netdev_hw_addr *ha;
1726        int i, num_ents;
1727
1728        num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1729        i = 0;
1730        netdev_for_each_mc_addr(ha, dev) {
1731                if (i == num_ents)
1732                        break;
1733                __b44_cam_write(bp, ha->addr, i++ + 1);
1734        }
1735        return i+1;
1736}
1737
/* Program the receive filter (promiscuous / all-multi / CAM entries).
 * Caller must hold bp->lock.
 */
static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
		/* User asked for promiscuous mode, or this core has no CAM
		 * at all — accept everything.
		 */
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		/* Zero out every remaining CAM slot so stale entries from a
		 * previous filter configuration cannot match.
		 */
		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		/* Write RXCONFIG before re-enabling the CAM. */
		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
1768
/* ndo_set_rx_mode: locked wrapper around __b44_set_rx_mode(). */
static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}
1777
1778static u32 b44_get_msglevel(struct net_device *dev)
1779{
1780        struct b44 *bp = netdev_priv(dev);
1781        return bp->msg_enable;
1782}
1783
1784static void b44_set_msglevel(struct net_device *dev, u32 value)
1785{
1786        struct b44 *bp = netdev_priv(dev);
1787        bp->msg_enable = value;
1788}
1789
/* ethtool -i: report driver name/version and which bus hosts the core. */
static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct ssb_bus *bus = bp->sdev->bus;

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_SSB:
		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}
}
1810
/* ethtool nway_reset: restart autonegotiation on the internal PHY.
 * Returns -EINVAL if autoneg is not currently enabled in BMCR.
 */
static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	/* BMCR is read twice — presumably to discard a stale first read
	 * from this PHY; NOTE(review): confirm whether the double read is
	 * actually required here.
	 */
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}
1830
/* ethtool get_link_ksettings: delegate to phylib for an external PHY,
 * otherwise synthesize the settings from the driver's B44_FLAG_* state.
 */
static int b44_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct b44 *bp = netdev_priv(dev);
	u32 supported, advertising;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		BUG_ON(!dev->phydev);
		phy_ethtool_ksettings_get(dev->phydev, cmd);

		return 0;
	}

	/* Internal PHY: 10/100 half/full over MII, autoneg capable. */
	supported = (SUPPORTED_Autoneg);
	supported |= (SUPPORTED_100baseT_Half |
		      SUPPORTED_100baseT_Full |
		      SUPPORTED_10baseT_Half |
		      SUPPORTED_10baseT_Full |
		      SUPPORTED_MII);

	/* Rebuild the advertising mask from the driver flags. */
	advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		advertising |= ADVERTISED_100baseT_Full;
	advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.port = 0;
	cmd->base.phy_address = bp->phy_addr;
	cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->base.autoneg == AUTONEG_ENABLE)
		advertising |= ADVERTISED_Autoneg;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	/* Interface down: speed/duplex are meaningless (0 / 0xff mirror
	 * SPEED_UNKNOWN-style "don't know" values).
	 */
	if (!netif_running(dev)){
		cmd->base.speed = 0;
		cmd->base.duplex = 0xff;
	}

	return 0;
}
1884
/* ethtool set_link_ksettings: delegate to phylib for an external PHY,
 * otherwise validate the request (no gigabit) and translate it into
 * B44_FLAG_* state, then reprogram the PHY if the interface is up.
 */
static int b44_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct b44 *bp = netdev_priv(dev);
	u32 speed;
	int ret;
	u32 advertising;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		BUG_ON(!dev->phydev);
		spin_lock_irq(&bp->lock);
		if (netif_running(dev))
			b44_setup_phy(bp);

		ret = phy_ethtool_ksettings_set(dev->phydev, cmd);

		spin_unlock_irq(&bp->lock);

		return ret;
	}

	speed = cmd->base.speed;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* We do not support gigabit. */
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		if (advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((speed != SPEED_100 &&
		    speed != SPEED_10) ||
		   (cmd->base.duplex != DUPLEX_HALF &&
		    cmd->base.duplex != DUPLEX_FULL)) {
			return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		/* Autoneg: clear forced-link state and rebuild the
		 * advertisement flags; an empty mask means "advertise all".
		 */
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		/* Forced speed/duplex. */
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->base.duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}
1965
1966static void b44_get_ringparam(struct net_device *dev,
1967                              struct ethtool_ringparam *ering)
1968{
1969        struct b44 *bp = netdev_priv(dev);
1970
1971        ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1972        ering->rx_pending = bp->rx_pending;
1973
1974        /* XXX ethtool lacks a tx_max_pending, oops... */
1975}
1976
/* ethtool -G: validate the requested ring sizes, then halt the chip,
 * rebuild the rings with the new sizes and bring the hardware back up.
 */
static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	/* Reject mini/jumbo rings (unsupported) and out-of-range sizes. */
	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	/* Full restart so the new ring sizes take effect. */
	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	/* Interrupts are re-enabled outside the lock. */
	b44_enable_ints(bp);

	return 0;
}
2003
2004static void b44_get_pauseparam(struct net_device *dev,
2005                                struct ethtool_pauseparam *epause)
2006{
2007        struct b44 *bp = netdev_priv(dev);
2008
2009        epause->autoneg =
2010                (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
2011        epause->rx_pause =
2012                (bp->flags & B44_FLAG_RX_PAUSE) != 0;
2013        epause->tx_pause =
2014                (bp->flags & B44_FLAG_TX_PAUSE) != 0;
2015}
2016
2017static int b44_set_pauseparam(struct net_device *dev,
2018                                struct ethtool_pauseparam *epause)
2019{
2020        struct b44 *bp = netdev_priv(dev);
2021
2022        spin_lock_irq(&bp->lock);
2023        if (epause->autoneg)
2024                bp->flags |= B44_FLAG_PAUSE_AUTO;
2025        else
2026                bp->flags &= ~B44_FLAG_PAUSE_AUTO;
2027        if (epause->rx_pause)
2028                bp->flags |= B44_FLAG_RX_PAUSE;
2029        else
2030                bp->flags &= ~B44_FLAG_RX_PAUSE;
2031        if (epause->tx_pause)
2032                bp->flags |= B44_FLAG_TX_PAUSE;
2033        else
2034                bp->flags &= ~B44_FLAG_TX_PAUSE;
2035        if (bp->flags & B44_FLAG_PAUSE_AUTO) {
2036                b44_halt(bp);
2037                b44_init_rings(bp);
2038                b44_init_hw(bp, B44_FULL_RESET);
2039        } else {
2040                __b44_set_flow_ctrl(bp, bp->flags);
2041        }
2042        spin_unlock_irq(&bp->lock);
2043
2044        b44_enable_ints(bp);
2045
2046        return 0;
2047}
2048
2049static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2050{
2051        switch(stringset) {
2052        case ETH_SS_STATS:
2053                memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
2054                break;
2055        }
2056}
2057
2058static int b44_get_sset_count(struct net_device *dev, int sset)
2059{
2060        switch (sset) {
2061        case ETH_SS_STATS:
2062                return ARRAY_SIZE(b44_gstrings);
2063        default:
2064                return -EOPNOTSUPP;
2065        }
2066}
2067
/* ethtool -S: refresh the HW counters, then copy them out under the
 * u64_stats seqcount so the snapshot is consistent.
 *
 * NOTE(review): the copy walks raw u64s starting at
 * hwstat->tx_good_octets — this assumes struct b44_hw_stats lays out
 * its counters as a contiguous u64 array in b44_gstrings order;
 * confirm against the struct definition in b44.h.
 */
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	struct b44_hw_stats *hwstat = &bp->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	u32 i;

	spin_lock_irq(&bp->lock);
	b44_stats_update(bp);
	spin_unlock_irq(&bp->lock);

	do {
		data_src = &hwstat->tx_good_octets;
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstat->syncp);

		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
			*data_dst++ = *data_src++;

	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
}
2091
2092static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2093{
2094        struct b44 *bp = netdev_priv(dev);
2095
2096        wol->supported = WAKE_MAGIC;
2097        if (bp->flags & B44_FLAG_WOL_ENABLE)
2098                wol->wolopts = WAKE_MAGIC;
2099        else
2100                wol->wolopts = 0;
2101        memset(&wol->sopass, 0, sizeof(wol->sopass));
2102}
2103
2104static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2105{
2106        struct b44 *bp = netdev_priv(dev);
2107
2108        spin_lock_irq(&bp->lock);
2109        if (wol->wolopts & WAKE_MAGIC)
2110                bp->flags |= B44_FLAG_WOL_ENABLE;
2111        else
2112                bp->flags &= ~B44_FLAG_WOL_ENABLE;
2113        spin_unlock_irq(&bp->lock);
2114
2115        device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
2116        return 0;
2117}
2118
/* ethtool entry points for this driver. */
static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
	.get_link_ksettings	= b44_get_link_ksettings,
	.set_link_ksettings	= b44_set_link_ksettings,
};
2137
2138static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2139{
2140        struct b44 *bp = netdev_priv(dev);
2141        int err = -EINVAL;
2142
2143        if (!netif_running(dev))
2144                goto out;
2145
2146        spin_lock_irq(&bp->lock);
2147        if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2148                BUG_ON(!dev->phydev);
2149                err = phy_mii_ioctl(dev->phydev, ifr, cmd);
2150        } else {
2151                err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
2152        }
2153        spin_unlock_irq(&bp->lock);
2154out:
2155        return err;
2156}
2157
/* Read board-specific invariants (MAC address, PHY address, chip quirk
 * flags) out of the SPROM.  Returns 0 on success, -EINVAL when the
 * SPROM carries no valid MAC address.
 */
static int b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	/* On SoC (SSB) buses with more than one ethernet core, the second
	 * core uses the et1 SPROM entries instead of et0.
	 */
	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
	bp->phy_addr &= 0x1F;

	memcpy(bp->dev->dev_addr, addr, ETH_ALEN);

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
		pr_err("Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	/* Core revision 7 and later behaves like the B0 stepping. */
	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}
2197
/* Network device entry points. */
static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats64	= b44_get_stats64,
	.ndo_set_rx_mode	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};
2213
2214static void b44_adjust_link(struct net_device *dev)
2215{
2216        struct b44 *bp = netdev_priv(dev);
2217        struct phy_device *phydev = dev->phydev;
2218        bool status_changed = 0;
2219
2220        BUG_ON(!phydev);
2221
2222        if (bp->old_link != phydev->link) {
2223                status_changed = 1;
2224                bp->old_link = phydev->link;
2225        }
2226
2227        /* reflect duplex change */
2228        if (phydev->link) {
2229                if ((phydev->duplex == DUPLEX_HALF) &&
2230                    (bp->flags & B44_FLAG_FULL_DUPLEX)) {
2231                        status_changed = 1;
2232                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;
2233                } else if ((phydev->duplex == DUPLEX_FULL) &&
2234                           !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
2235                        status_changed = 1;
2236                        bp->flags |= B44_FLAG_FULL_DUPLEX;
2237                }
2238        }
2239
2240        if (status_changed) {
2241                u32 val = br32(bp, B44_TX_CTRL);
2242                if (bp->flags & B44_FLAG_FULL_DUPLEX)
2243                        val |= TX_CTRL_DUPLEX;
2244                else
2245                        val &= ~TX_CTRL_DUPLEX;
2246                bw32(bp, B44_TX_CTRL, val);
2247                phy_print_status(phydev);
2248        }
2249}
2250
/* Allocate and register an MDIO bus for the external PHY, connect the
 * PHY through phylib, and restrict it to the MAC's 10/100 MII feature
 * set.  Falls back to a fixed PHY when none is found on switch boards
 * (ROBO/ADM boardflags).  Returns 0 or a negative errno, unwinding the
 * MDIO bus on failure.
 */
static int b44_register_phy_one(struct b44 *bp)
{
	struct mii_bus *mii_bus;
	struct ssb_device *sdev = bp->sdev;
	struct phy_device *phydev;
	char bus_id[MII_BUS_ID_SIZE + 3];
	struct ssb_sprom *sprom = &sdev->bus->sprom;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus) {
		dev_err(sdev->dev, "mdiobus_alloc() failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	mii_bus->priv = bp;
	mii_bus->read = b44_mdio_read_phylib;
	mii_bus->write = b44_mdio_write_phylib;
	mii_bus->name = "b44_eth_mii";
	mii_bus->parent = sdev->dev;
	/* Only probe the address the SPROM told us about. */
	mii_bus->phy_mask = ~(1 << bp->phy_addr);
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);

	bp->mii_bus = mii_bus;

	err = mdiobus_register(mii_bus);
	if (err) {
		dev_err(sdev->dev, "failed to register MII bus\n");
		goto err_out_mdiobus;
	}

	if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
	    (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {

		dev_info(sdev->dev,
			 "could not find PHY at %i, use fixed one\n",
			 bp->phy_addr);

		/* Switch boards: attach to the fixed PHY instead. */
		bp->phy_addr = 0;
		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
			 bp->phy_addr);
	} else {
		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
			 bp->phy_addr);
	}

	phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		dev_err(sdev->dev, "could not attach PHY at %i\n",
			bp->phy_addr);
		err = PTR_ERR(phydev);
		goto err_out_mdiobus_unregister;
	}

	/* mask with MAC supported features */
	phydev->supported &= (SUPPORTED_100baseT_Half |
			      SUPPORTED_100baseT_Full |
			      SUPPORTED_Autoneg |
			      SUPPORTED_MII);
	phydev->advertising = phydev->supported;

	bp->old_link = 0;
	bp->phy_addr = phydev->mdio.addr;

	phy_attached_info(phydev);

	return 0;

err_out_mdiobus_unregister:
	mdiobus_unregister(mii_bus);

err_out_mdiobus:
	mdiobus_free(mii_bus);

err_out:
	return err;
}
2330
2331static void b44_unregister_phy_one(struct b44 *bp)
2332{
2333        struct net_device *dev = bp->dev;
2334        struct mii_bus *mii_bus = bp->mii_bus;
2335
2336        phy_disconnect(dev->phydev);
2337        mdiobus_unregister(mii_bus);
2338        mdiobus_free(mii_bus);
2339}
2340
2341static int b44_init_one(struct ssb_device *sdev,
2342                        const struct ssb_device_id *ent)
2343{
2344        struct net_device *dev;
2345        struct b44 *bp;
2346        int err;
2347
2348        instance++;
2349
2350        pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
2351
2352        dev = alloc_etherdev(sizeof(*bp));
2353        if (!dev) {
2354                err = -ENOMEM;
2355                goto out;
2356        }
2357
2358        SET_NETDEV_DEV(dev, sdev->dev);
2359
2360        /* No interesting netdevice features in this card... */
2361        dev->features |= 0;
2362
2363        bp = netdev_priv(dev);
2364        bp->sdev = sdev;
2365        bp->dev = dev;
2366        bp->force_copybreak = 0;
2367
2368        bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2369
2370        spin_lock_init(&bp->lock);
2371        u64_stats_init(&bp->hw_stats.syncp);
2372
2373        bp->rx_pending = B44_DEF_RX_RING_PENDING;
2374        bp->tx_pending = B44_DEF_TX_RING_PENDING;
2375
2376        dev->netdev_ops = &b44_netdev_ops;
2377        netif_napi_add(dev, &bp->napi, b44_poll, 64);
2378        dev->watchdog_timeo = B44_TX_TIMEOUT;
2379        dev->min_mtu = B44_MIN_MTU;
2380        dev->max_mtu = B44_MAX_MTU;
2381        dev->irq = sdev->irq;
2382        dev->ethtool_ops = &b44_ethtool_ops;
2383
2384        err = ssb_bus_powerup(sdev->bus, 0);
2385        if (err) {
2386                dev_err(sdev->dev,
2387                        "Failed to powerup the bus\n");
2388                goto err_out_free_dev;
2389        }
2390
2391        if (dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30))) {
2392                dev_err(sdev->dev,
2393                        "Required 30BIT DMA mask unsupported by the system\n");
2394                goto err_out_powerdown;
2395        }
2396
2397        err = b44_get_invariants(bp);
2398        if (err) {
2399                dev_err(sdev->dev,
2400                        "Problem fetching invariants of chip, aborting\n");
2401                goto err_out_powerdown;
2402        }
2403
2404        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
2405                dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
2406                err = -ENODEV;
2407                goto err_out_powerdown;
2408        }
2409
2410        bp->mii_if.dev = dev;
2411        bp->mii_if.mdio_read = b44_mdio_read_mii;
2412        bp->mii_if.mdio_write = b44_mdio_write_mii;
2413        bp->mii_if.phy_id = bp->phy_addr;
2414        bp->mii_if.phy_id_mask = 0x1f;
2415        bp->mii_if.reg_num_mask = 0x1f;
2416
2417        /* By default, advertise all speed/duplex settings. */
2418        bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2419                      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2420
2421        /* By default, auto-negotiate PAUSE. */
2422        bp->flags |= B44_FLAG_PAUSE_AUTO;
2423
2424        err = register_netdev(dev);
2425        if (err) {
2426                dev_err(sdev->dev, "Cannot register net device, aborting\n");
2427                goto err_out_powerdown;
2428        }
2429
2430        netif_carrier_off(dev);
2431
2432        ssb_set_drvdata(sdev, dev);
2433
2434        /* Chip reset provides power to the b44 MAC & PCI cores, which
2435         * is necessary for MAC register access.
2436         */
2437        b44_chip_reset(bp, B44_CHIP_RESET_FULL);
2438
2439        /* do a phy reset to test if there is an active phy */
2440        err = b44_phy_reset(bp);
2441        if (err < 0) {
2442                dev_err(sdev->dev, "phy reset failed\n");
2443                goto err_out_unregister_netdev;
2444        }
2445
2446        if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
2447                err = b44_register_phy_one(bp);
2448                if (err) {
2449                        dev_err(sdev->dev, "Cannot register PHY, aborting\n");
2450                        goto err_out_unregister_netdev;
2451                }
2452        }
2453
2454        device_set_wakeup_capable(sdev->dev, true);
2455        netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
2456
2457        return 0;
2458
2459err_out_unregister_netdev:
2460        unregister_netdev(dev);
2461err_out_powerdown:
2462        ssb_bus_may_powerdown(sdev->bus);
2463
2464err_out_free_dev:
2465        netif_napi_del(&bp->napi);
2466        free_netdev(dev);
2467
2468out:
2469        return err;
2470}
2471
/* SSB remove: unwind b44_init_one() — unregister the netdev (and PHY),
 * power the core/bus down, free the netdev and drop into PCI D3hot.
 */
static void b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	unregister_netdev(dev);
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		b44_unregister_phy_one(bp);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	netif_napi_del(&bp->napi);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}
2487
/* SSB suspend: halt the chip and detach the netdev.  If Wake-on-LAN is
 * enabled, reinitialize just enough of the hardware to arm the magic
 * packet logic before the device drops to D3hot.
 */
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	/* The IRQ is re-requested in b44_resume(). */
	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}
2516
/* SSB resume: power the bus back up and, if the interface was running,
 * rebuild the rings, re-request the IRQ and restart the device.
 */
static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	/*
	 * As a shared interrupt, the handler can be called immediately. To be
	 * able to check the interrupt status the hardware must already be
	 * powered back on (b44_init_hw).
	 */
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		/* Undo the init above so the chip is left quiescent. */
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_free_rings(bp);
		spin_unlock_irq(&bp->lock);
		return rc;
	}

	netif_device_attach(bp->dev);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	/* Kick the periodic timer straight away. */
	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}
2562
/* SSB driver glue: probe/remove and power-management hooks. */
static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= b44_remove_one,
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};
2571
/* Register the SSB-over-PCI host driver; a no-op (returns 0) when the
 * kernel is built without CONFIG_B44_PCI.
 */
static inline int __init b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}
2580
/* Unregister the SSB-over-PCI host driver (no-op without CONFIG_B44_PCI). */
static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}
2587
/* Module init: compute DMA descriptor sync parameters, then register
 * the PCI host glue and the SSB driver (unwinding PCI on failure).
 */
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}
2604
/* Module exit: unregister the SSB driver and the PCI host glue. */
static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}
2610
/* Module entry/exit points. */
module_init(b44_init);
module_exit(b44_cleanup);
2613
2614