linux/drivers/net/mace.c
/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitrev.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/macio.h>

#include "mace.h"

static int port_aaui = -1;

#define N_RX_RING       8
#define N_TX_RING       6
#define MAX_TX_ACTIVE   1
#define NCMDS_TX        1       /* dma commands per element in tx ring */
#define RX_BUFLEN       (ETH_FRAME_LEN + 8)
#define TX_TIMEOUT      HZ      /* 1 second */

/* Chip revision that needs a workaround when changing the HW or multicast address */
#define BROKEN_ADDRCHG_REV      0x0941

/* Bits in transmit DMA status */
#define TX_DMA_ERR      0x80

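/*
 * Ring bookkeeping (as used throughout this file): *_fill is the next
 * slot to hand to the hardware, *_empty is the oldest slot not yet
 * reclaimed; a ring is full when advancing fill would reach empty.
 */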
struct mace_data {
    volatile struct mace __iomem *mace;
    volatile struct dbdma_regs __iomem *tx_dma;
    int tx_dma_intr;
    volatile struct dbdma_regs __iomem *rx_dma;
    int rx_dma_intr;
    volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
    volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
    struct sk_buff *rx_bufs[N_RX_RING];
    int rx_fill;
    int rx_empty;
    struct sk_buff *tx_bufs[N_TX_RING];
    int tx_fill;
    int tx_empty;
    unsigned char maccc;
    unsigned char tx_fullup;
    unsigned char tx_active;
    unsigned char tx_bad_runt;
    struct timer_list tx_timeout;
    int timeout_active;
    int port_aaui;
    int chipid;
    struct macio_dev *mdev;
    spinlock_t lock;
};

/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES      (sizeof(struct mace_data) \
        + (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(unsigned long data);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * If we can't get a skbuff when we need it, we use this area for DMA.
 */
static unsigned char *dummy_buf;

static const struct net_device_ops mace_netdev_ops = {
        .ndo_open               = mace_open,
        .ndo_stop               = mace_close,
        .ndo_start_xmit         = mace_xmit_start,
        .ndo_set_multicast_list = mace_set_multicast,
        .ndo_set_mac_address    = mace_set_address,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
};

static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
        struct device_node *mace = macio_get_of_node(mdev);
        struct net_device *dev;
        struct mace_data *mp;
        const unsigned char *addr;
        int j, rev, rc = -EBUSY;

        if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
                printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n",
                       mace->full_name);
                return -ENODEV;
        }

        addr = of_get_property(mace, "mac-address", NULL);
        if (addr == NULL) {
                addr = of_get_property(mace, "local-mac-address", NULL);
                if (addr == NULL) {
                        printk(KERN_ERR "Can't get mac-address for MACE %s\n",
                               mace->full_name);
                        return -ENODEV;
                }
        }

        /*
         * Lazily allocate the driver-wide dummy buffer. (Note that we
         * never have more than one MACE in the system anyway.)
         */
        if (dummy_buf == NULL) {
                dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
                if (dummy_buf == NULL) {
                        printk(KERN_ERR "MACE: couldn't allocate dummy buffer\n");
                        return -ENOMEM;
                }
        }

        if (macio_request_resources(mdev, "mace")) {
                printk(KERN_ERR "MACE: can't request IO resources!\n");
                return -EBUSY;
        }

        dev = alloc_etherdev(PRIV_BYTES);
        if (!dev) {
                printk(KERN_ERR "MACE: can't allocate ethernet device!\n");
                rc = -ENOMEM;
                goto err_release;
        }
        SET_NETDEV_DEV(dev, &mdev->ofdev.dev);

        mp = netdev_priv(dev);
        mp->mdev = mdev;
        macio_set_drvdata(mdev, dev);

        dev->base_addr = macio_resource_start(mdev, 0);
        mp->mace = ioremap(dev->base_addr, 0x1000);
        if (mp->mace == NULL) {
                printk(KERN_ERR "MACE: can't map IO resources!\n");
                rc = -ENOMEM;
                goto err_free;
        }
        dev->irq = macio_irq(mdev, 0);

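        /*
         * On some machines the firmware apparently stores the MAC
         * address bit-reversed; an address starting with the reversed
         * Apple prefix 00:A0 is detected here and each byte is
         * flipped back with bitrev8().
         */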
        rev = addr[0] == 0 && addr[1] == 0xA0;
        for (j = 0; j < 6; ++j) {
                dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
        }
        mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
                        in_8(&mp->mace->chipid_lo);

        mp->maccc = ENXMT | ENRCV;

        mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
        if (mp->tx_dma == NULL) {
                printk(KERN_ERR "MACE: can't map TX DMA resources!\n");
                rc = -ENOMEM;
                goto err_unmap_io;
        }
        mp->tx_dma_intr = macio_irq(mdev, 1);

        mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
        if (mp->rx_dma == NULL) {
                printk(KERN_ERR "MACE: can't map RX DMA resources!\n");
                rc = -ENOMEM;
                goto err_unmap_tx_dma;
        }
        mp->rx_dma_intr = macio_irq(mdev, 2);

        mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
        mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;

        memset((char *) mp->tx_cmds, 0,
               (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
        init_timer(&mp->tx_timeout);
        spin_lock_init(&mp->lock);
        mp->timeout_active = 0;

        if (port_aaui >= 0)
                mp->port_aaui = port_aaui;
        else {
                /* Apple Network Server uses the AAUI port */
                if (machine_is_compatible("AAPL,ShinerESB"))
                        mp->port_aaui = 1;
                else {
#ifdef CONFIG_MACE_AAUI_PORT
                        mp->port_aaui = 1;
#else
                        mp->port_aaui = 0;
#endif
                }
        }

        dev->netdev_ops = &mace_netdev_ops;

        /*
         * Most of what is below could be moved to mace_open()
         */
        mace_reset(dev);

        rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
        if (rc) {
                printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
                goto err_unmap_rx_dma;
        }
        rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
        if (rc) {
                printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
                goto err_free_irq;
        }
        rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
        if (rc) {
                printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
                goto err_free_tx_irq;
        }

        rc = register_netdev(dev);
        if (rc) {
                printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
                goto err_free_rx_irq;
        }

        printk(KERN_INFO "%s: MACE at %pM, chip revision %d.%d\n",
               dev->name, dev->dev_addr,
               mp->chipid >> 8, mp->chipid & 0xff);

        return 0;

 err_free_rx_irq:
        free_irq(macio_irq(mdev, 2), dev);
 err_free_tx_irq:
        free_irq(macio_irq(mdev, 1), dev);
 err_free_irq:
        free_irq(macio_irq(mdev, 0), dev);
 err_unmap_rx_dma:
        iounmap(mp->rx_dma);
 err_unmap_tx_dma:
        iounmap(mp->tx_dma);
 err_unmap_io:
        iounmap(mp->mace);
 err_free:
        free_netdev(dev);
 err_release:
        macio_release_resources(mdev);

        return rc;
}

static int __devexit mace_remove(struct macio_dev *mdev)
{
        struct net_device *dev = macio_get_drvdata(mdev);
        struct mace_data *mp;

        BUG_ON(dev == NULL);

        macio_set_drvdata(mdev, NULL);

        mp = netdev_priv(dev);

        unregister_netdev(dev);

        free_irq(dev->irq, dev);
        free_irq(mp->tx_dma_intr, dev);
        free_irq(mp->rx_dma_intr, dev);

        iounmap(mp->rx_dma);
        iounmap(mp->tx_dma);
        iounmap(mp->mace);

        free_netdev(dev);

        macio_release_resources(mdev);

        return 0;
}

static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
{
    int i;

    out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

    /*
     * Yes this looks peculiar, but apparently it needs to be this
     * way on some machines.
     */
    for (i = 200; i > 0; --i)
        if (ld_le32(&dma->control) & RUN)
            udelay(1);
}

static void mace_reset(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    int i;

    /* soft-reset the chip */
    i = 200;
    while (--i) {
        out_8(&mb->biucc, SWRST);
        if (in_8(&mb->biucc) & SWRST) {
            udelay(10);
            continue;
        }
        break;
    }
    if (!i) {
        printk(KERN_ERR "mace: cannot reset chip!\n");
        return;
    }

    out_8(&mb->imr, 0xff);      /* disable all intrs for now */
    i = in_8(&mb->ir);
    out_8(&mb->maccc, 0);       /* turn off tx, rx */

    out_8(&mb->biucc, XMTSP_64);
    out_8(&mb->utr, RTRD);
    out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
    out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */
    out_8(&mb->rcvfc, 0);

    /* load up the hardware address */
    __mace_set_address(dev, dev->dev_addr);

    /* clear the multicast filter */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, LOGADDR);
    else {
        out_8(&mb->iac, ADDRCHG | LOGADDR);
        while ((in_8(&mb->iac) & ADDRCHG) != 0)
                ;
    }
    for (i = 0; i < 8; ++i)
        out_8(&mb->ladrf, 0);

    /* done changing address */
    if (mp->chipid != BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, 0);

    if (mp->port_aaui)
        out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
    else
        out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}

static void __mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    unsigned char *p = addr;
    int i;

    /* load up the hardware address */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, PHYADDR);
    else {
        out_8(&mb->iac, ADDRCHG | PHYADDR);
        while ((in_8(&mb->iac) & ADDRCHG) != 0)
            ;
    }
    for (i = 0; i < 6; ++i)
        out_8(&mb->padr, dev->dev_addr[i] = p[i]);
    if (mp->chipid != BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, 0);
}

static int mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);

    __mace_set_address(dev, addr);

    /* note: setting ADDRCHG clears ENRCV */
    out_8(&mb->maccc, mp->maccc);

    spin_unlock_irqrestore(&mp->lock, flags);
    return 0;
}

static inline void mace_clean_rings(struct mace_data *mp)
{
    int i;

    /* free some skb's */
    for (i = 0; i < N_RX_RING; ++i) {
        if (mp->rx_bufs[i] != NULL) {
            dev_kfree_skb(mp->rx_bufs[i]);
            mp->rx_bufs[i] = NULL;
        }
    }
    for (i = mp->tx_empty; i != mp->tx_fill; ) {
        dev_kfree_skb(mp->tx_bufs[i]);
        if (++i >= N_TX_RING)
            i = 0;
    }
}

static int mace_open(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int i;
    struct sk_buff *skb;
    unsigned char *data;

    /* reset the chip */
    mace_reset(dev);

    /* initialize list of sk_buffs for receiving and set up recv dma */
    mace_clean_rings(mp);
    memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
    cp = mp->rx_cmds;
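    /*
     * Only N_RX_RING - 1 slots are filled, so rx_fill == rx_empty
     * always means "ring empty", never "ring full".
     */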
    for (i = 0; i < N_RX_RING - 1; ++i) {
        skb = dev_alloc_skb(RX_BUFLEN + 2);
        if (!skb) {
            data = dummy_buf;
        } else {
            skb_reserve(skb, 2);        /* so IP header lands on 4-byte bdry */
            data = skb->data;
        }
        mp->rx_bufs[i] = skb;
        st_le16(&cp->req_count, RX_BUFLEN);
        st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
        st_le32(&cp->phy_addr, virt_to_bus(data));
        cp->xfer_status = 0;
        ++cp;
    }
    mp->rx_bufs[i] = NULL;
    st_le16(&cp->command, DBDMA_STOP);
    mp->rx_fill = i;
    mp->rx_empty = 0;

    /* Put a branch back to the beginning of the receive command list */
    ++cp;
    st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
    st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds));

    /* start rx dma */
    out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* put a branch at the end of the tx command list */
    cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
    st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
    st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds));

    /* reset tx dma */
    out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
    out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
    mp->tx_fill = 0;
    mp->tx_empty = 0;
    mp->tx_fullup = 0;
    mp->tx_active = 0;
    mp->tx_bad_runt = 0;

    /* turn it on! */
    out_8(&mb->maccc, mp->maccc);
    /* enable all interrupts except receive interrupts */
    out_8(&mb->imr, RCVINT);

    return 0;
}

static int mace_close(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;

    /* disable rx and tx */
    out_8(&mb->maccc, 0);
    out_8(&mb->imr, 0xff);              /* disable all intrs */

    /* disable rx and tx dma */
    st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */

    mace_clean_rings(mp);

    return 0;
}

static inline void mace_set_timeout(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);

    if (mp->timeout_active)
        del_timer(&mp->tx_timeout);
    mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
    mp->tx_timeout.function = mace_tx_timeout;
    mp->tx_timeout.data = (unsigned long) dev;
    add_timer(&mp->tx_timeout);
    mp->timeout_active = 1;
}

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp, *np;
    unsigned long flags;
    int fill, next, len;

    /* see if there's a free slot in the tx ring */
    spin_lock_irqsave(&mp->lock, flags);
    fill = mp->tx_fill;
    next = fill + 1;
    if (next >= N_TX_RING)
        next = 0;
    if (next == mp->tx_empty) {
        netif_stop_queue(dev);
        mp->tx_fullup = 1;
        spin_unlock_irqrestore(&mp->lock, flags);
        return NETDEV_TX_BUSY;          /* can't take it at the moment */
    }
    spin_unlock_irqrestore(&mp->lock, flags);

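    /*
     * The slot at "fill" is now reserved: tx_fill is only advanced
     * here, so the command block can be filled in without the lock.
     */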
    /* partially fill in the dma command block */
    len = skb->len;
    if (len > ETH_FRAME_LEN) {
        printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
        len = ETH_FRAME_LEN;
    }
    mp->tx_bufs[fill] = skb;
    cp = mp->tx_cmds + NCMDS_TX * fill;
    st_le16(&cp->req_count, len);
    st_le32(&cp->phy_addr, virt_to_bus(skb->data));

    np = mp->tx_cmds + NCMDS_TX * next;
    out_le16(&np->command, DBDMA_STOP);

    /* poke the tx dma channel */
    spin_lock_irqsave(&mp->lock, flags);
    mp->tx_fill = next;
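    /*
     * Hand at most MAX_TX_ACTIVE frames to the DMA engine at a time;
     * transmit status apparently has to be collected from the chip
     * one frame at a time (see mace_interrupt()).
     */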
    if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, OUTPUT_LAST);
        out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
        ++mp->tx_active;
        mace_set_timeout(dev);
    }
    if (++next >= N_TX_RING)
        next = 0;
    if (next == mp->tx_empty)
        netif_stop_queue(dev);
    spin_unlock_irqrestore(&mp->lock, flags);

    return NETDEV_TX_OK;
}

static void mace_set_multicast(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    int i, j;
    u32 crc;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    mp->maccc &= ~PROM;
    if (dev->flags & IFF_PROMISC) {
        mp->maccc |= PROM;
    } else {
        unsigned char multicast_filter[8];
        struct dev_mc_list *dmi = dev->mc_list;

        if (dev->flags & IFF_ALLMULTI) {
            for (i = 0; i < 8; i++)
                multicast_filter[i] = 0xff;
        } else {
            for (i = 0; i < 8; i++)
                multicast_filter[i] = 0;
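            /*
             * Each address selects one of the 64 logical-address
             * filter bits: the top 6 bits of the little-endian CRC-32
             * of the address give the bit number.
             */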
            for (i = 0; i < dev->mc_count; i++) {
                crc = ether_crc_le(6, dmi->dmi_addr);
                j = crc >> 26;  /* bit number in multicast_filter */
                multicast_filter[j >> 3] |= 1 << (j & 7);
                dmi = dmi->next;
            }
        }
#if 0
        printk("Multicast filter :");
        for (i = 0; i < 8; i++)
            printk("%02x ", multicast_filter[i]);
        printk("\n");
#endif

        if (mp->chipid == BROKEN_ADDRCHG_REV)
            out_8(&mb->iac, LOGADDR);
        else {
            out_8(&mb->iac, ADDRCHG | LOGADDR);
            while ((in_8(&mb->iac) & ADDRCHG) != 0)
                ;
        }
        for (i = 0; i < 8; ++i)
            out_8(&mb->ladrf, multicast_filter[i]);
        if (mp->chipid != BROKEN_ADDRCHG_REV)
            out_8(&mb->iac, 0);
    }
    /* reset maccc */
    out_8(&mb->maccc, mp->maccc);
    spin_unlock_irqrestore(&mp->lock, flags);
}

static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
{
    volatile struct mace __iomem *mb = mp->mace;
    static int mace_babbles, mace_jabbers;

    if (intr & MPCO)
        dev->stats.rx_missed_errors += 256;
    dev->stats.rx_missed_errors += in_8(&mb->mpc);   /* reading clears it */
    if (intr & RNTPCO)
        dev->stats.rx_length_errors += 256;
    dev->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */
    if (intr & CERR)
        ++dev->stats.tx_heartbeat_errors;
    if (intr & BABBLE)
        if (mace_babbles++ < 4)
            printk(KERN_DEBUG "mace: babbling transmitter\n");
    if (intr & JABBER)
        if (mace_jabbers++ < 4)
            printk(KERN_DEBUG "mace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int intr, fs, i, stat, x;
    int xcount, dstat;
    unsigned long flags;
    /* static int mace_last_fs, mace_last_xcount; */

    spin_lock_irqsave(&mp->lock, flags);
    intr = in_8(&mb->ir);               /* read interrupt register */
    in_8(&mb->xmtrc);                   /* get retries */
    mace_handle_misc_intrs(mp, intr, dev);

    i = mp->tx_empty;
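    /*
     * Reclaim completed transmit frames: XMTSV in the poll register
     * indicates that the chip has a latched transmit frame status
     * ready to be read.
     */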
    while (in_8(&mb->pr) & XMTSV) {
        del_timer(&mp->tx_timeout);
        mp->timeout_active = 0;
        /*
         * Clear any interrupt indication associated with this status
         * word.  This appears to unlatch any error indication from
         * the DMA controller.
         */
        intr = in_8(&mb->ir);
        if (intr != 0)
            mace_handle_misc_intrs(mp, intr, dev);
        if (mp->tx_bad_runt) {
            fs = in_8(&mb->xmtfs);
            mp->tx_bad_runt = 0;
            out_8(&mb->xmtfc, AUTO_PAD_XMIT);
            continue;
        }
        dstat = ld_le32(&td->status);
        /* stop DMA controller */
        out_le32(&td->control, RUN << 16);
        /*
         * xcount is the number of complete frames which have been
         * written to the fifo but for which status has not been read.
         */
        xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
        if (xcount == 0 || (dstat & DEAD)) {
            /*
             * If a packet was aborted before the DMA controller has
             * finished transferring it, it seems that there are 2 bytes
             * which are stuck in some buffer somewhere.  These will get
             * transmitted as soon as we read the frame status (which
             * reenables the transmit data transfer request).  Turning
             * off the DMA controller and/or resetting the MACE doesn't
             * help.  So we disable auto-padding and FCS transmission
             * so the two bytes will only be a runt packet which should
             * be ignored by other stations.
             */
            out_8(&mb->xmtfc, DXMTFCS);
        }
        fs = in_8(&mb->xmtfs);
        if ((fs & XMTSV) == 0) {
            printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
                   fs, xcount, dstat);
            mace_reset(dev);
                /*
                 * XXX mace likes to hang the machine after an xmtfs error.
                 * This is hard to reproduce; resetting *may* help.
                 */
        }
        cp = mp->tx_cmds + NCMDS_TX * i;
        stat = ld_le16(&cp->xfer_status);
        if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
            /*
             * Check whether there were in fact 2 bytes written to
             * the transmit FIFO.
             */
            udelay(1);
            x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
            if (x != 0) {
                /* there were two bytes with an end-of-packet indication */
                mp->tx_bad_runt = 1;
                mace_set_timeout(dev);
            } else {
                /*
                 * Either there weren't the two bytes buffered up, or they
                 * didn't have an end-of-packet indication.
                 * We flush the transmit FIFO just in case (by setting the
                 * XMTFWU bit with the transmitter disabled).
                 */
                out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
                out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
                udelay(1);
                out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
                out_8(&mb->xmtfc, AUTO_PAD_XMIT);
            }
        }
        /* dma should have finished */
        if (i == mp->tx_fill) {
            printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
                   fs, xcount, dstat);
            continue;
        }
        /* Update stats */
        if (fs & (UFLO|LCOL|LCAR|RTRY)) {
            ++dev->stats.tx_errors;
            if (fs & LCAR)
                ++dev->stats.tx_carrier_errors;
            if (fs & (UFLO|LCOL|RTRY))
                ++dev->stats.tx_aborted_errors;
        } else {
            dev->stats.tx_bytes += mp->tx_bufs[i]->len;
            ++dev->stats.tx_packets;
        }
        dev_kfree_skb_irq(mp->tx_bufs[i]);
        --mp->tx_active;
        if (++i >= N_TX_RING)
            i = 0;
#if 0
        mace_last_fs = fs;
        mace_last_xcount = xcount;
#endif
    }

    if (i != mp->tx_empty) {
        mp->tx_fullup = 0;
        netif_wake_queue(dev);
    }
    mp->tx_empty = i;
    i += mp->tx_active;
    if (i >= N_TX_RING)
        i -= N_TX_RING;
    if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
        do {
            /* set up the next one */
            cp = mp->tx_cmds + NCMDS_TX * i;
            out_le16(&cp->xfer_status, 0);
            out_le16(&cp->command, OUTPUT_LAST);
            ++mp->tx_active;
            if (++i >= N_TX_RING)
                i = 0;
        } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
        out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
        mace_set_timeout(dev);
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}

static void mace_tx_timeout(unsigned long data)
{
    struct net_device *dev = (struct net_device *) data;
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp;
    unsigned long flags;
    int i;

    spin_lock_irqsave(&mp->lock, flags);
    mp->timeout_active = 0;
    if (mp->tx_active == 0 && !mp->tx_bad_runt)
        goto out;

    /* update various counters */
    mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);

    cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;

    /* turn off both tx and rx and reset the chip */
    out_8(&mb->maccc, 0);
    printk(KERN_ERR "mace: transmit timeout - resetting\n");
    dbdma_reset(td);
    mace_reset(dev);

    /* restart rx dma */
    cp = bus_to_virt(ld_le32(&rd->cmdptr));
    dbdma_reset(rd);
    out_le16(&cp->xfer_status, 0);
    out_le32(&rd->cmdptr, virt_to_bus(cp));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* fix up the transmit side */
    i = mp->tx_empty;
    mp->tx_active = 0;
    ++dev->stats.tx_errors;
    if (mp->tx_bad_runt) {
        mp->tx_bad_runt = 0;
    } else if (i != mp->tx_fill) {
        dev_kfree_skb(mp->tx_bufs[i]);
        if (++i >= N_TX_RING)
            i = 0;
        mp->tx_empty = i;
    }
    mp->tx_fullup = 0;
    netif_wake_queue(dev);
    if (i != mp->tx_fill) {
        cp = mp->tx_cmds + NCMDS_TX * i;
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, OUTPUT_LAST);
        out_le32(&td->cmdptr, virt_to_bus(cp));
        out_le32(&td->control, (RUN << 16) | RUN);
        ++mp->tx_active;
        mace_set_timeout(dev);
    }

    /* turn it back on */
    out_8(&mb->imr, RCVINT);
    out_8(&mb->maccc, mp->maccc);

out:
    spin_unlock_irqrestore(&mp->lock, flags);
}

static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = netdev_priv(dev);
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp, *np;
    int i, nb, stat, next;
    struct sk_buff *skb;
    unsigned frame_status;
    static int mace_lost_status;
    unsigned char *data;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    for (i = mp->rx_empty; i != mp->rx_fill; ) {
        cp = mp->rx_cmds + i;
        stat = ld_le16(&cp->xfer_status);
        if ((stat & ACTIVE) == 0) {
            next = i + 1;
            if (next >= N_RX_RING)
                next = 0;
            np = mp->rx_cmds + next;
            if (next != mp->rx_fill
                && (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
                printk(KERN_DEBUG "mace: lost a status word\n");
                ++mace_lost_status;
            } else
                break;
        }
        nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
        out_le16(&cp->command, DBDMA_STOP);
        /* got a packet, have a look at it */
        skb = mp->rx_bufs[i];
        if (!skb) {
            ++dev->stats.rx_dropped;
        } else if (nb > 8) {
            data = skb->data;
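            /*
             * The chip appends its receive frame status to the data
             * in the buffer; pick the RS_* status bits out of the
             * trailing status bytes.
             */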
            frame_status = (data[nb-3] << 8) + data[nb-4];
            if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
                ++dev->stats.rx_errors;
                if (frame_status & RS_OFLO)
                    ++dev->stats.rx_over_errors;
                if (frame_status & RS_FRAMERR)
                    ++dev->stats.rx_frame_errors;
                if (frame_status & RS_FCSERR)
                    ++dev->stats.rx_crc_errors;
            } else {
                /* Mace feature AUTO_STRIP_RCV is on by default, dropping the
                 * FCS on frames with 802.3 headers. This means that Ethernet
                 * frames have 8 extra octets at the end, while 802.3 frames
                 * have only 4. We need to correctly account for this. */
                if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
                    nb -= 4;
                else    /* Ethernet header; mace includes FCS */
                    nb -= 8;
                skb_put(skb, nb);
                skb->protocol = eth_type_trans(skb, dev);
                dev->stats.rx_bytes += skb->len;
                netif_rx(skb);
                mp->rx_bufs[i] = NULL;
                ++dev->stats.rx_packets;
            }
        } else {
            ++dev->stats.rx_errors;
            ++dev->stats.rx_length_errors;
        }

        /* advance to next */
        if (++i >= N_RX_RING)
            i = 0;
    }
    mp->rx_empty = i;

    i = mp->rx_fill;
    for (;;) {
        next = i + 1;
        if (next >= N_RX_RING)
            next = 0;
        if (next == mp->rx_empty)
            break;
        cp = mp->rx_cmds + i;
        skb = mp->rx_bufs[i];
        if (!skb) {
            skb = dev_alloc_skb(RX_BUFLEN + 2);
            if (skb) {
                skb_reserve(skb, 2);
                mp->rx_bufs[i] = skb;
            }
        }
        st_le16(&cp->req_count, RX_BUFLEN);
        data = skb? skb->data: dummy_buf;
        st_le32(&cp->phy_addr, virt_to_bus(data));
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
#if 0
        if ((ld_le32(&rd->status) & ACTIVE) != 0) {
            out_le32(&rd->control, (PAUSE << 16) | PAUSE);
            while ((in_le32(&rd->status) & ACTIVE) != 0)
                ;
        }
#endif
        i = next;
    }
    if (i != mp->rx_fill) {
        out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
        mp->rx_fill = i;
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}

static struct of_device_id mace_match[] =
{
        {
        .name           = "mace",
        },
        {},
};
MODULE_DEVICE_TABLE (of, mace_match);

static struct macio_driver mace_driver =
{
        .name           = "mace",
        .match_table    = mace_match,
        .probe          = mace_probe,
        .remove         = mace_remove,
};


static int __init mace_init(void)
{
        return macio_register_driver(&mace_driver);
}

static void __exit mace_cleanup(void)
{
        macio_unregister_driver(&mace_driver);

        kfree(dummy_buf);
        dummy_buf = NULL;
}

MODULE_AUTHOR("Paul Mackerras");
MODULE_DESCRIPTION("PowerMac MACE driver.");
module_param(port_aaui, int, 0);
MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
MODULE_LICENSE("GPL");

module_init(mace_init);
module_exit(mace_cleanup);