linux/drivers/net/ethernet/apple/mace.c
/*
 * Network device driver for the MACE ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1996 Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/crc32.h>
#include <linux/spinlock.h>
#include <linux/bitrev.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/macio.h>

#include "mace.h"

static int port_aaui = -1;

#define N_RX_RING       8
#define N_TX_RING       6
#define MAX_TX_ACTIVE   1
#define NCMDS_TX        1       /* dma commands per element in tx ring */
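/* The MACE appends the 4-byte FCS plus a 4-byte receive status trailer to
 * each received frame (see mace_rxdma_intr), hence the extra 8 bytes. */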
#define RX_BUFLEN       (ETH_FRAME_LEN + 8)
#define TX_TIMEOUT      HZ      /* 1 second */

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV      0x0941

/* Bits in transmit DMA status */
#define TX_DMA_ERR      0x80

struct mace_data {
    volatile struct mace __iomem *mace;
    volatile struct dbdma_regs __iomem *tx_dma;
    int tx_dma_intr;
    volatile struct dbdma_regs __iomem *rx_dma;
    int rx_dma_intr;
    volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
    volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
    struct sk_buff *rx_bufs[N_RX_RING];
    int rx_fill;
    int rx_empty;
    struct sk_buff *tx_bufs[N_TX_RING];
    int tx_fill;
    int tx_empty;
    unsigned char maccc;
    unsigned char tx_fullup;
    unsigned char tx_active;
    unsigned char tx_bad_runt;
    struct timer_list tx_timeout;
    int timeout_active;
    int port_aaui;
    int chipid;
    struct macio_dev *mdev;
    spinlock_t lock;
};

/*
 * Number of bytes of private data per MACE: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES      (sizeof(struct mace_data) \
        + (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
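/* The "+ 3" above is one branch command each for the rx and tx lists plus
 * one spare command slot (16 bytes) consumed by aligning the command
 * buffers with DBDMA_ALIGN in mace_probe(). */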

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(struct timer_list *t);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * If we can't get a skbuff when we need it, we use this area for DMA.
 */
static unsigned char *dummy_buf;

static const struct net_device_ops mace_netdev_ops = {
        .ndo_open               = mace_open,
        .ndo_stop               = mace_close,
        .ndo_start_xmit         = mace_xmit_start,
        .ndo_set_rx_mode        = mace_set_multicast,
        .ndo_set_mac_address    = mace_set_address,
        .ndo_validate_addr      = eth_validate_addr,
};

static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
        struct device_node *mace = macio_get_of_node(mdev);
        struct net_device *dev;
        struct mace_data *mp;
        const unsigned char *addr;
        int j, rev, rc = -EBUSY;

        if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
                printk(KERN_ERR "can't use MACE %pOF: need 3 addrs and 3 irqs\n",
                       mace);
                return -ENODEV;
        }

        addr = of_get_property(mace, "mac-address", NULL);
        if (addr == NULL) {
                addr = of_get_property(mace, "local-mac-address", NULL);
                if (addr == NULL) {
                        printk(KERN_ERR "Can't get mac-address for MACE %pOF\n",
                               mace);
                        return -ENODEV;
                }
        }

        /*
         * lazy allocate the driver-wide dummy buffer. (Note that we
         * never have more than one MACE in the system anyway)
         */
        if (dummy_buf == NULL) {
                dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
                if (dummy_buf == NULL)
                        return -ENOMEM;
        }

        if (macio_request_resources(mdev, "mace")) {
                printk(KERN_ERR "MACE: can't request IO resources !\n");
                return -EBUSY;
        }

        dev = alloc_etherdev(PRIV_BYTES);
        if (!dev) {
                rc = -ENOMEM;
                goto err_release;
        }
        SET_NETDEV_DEV(dev, &mdev->ofdev.dev);

        mp = netdev_priv(dev);
        mp->mdev = mdev;
        macio_set_drvdata(mdev, dev);

        dev->base_addr = macio_resource_start(mdev, 0);
        mp->mace = ioremap(dev->base_addr, 0x1000);
        if (mp->mace == NULL) {
                printk(KERN_ERR "MACE: can't map IO resources !\n");
                rc = -ENOMEM;
                goto err_free;
        }
        dev->irq = macio_irq(mdev, 0);

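        /*
         * Some firmware stores the MAC address bit-reversed within each
         * byte; a stored address beginning 00:A0 is taken to be such
         * (bitrev8(0xA0) == 0x05, i.e. an Apple 00:05:02 OUI prefix).
         */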
        rev = addr[0] == 0 && addr[1] == 0xA0;
        for (j = 0; j < 6; ++j) {
                dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
        }
        mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
                        in_8(&mp->mace->chipid_lo);

        mp->maccc = ENXMT | ENRCV;

        mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
        if (mp->tx_dma == NULL) {
                printk(KERN_ERR "MACE: can't map TX DMA resources !\n");
                rc = -ENOMEM;
                goto err_unmap_io;
        }
        mp->tx_dma_intr = macio_irq(mdev, 1);

        mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
        if (mp->rx_dma == NULL) {
                printk(KERN_ERR "MACE: can't map RX DMA resources !\n");
                rc = -ENOMEM;
                goto err_unmap_tx_dma;
        }
        mp->rx_dma_intr = macio_irq(mdev, 2);

        mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
        mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;

        memset((char *) mp->tx_cmds, 0,
               (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
        timer_setup(&mp->tx_timeout, mace_tx_timeout, 0);
        spin_lock_init(&mp->lock);
        mp->timeout_active = 0;

        if (port_aaui >= 0)
                mp->port_aaui = port_aaui;
        else {
                /* Apple Network Server uses the AAUI port */
                if (of_machine_is_compatible("AAPL,ShinerESB"))
                        mp->port_aaui = 1;
                else {
#ifdef CONFIG_MACE_AAUI_PORT
                        mp->port_aaui = 1;
#else
                        mp->port_aaui = 0;
#endif
                }
        }

        dev->netdev_ops = &mace_netdev_ops;

        /*
         * Most of what is below could be moved to mace_open()
         */
        mace_reset(dev);

        rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
        if (rc) {
                printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
                goto err_unmap_rx_dma;
        }
        rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
        if (rc) {
                printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
                goto err_free_irq;
        }
        rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
        if (rc) {
                printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
                goto err_free_tx_irq;
        }

        rc = register_netdev(dev);
        if (rc) {
                printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
                goto err_free_rx_irq;
        }

        printk(KERN_INFO "%s: MACE at %pM, chip revision %d.%d\n",
               dev->name, dev->dev_addr,
               mp->chipid >> 8, mp->chipid & 0xff);

        return 0;

 err_free_rx_irq:
        free_irq(macio_irq(mdev, 2), dev);
 err_free_tx_irq:
        free_irq(macio_irq(mdev, 1), dev);
 err_free_irq:
        free_irq(macio_irq(mdev, 0), dev);
 err_unmap_rx_dma:
        iounmap(mp->rx_dma);
 err_unmap_tx_dma:
        iounmap(mp->tx_dma);
 err_unmap_io:
        iounmap(mp->mace);
 err_free:
        free_netdev(dev);
 err_release:
        macio_release_resources(mdev);

        return rc;
}

static int mace_remove(struct macio_dev *mdev)
{
        struct net_device *dev = macio_get_drvdata(mdev);
        struct mace_data *mp;

        BUG_ON(dev == NULL);

        macio_set_drvdata(mdev, NULL);

        mp = netdev_priv(dev);

        unregister_netdev(dev);

        free_irq(dev->irq, dev);
        free_irq(mp->tx_dma_intr, dev);
        free_irq(mp->rx_dma_intr, dev);

        iounmap(mp->rx_dma);
        iounmap(mp->tx_dma);
        iounmap(mp->mace);

        free_netdev(dev);

        macio_release_resources(mdev);

        return 0;
}

static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
{
    int i;

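    /*
     * DBDMA control-register writes use the high 16 bits as a mask
     * selecting which of the low 16 bits to change, so this clears
     * RUN, PAUSE, FLUSH and WAKE without touching anything else.
     */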
    out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

    /*
     * Yes this looks peculiar, but apparently it needs to be this
     * way on some machines.
     */
    for (i = 200; i > 0; --i)
        if (in_le32(&dma->control) & RUN)
            udelay(1);          /* wait up to ~200us for RUN to clear */
}

static void mace_reset(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    int i;

    /* soft-reset the chip */
    i = 200;
    while (--i) {
        out_8(&mb->biucc, SWRST);
        if (in_8(&mb->biucc) & SWRST) {
            udelay(10);
            continue;
        }
        break;
    }
    if (!i) {
        printk(KERN_ERR "mace: cannot reset chip!\n");
        return;
    }

    out_8(&mb->imr, 0xff);      /* disable all intrs for now */
    i = in_8(&mb->ir);
    out_8(&mb->maccc, 0);       /* turn off tx, rx */

    out_8(&mb->biucc, XMTSP_64);
    out_8(&mb->utr, RTRD);
    out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
    out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */
    out_8(&mb->rcvfc, 0);

    /* load up the hardware address */
    __mace_set_address(dev, dev->dev_addr);

    /* clear the multicast filter */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, LOGADDR);
    else {
        out_8(&mb->iac, ADDRCHG | LOGADDR);
        while ((in_8(&mb->iac) & ADDRCHG) != 0)
            ;
    }
    for (i = 0; i < 8; ++i)
        out_8(&mb->ladrf, 0);

    /* done changing address */
    if (mp->chipid != BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, 0);

    if (mp->port_aaui)
        out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
    else
        out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}

static void __mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    unsigned char *p = addr;
    int i;

    /* load up the hardware address */
    if (mp->chipid == BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, PHYADDR);
    else {
        out_8(&mb->iac, ADDRCHG | PHYADDR);
        while ((in_8(&mb->iac) & ADDRCHG) != 0)
            ;
    }
    for (i = 0; i < 6; ++i)
        out_8(&mb->padr, dev->dev_addr[i] = p[i]);
    if (mp->chipid != BROKEN_ADDRCHG_REV)
        out_8(&mb->iac, 0);
}

static int mace_set_address(struct net_device *dev, void *addr)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);

    __mace_set_address(dev, addr);

    /* note: setting ADDRCHG clears ENRCV */
    out_8(&mb->maccc, mp->maccc);

    spin_unlock_irqrestore(&mp->lock, flags);
    return 0;
}

static inline void mace_clean_rings(struct mace_data *mp)
{
    int i;

    /* free some skb's */
    for (i = 0; i < N_RX_RING; ++i) {
        if (mp->rx_bufs[i] != NULL) {
            dev_kfree_skb(mp->rx_bufs[i]);
            mp->rx_bufs[i] = NULL;
        }
    }
    for (i = mp->tx_empty; i != mp->tx_fill; ) {
        dev_kfree_skb(mp->tx_bufs[i]);
        if (++i >= N_TX_RING)
            i = 0;
    }
}

static int mace_open(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int i;
    struct sk_buff *skb;
    unsigned char *data;

    /* reset the chip */
    mace_reset(dev);

    /* initialize list of sk_buffs for receiving and set up recv dma */
    mace_clean_rings(mp);
    memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
    cp = mp->rx_cmds;
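    /*
     * Fill only N_RX_RING - 1 slots, so rx_fill == rx_empty always
     * means "ring empty" and never "ring full".
     */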
    for (i = 0; i < N_RX_RING - 1; ++i) {
        skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
        if (!skb) {
            data = dummy_buf;
        } else {
            skb_reserve(skb, 2);        /* so IP header lands on 4-byte bdry */
            data = skb->data;
        }
        mp->rx_bufs[i] = skb;
        cp->req_count = cpu_to_le16(RX_BUFLEN);
        cp->command = cpu_to_le16(INPUT_LAST + INTR_ALWAYS);
        cp->phy_addr = cpu_to_le32(virt_to_bus(data));
        cp->xfer_status = 0;
        ++cp;
    }
    mp->rx_bufs[i] = NULL;
    cp->command = cpu_to_le16(DBDMA_STOP);
    mp->rx_fill = i;
    mp->rx_empty = 0;

    /* Put a branch back to the beginning of the receive command list */
    ++cp;
    cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
    cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->rx_cmds));

    /* start rx dma */
    out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* put a branch at the end of the tx command list */
    cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
    cp->command = cpu_to_le16(DBDMA_NOP + BR_ALWAYS);
    cp->cmd_dep = cpu_to_le32(virt_to_bus(mp->tx_cmds));

    /* reset tx dma */
    out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
    out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
    mp->tx_fill = 0;
    mp->tx_empty = 0;
    mp->tx_fullup = 0;
    mp->tx_active = 0;
    mp->tx_bad_runt = 0;

    /* turn it on! */
    out_8(&mb->maccc, mp->maccc);
    /* enable all interrupts except receive interrupts */
    out_8(&mb->imr, RCVINT);

    return 0;
}

static int mace_close(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;

    /* disable rx and tx */
    out_8(&mb->maccc, 0);
    out_8(&mb->imr, 0xff);              /* disable all intrs */

    /* disable rx and tx dma */
    out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
    out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */

    mace_clean_rings(mp);

    return 0;
}

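/* (Re)arm the transmit watchdog timer; all callers hold mp->lock. */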
static inline void mace_set_timeout(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);

    if (mp->timeout_active)
        del_timer(&mp->tx_timeout);
    mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
    add_timer(&mp->tx_timeout);
    mp->timeout_active = 1;
}

static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp, *np;
    unsigned long flags;
    int fill, next, len;

    /* see if there's a free slot in the tx ring */
    spin_lock_irqsave(&mp->lock, flags);
    fill = mp->tx_fill;
    next = fill + 1;
    if (next >= N_TX_RING)
        next = 0;
    if (next == mp->tx_empty) {
        netif_stop_queue(dev);
        mp->tx_fullup = 1;
        spin_unlock_irqrestore(&mp->lock, flags);
        return NETDEV_TX_BUSY;          /* can't take it at the moment */
    }
    spin_unlock_irqrestore(&mp->lock, flags);

    /* partially fill in the dma command block */
    len = skb->len;
    if (len > ETH_FRAME_LEN) {
        printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
        len = ETH_FRAME_LEN;
    }
    mp->tx_bufs[fill] = skb;
    cp = mp->tx_cmds + NCMDS_TX * fill;
    cp->req_count = cpu_to_le16(len);
    cp->phy_addr = cpu_to_le32(virt_to_bus(skb->data));

    np = mp->tx_cmds + NCMDS_TX * next;
    out_le16(&np->command, DBDMA_STOP);
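    /* The STOP in the next slot fences the channel: DMA can never run
     * past the last descriptor we have completely filled in. */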

    /* poke the tx dma channel */
    spin_lock_irqsave(&mp->lock, flags);
    mp->tx_fill = next;
    if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, OUTPUT_LAST);
        out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
        ++mp->tx_active;
        mace_set_timeout(dev);
    }
    if (++next >= N_TX_RING)
        next = 0;
    if (next == mp->tx_empty)
        netif_stop_queue(dev);
    spin_unlock_irqrestore(&mp->lock, flags);

    return NETDEV_TX_OK;
}

static void mace_set_multicast(struct net_device *dev)
{
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    int i;
    u32 crc;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    mp->maccc &= ~PROM;
    if (dev->flags & IFF_PROMISC) {
        mp->maccc |= PROM;
    } else {
        unsigned char multicast_filter[8];
        struct netdev_hw_addr *ha;

        if (dev->flags & IFF_ALLMULTI) {
            for (i = 0; i < 8; i++)
                multicast_filter[i] = 0xff;
        } else {
            for (i = 0; i < 8; i++)
                multicast_filter[i] = 0;
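            /*
             * Hash each address with the little-endian Ethernet CRC;
             * the top 6 bits of the CRC select one of the 64 bits in
             * the 8-byte logical address filter (ladrf).
             */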
            netdev_for_each_mc_addr(ha, dev) {
                crc = ether_crc_le(6, ha->addr);
                i = crc >> 26;  /* bit number in multicast_filter */
                multicast_filter[i >> 3] |= 1 << (i & 7);
            }
        }
#if 0
        printk("Multicast filter :");
        for (i = 0; i < 8; i++)
            printk("%02x ", multicast_filter[i]);
        printk("\n");
#endif

        if (mp->chipid == BROKEN_ADDRCHG_REV)
            out_8(&mb->iac, LOGADDR);
        else {
            out_8(&mb->iac, ADDRCHG | LOGADDR);
            while ((in_8(&mb->iac) & ADDRCHG) != 0)
                ;
        }
        for (i = 0; i < 8; ++i)
            out_8(&mb->ladrf, multicast_filter[i]);
        if (mp->chipid != BROKEN_ADDRCHG_REV)
            out_8(&mb->iac, 0);
    }
    /* reset maccc */
    out_8(&mb->maccc, mp->maccc);
    spin_unlock_irqrestore(&mp->lock, flags);
}

static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
{
    volatile struct mace __iomem *mb = mp->mace;
    static int mace_babbles, mace_jabbers;

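    /*
     * mpc and rntpc are 8-bit hardware counters that clear on read;
     * the MPCO/RNTPCO interrupt bits flag a counter overflow, so each
     * overflow seen accounts for a further 256 events.
     */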
    if (intr & MPCO)
        dev->stats.rx_missed_errors += 256;
    dev->stats.rx_missed_errors += in_8(&mb->mpc);   /* reading clears it */
    if (intr & RNTPCO)
        dev->stats.rx_length_errors += 256;
    dev->stats.rx_length_errors += in_8(&mb->rntpc); /* reading clears it */
    if (intr & CERR)
        ++dev->stats.tx_heartbeat_errors;
    if (intr & BABBLE)
        if (mace_babbles++ < 4)
            printk(KERN_DEBUG "mace: babbling transmitter\n");
    if (intr & JABBER)
        if (mace_jabbers++ < 4)
            printk(KERN_DEBUG "mace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = netdev_priv(dev);
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_cmd *cp;
    int intr, fs, i, stat, x;
    int xcount, dstat;
    unsigned long flags;
    /* static int mace_last_fs, mace_last_xcount; */

    spin_lock_irqsave(&mp->lock, flags);
    intr = in_8(&mb->ir);               /* read interrupt register */
    in_8(&mb->xmtrc);                   /* get retries */
    mace_handle_misc_intrs(mp, intr, dev);

    i = mp->tx_empty;
    while (in_8(&mb->pr) & XMTSV) {
        del_timer(&mp->tx_timeout);
        mp->timeout_active = 0;
        /*
         * Clear any interrupt indication associated with this status
         * word.  This appears to unlatch any error indication from
         * the DMA controller.
         */
        intr = in_8(&mb->ir);
        if (intr != 0)
            mace_handle_misc_intrs(mp, intr, dev);
        if (mp->tx_bad_runt) {
            fs = in_8(&mb->xmtfs);
            mp->tx_bad_runt = 0;
            out_8(&mb->xmtfc, AUTO_PAD_XMIT);
            continue;
        }
        dstat = in_le32(&td->status);
        /* stop DMA controller */
        out_le32(&td->control, RUN << 16);
        /*
         * xcount is the number of complete frames which have been
         * written to the fifo but for which status has not been read.
         */
        xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
        if (xcount == 0 || (dstat & DEAD)) {
            /*
             * If a packet was aborted before the DMA controller has
             * finished transferring it, it seems that there are 2 bytes
             * which are stuck in some buffer somewhere.  These will get
             * transmitted as soon as we read the frame status (which
             * reenables the transmit data transfer request).  Turning
             * off the DMA controller and/or resetting the MACE doesn't
             * help.  So we disable auto-padding and FCS transmission
             * so the two bytes will only be a runt packet which should
             * be ignored by other stations.
             */
            out_8(&mb->xmtfc, DXMTFCS);
        }
        fs = in_8(&mb->xmtfs);
        if ((fs & XMTSV) == 0) {
            printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
                   fs, xcount, dstat);
            mace_reset(dev);
                /*
                 * XXX mace likes to hang the machine after a xmtfs error.
                 * This is hard to reproduce, resetting *may* help
                 */
        }
        cp = mp->tx_cmds + NCMDS_TX * i;
        stat = le16_to_cpu(cp->xfer_status);
        if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
            /*
             * Check whether there were in fact 2 bytes written to
             * the transmit FIFO.
             */
            udelay(1);
            x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
            if (x != 0) {
                /* there were two bytes with an end-of-packet indication */
                mp->tx_bad_runt = 1;
                mace_set_timeout(dev);
            } else {
                /*
                 * Either there weren't the two bytes buffered up, or they
                 * didn't have an end-of-packet indication.
                 * We flush the transmit FIFO just in case (by setting the
                 * XMTFWU bit with the transmitter disabled).
                 */
                out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
                out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
                udelay(1);
                out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
                out_8(&mb->xmtfc, AUTO_PAD_XMIT);
            }
        }
        /* dma should have finished */
        if (i == mp->tx_fill) {
            printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
                   fs, xcount, dstat);
            continue;
        }
        /* Update stats */
        if (fs & (UFLO|LCOL|LCAR|RTRY)) {
            ++dev->stats.tx_errors;
            if (fs & LCAR)
                ++dev->stats.tx_carrier_errors;
            if (fs & (UFLO|LCOL|RTRY))
                ++dev->stats.tx_aborted_errors;
        } else {
            dev->stats.tx_bytes += mp->tx_bufs[i]->len;
            ++dev->stats.tx_packets;
        }
        dev_kfree_skb_irq(mp->tx_bufs[i]);
        --mp->tx_active;
        if (++i >= N_TX_RING)
            i = 0;
#if 0
        mace_last_fs = fs;
        mace_last_xcount = xcount;
#endif
    }

    if (i != mp->tx_empty) {
        mp->tx_fullup = 0;
        netif_wake_queue(dev);
    }
    mp->tx_empty = i;
    i += mp->tx_active;
    if (i >= N_TX_RING)
        i -= N_TX_RING;
    if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
        do {
            /* set up the next one */
            cp = mp->tx_cmds + NCMDS_TX * i;
            out_le16(&cp->xfer_status, 0);
            out_le16(&cp->command, OUTPUT_LAST);
            ++mp->tx_active;
            if (++i >= N_TX_RING)
                i = 0;
        } while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
        out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
        mace_set_timeout(dev);
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}

static void mace_tx_timeout(struct timer_list *t)
{
    struct mace_data *mp = from_timer(mp, t, tx_timeout);
    struct net_device *dev = macio_get_drvdata(mp->mdev);
    volatile struct mace __iomem *mb = mp->mace;
    volatile struct dbdma_regs __iomem *td = mp->tx_dma;
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp;
    unsigned long flags;
    int i;

    spin_lock_irqsave(&mp->lock, flags);
    mp->timeout_active = 0;
    if (mp->tx_active == 0 && !mp->tx_bad_runt)
        goto out;

    /* update various counters */
    mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);

    cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;

    /* turn off both tx and rx and reset the chip */
    out_8(&mb->maccc, 0);
    printk(KERN_ERR "mace: transmit timeout - resetting\n");
    dbdma_reset(td);
    mace_reset(dev);

    /* restart rx dma */
    cp = bus_to_virt(in_le32(&rd->cmdptr));
    dbdma_reset(rd);
    out_le16(&cp->xfer_status, 0);
    out_le32(&rd->cmdptr, virt_to_bus(cp));
    out_le32(&rd->control, (RUN << 16) | RUN);

    /* fix up the transmit side */
    i = mp->tx_empty;
    mp->tx_active = 0;
    ++dev->stats.tx_errors;
    if (mp->tx_bad_runt) {
        mp->tx_bad_runt = 0;
    } else if (i != mp->tx_fill) {
        dev_kfree_skb(mp->tx_bufs[i]);
        if (++i >= N_TX_RING)
            i = 0;
        mp->tx_empty = i;
    }
    mp->tx_fullup = 0;
    netif_wake_queue(dev);
    if (i != mp->tx_fill) {
        cp = mp->tx_cmds + NCMDS_TX * i;
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, OUTPUT_LAST);
        out_le32(&td->cmdptr, virt_to_bus(cp));
        out_le32(&td->control, (RUN << 16) | RUN);
        ++mp->tx_active;
        mace_set_timeout(dev);
    }

    /* turn it back on */
    out_8(&mb->imr, RCVINT);
    out_8(&mb->maccc, mp->maccc);

out:
    spin_unlock_irqrestore(&mp->lock, flags);
}

static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
{
    struct net_device *dev = (struct net_device *) dev_id;
    struct mace_data *mp = netdev_priv(dev);
    volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
    volatile struct dbdma_cmd *cp, *np;
    int i, nb, stat, next;
    struct sk_buff *skb;
    unsigned frame_status;
    static int mace_lost_status;
    unsigned char *data;
    unsigned long flags;

    spin_lock_irqsave(&mp->lock, flags);
    for (i = mp->rx_empty; i != mp->rx_fill; ) {
        cp = mp->rx_cmds + i;
        stat = le16_to_cpu(cp->xfer_status);
        if ((stat & ACTIVE) == 0) {
            next = i + 1;
            if (next >= N_RX_RING)
                next = 0;
            np = mp->rx_cmds + next;
            if (next != mp->rx_fill &&
                (le16_to_cpu(np->xfer_status) & ACTIVE) != 0) {
                printk(KERN_DEBUG "mace: lost a status word\n");
                ++mace_lost_status;
            } else
                break;
        }
        nb = le16_to_cpu(cp->req_count) - le16_to_cpu(cp->res_count);
        out_le16(&cp->command, DBDMA_STOP);
        /* got a packet, have a look at it */
        skb = mp->rx_bufs[i];
        if (!skb) {
            ++dev->stats.rx_dropped;
        } else if (nb > 8) {
            data = skb->data;
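            /* The MACE appends a 4-byte receive status trailer; its
             * first two bytes (at nb-4 and nb-3) are combined into the
             * frame status word checked below. */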
            frame_status = (data[nb-3] << 8) + data[nb-4];
            if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
                ++dev->stats.rx_errors;
                if (frame_status & RS_OFLO)
                    ++dev->stats.rx_over_errors;
                if (frame_status & RS_FRAMERR)
                    ++dev->stats.rx_frame_errors;
                if (frame_status & RS_FCSERR)
                    ++dev->stats.rx_crc_errors;
            } else {
                /* Mace feature AUTO_STRIP_RCV is on by default, dropping the
                 * FCS on frames with 802.3 headers. This means that Ethernet
                 * frames have 8 extra octets at the end, while 802.3 frames
                 * have only 4. We need to correctly account for this. */
                if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
                    nb -= 4;
                else    /* Ethernet header; mace includes FCS */
                    nb -= 8;
                skb_put(skb, nb);
                skb->protocol = eth_type_trans(skb, dev);
                dev->stats.rx_bytes += skb->len;
                netif_rx(skb);
                mp->rx_bufs[i] = NULL;
                ++dev->stats.rx_packets;
            }
        } else {
            ++dev->stats.rx_errors;
            ++dev->stats.rx_length_errors;
        }

        /* advance to next */
        if (++i >= N_RX_RING)
            i = 0;
    }
    mp->rx_empty = i;

    i = mp->rx_fill;
    for (;;) {
        next = i + 1;
        if (next >= N_RX_RING)
            next = 0;
        if (next == mp->rx_empty)
            break;
        cp = mp->rx_cmds + i;
        skb = mp->rx_bufs[i];
        if (!skb) {
            skb = netdev_alloc_skb(dev, RX_BUFLEN + 2);
            if (skb) {
                skb_reserve(skb, 2);
                mp->rx_bufs[i] = skb;
            }
        }
        cp->req_count = cpu_to_le16(RX_BUFLEN);
        data = skb? skb->data: dummy_buf;
        cp->phy_addr = cpu_to_le32(virt_to_bus(data));
        out_le16(&cp->xfer_status, 0);
        out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
#if 0
        if ((le32_to_cpu(rd->status) & ACTIVE) != 0) {
            out_le32(&rd->control, (PAUSE << 16) | PAUSE);
            while ((in_le32(&rd->status) & ACTIVE) != 0)
                ;
        }
#endif
        i = next;
    }
    if (i != mp->rx_fill) {
        out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
        mp->rx_fill = i;
    }
    spin_unlock_irqrestore(&mp->lock, flags);
    return IRQ_HANDLED;
}

static const struct of_device_id mace_match[] =
{
        {
        .name           = "mace",
        },
        {},
};
MODULE_DEVICE_TABLE(of, mace_match);

static struct macio_driver mace_driver =
{
        .driver = {
                .name           = "mace",
                .owner          = THIS_MODULE,
                .of_match_table = mace_match,
        },
        .probe          = mace_probe,
        .remove         = mace_remove,
};

static int __init mace_init(void)
{
        return macio_register_driver(&mace_driver);
}

static void __exit mace_cleanup(void)
{
        macio_unregister_driver(&mace_driver);

        kfree(dummy_buf);
        dummy_buf = NULL;
}

MODULE_AUTHOR("Paul Mackerras");
MODULE_DESCRIPTION("PowerMac MACE driver.");
module_param(port_aaui, int, 0);
MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
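/*
 * Example (hypothetical invocation): force the AAUI transceiver port at
 * load time with "modprobe mace port_aaui=1"; leaving the parameter unset
 * (-1) lets the driver choose based on the machine type, as above.
 */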
MODULE_LICENSE("GPL");

module_init(mace_init);
module_exit(mace_cleanup);