linux/drivers/net/macmace.c
/*
 *      Driver for the Macintosh 68K onboard MACE controller with PSC
 *      driven DMA. The MACE driver code is derived from mace.c. The
 *      Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Copyright (C) 1996 Paul Mackerras.
 *      Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *      Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 *
 *      Copyright (C) 2007 Finn Thain
 *
 *      Converted to DMA API, converted to unified driver model,
 *      sync'd some routines with mace.c and fixed various bugs.
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"

static char mac_mace_string[] = "macmace";
static struct platform_device *mac_mace_device;

#define N_TX_BUFF_ORDER 0
#define N_TX_RING       (1 << N_TX_BUFF_ORDER)
#define N_RX_BUFF_ORDER 3
#define N_RX_RING       (1 << N_RX_BUFF_ORDER)

#define TX_TIMEOUT      HZ

#define MACE_BUFF_SIZE  0x800
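
/*
 * Each ring slot is a single MACE_BUFF_SIZE (2 KiB) buffer: N_TX_RING (1)
 * slot for transmit and N_RX_RING (8) slots for receive, allocated as
 * DMA-coherent memory in mace_open().
 */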

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV      0x0941
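
/*
 * On the broken revision the driver skips the ADDRCHG handshake when it
 * loads the physical or logical address registers; see __mace_set_address()
 * and mace_set_multicast().
 */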

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE       (void *)(0x50F1C000)
#define MACE_PROM       (void *)(0x50F08001)

struct mace_data {
        volatile struct mace *mace;
        unsigned char *tx_ring;
        dma_addr_t tx_ring_phys;
        unsigned char *rx_ring;
        dma_addr_t rx_ring_phys;
        int dma_intr;
        int rx_slot, rx_tail;
        int tx_slot, tx_sloti, tx_count;
        int chipid;
        struct device *device;
};
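
/*
 * rx_slot and tx_slot select which of the two PSC DMA register sets
 * (PSC_SET0/PSC_SET1, apparently 0x10 apart given the "^= 0x10" toggles
 * below) the next transfer uses; tx_sloti tracks the set whose completion
 * the DMA interrupt handler services next.
 */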

struct mace_frame {
        u8      rcvcnt;
        u8      pad1;
        u8      rcvsts;
        u8      pad2;
        u8      rntpc;
        u8      pad3;
        u8      rcvcc;
        u8      pad4;
        u32     pad5;
        u32     pad6;
        u8      data[1];
        /* And frame continues.. */
};
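
/*
 * The received frame length is reconstructed from rcvcnt plus the low
 * nibble of rcvsts; see mace_dma_rx_frame().
 */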

#define PRIV_BYTES      sizeof(struct mace_data)

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * Load a receive DMA channel with a base address and ring length
 */

static void mace_load_rxdma_base(struct net_device *dev, int set)
{
        struct mace_data *mp = netdev_priv(dev);

        psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
        psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
        psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
        psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
        mp->rx_tail = 0;
}

/*
 * Reset the receive DMA subsystem
 */

static void mace_rxdma_reset(struct net_device *dev)
{
        struct mace_data *mp = netdev_priv(dev);
        volatile struct mace *mace = mp->mace;
        u8 maccc = mace->maccc;

        mace->maccc = maccc & ~ENRCV;

        psc_write_word(PSC_ENETRD_CTL, 0x8800);
        mace_load_rxdma_base(dev, 0x00);
        psc_write_word(PSC_ENETRD_CTL, 0x0400);

        psc_write_word(PSC_ENETRD_CTL, 0x8800);
        mace_load_rxdma_base(dev, 0x10);
        psc_write_word(PSC_ENETRD_CTL, 0x0400);

        mace->maccc = maccc;
        mp->rx_slot = 0;

        psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
        psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}

/*
 * Reset the transmit DMA subsystem
 */

static void mace_txdma_reset(struct net_device *dev)
{
        struct mace_data *mp = netdev_priv(dev);
        volatile struct mace *mace = mp->mace;
        u8 maccc;

        psc_write_word(PSC_ENETWR_CTL, 0x8800);

        maccc = mace->maccc;
        mace->maccc = maccc & ~ENXMT;

        mp->tx_slot = mp->tx_sloti = 0;
        mp->tx_count = N_TX_RING;

        psc_write_word(PSC_ENETWR_CTL, 0x0400);
        mace->maccc = maccc;
}

/*
 * Disable DMA
 */

static void mace_dma_off(struct net_device *dev)
{
        psc_write_word(PSC_ENETRD_CTL, 0x8800);
        psc_write_word(PSC_ENETRD_CTL, 0x1000);
        psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
        psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

        psc_write_word(PSC_ENETWR_CTL, 0x8800);
        psc_write_word(PSC_ENETWR_CTL, 0x1000);
        psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
        psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}

static const struct net_device_ops mace_netdev_ops = {
        .ndo_open               = mace_open,
        .ndo_stop               = mace_close,
        .ndo_start_xmit         = mace_xmit_start,
        .ndo_tx_timeout         = mace_tx_timeout,
        .ndo_set_multicast_list = mace_set_multicast,
        .ndo_set_mac_address    = mace_set_address,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
};

/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (AV macintoshes)
 */

static int __devinit mace_probe(struct platform_device *pdev)
{
        int j;
        struct mace_data *mp;
        unsigned char *addr;
        struct net_device *dev;
        unsigned char checksum = 0;
        static int found = 0;
        int err;

        if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
                return -ENODEV;

        found = 1;      /* prevent 'finding' one on every device probe */

        dev = alloc_etherdev(PRIV_BYTES);
        if (!dev)
                return -ENOMEM;

        mp = netdev_priv(dev);

        mp->device = &pdev->dev;
        SET_NETDEV_DEV(dev, &pdev->dev);

        dev->base_addr = (u32)MACE_BASE;
        mp->mace = (volatile struct mace *) MACE_BASE;

        dev->irq = IRQ_MAC_MACE;
        mp->dma_intr = IRQ_MAC_MACE_DMA;

        mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;

        /*
         * The PROM contains 8 bytes which XOR together to 0xFF. Due to
         * the usual peculiar Apple brain damage the bytes are spaced 16
         * apart in the PROM and each one is stored bit-reversed.
         */

        addr = (void *)MACE_PROM;

        for (j = 0; j < 6; ++j) {
                u8 v = bitrev8(addr[j<<4]);
                checksum ^= v;
                dev->dev_addr[j] = v;
        }
        for (; j < 8; ++j) {
                checksum ^= bitrev8(addr[j<<4]);
        }

        if (checksum != 0xFF) {
                free_netdev(dev);
                return -ENODEV;
        }

        dev->netdev_ops         = &mace_netdev_ops;
        dev->watchdog_timeo     = TX_TIMEOUT;

        printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n",
               dev->name, dev->dev_addr);

        err = register_netdev(dev);
        if (!err)
                return 0;

        free_netdev(dev);
        return err;
}

/*
 * Reset the chip.
 */

static void mace_reset(struct net_device *dev)
{
        struct mace_data *mp = netdev_priv(dev);
        volatile struct mace *mb = mp->mace;
        int i;

        /* soft-reset the chip */
        i = 200;
        while (--i) {
                mb->biucc = SWRST;
                if (mb->biucc & SWRST) {
                        udelay(10);
                        continue;
                }
                break;
        }
        if (!i) {
                printk(KERN_ERR "macmace: cannot reset chip!\n");
                return;
        }

        mb->maccc = 0;  /* turn off tx, rx */
        mb->imr = 0xFF; /* disable all intrs for now */
        i = mb->ir;

        mb->biucc = XMTSP_64;
        mb->utr = RTRD;
        mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

        mb->xmtfc = AUTO_PAD_XMIT; /* auto-pad short frames */
        mb->rcvfc = 0;

        /* load up the hardware address */
        __mace_set_address(dev, dev->dev_addr);

        /* clear the multicast filter */
        if (mp->chipid == BROKEN_ADDRCHG_REV)
                mb->iac = LOGADDR;
        else {
                mb->iac = ADDRCHG | LOGADDR;
                while ((mb->iac & ADDRCHG) != 0)
                        ;
        }
        for (i = 0; i < 8; ++i)
                mb->ladrf = 0;

        /* done changing address */
        if (mp->chipid != BROKEN_ADDRCHG_REV)
                mb->iac = 0;

        mb->plscc = PORTSEL_AUI;
}

/*
 * Load the address on a mace controller.
 */

static void __mace_set_address(struct net_device *dev, void *addr)
{
        struct mace_data *mp = netdev_priv(dev);
        volatile struct mace *mb = mp->mace;
        unsigned char *p = addr;
        int i;

        /* load up the hardware address */
        if (mp->chipid == BROKEN_ADDRCHG_REV)
                mb->iac = PHYADDR;
        else {
                mb->iac = ADDRCHG | PHYADDR;
                while ((mb->iac & ADDRCHG) != 0)
                        ;
        }
        for (i = 0; i < 6; ++i)
                mb->padr = dev->dev_addr[i] = p[i];
        if (mp->chipid != BROKEN_ADDRCHG_REV)
                mb->iac = 0;
}

static int mace_set_address(struct net_device *dev, void *addr)
{
        struct mace_data *mp = netdev_priv(dev);
        volatile struct mace *mb = mp->mace;
        unsigned long flags;
        u8 maccc;

        local_irq_save(flags);

        maccc = mb->maccc;

        __mace_set_address(dev, addr);

        mb->maccc = maccc;

        local_irq_restore(flags);

        return 0;
}

/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */

static int mace_open(struct net_device *dev)
{
        struct mace_data *mp = netdev_priv(dev);
        volatile struct mace *mb = mp->mace;

        /* reset the chip */
        mace_reset(dev);

        if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
                printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
                return -EAGAIN;
        }
        if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
                printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
                free_irq(dev->irq, dev);
                return -EAGAIN;
        }

        /* Allocate the DMA ring buffers */

        mp->tx_ring = dma_alloc_coherent(mp->device,
                        N_TX_RING * MACE_BUFF_SIZE,
                        &mp->tx_ring_phys, GFP_KERNEL);
        if (mp->tx_ring == NULL) {
                printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
                goto out1;
        }

        mp->rx_ring = dma_alloc_coherent(mp->device,
                        N_RX_RING * MACE_BUFF_SIZE,
                        &mp->rx_ring_phys, GFP_KERNEL);
        if (mp->rx_ring == NULL) {
                printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
                goto out2;
        }

        mace_dma_off(dev);

        /* Not sure what these do */

        psc_write_word(PSC_ENETWR_CTL, 0x9000);
        psc_write_word(PSC_ENETRD_CTL, 0x9000);
        psc_write_word(PSC_ENETWR_CTL, 0x0400);
        psc_write_word(PSC_ENETRD_CTL, 0x0400);

        mace_rxdma_reset(dev);
        mace_txdma_reset(dev);

        /* turn it on! */
        mb->maccc = ENXMT | ENRCV;
        /* enable all interrupts except receive interrupts */
        mb->imr = RCVINT;
        return 0;

out2:
        dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
                          mp->tx_ring, mp->tx_ring_phys);
out1:
        free_irq(dev->irq, dev);
        free_irq(mp->dma_intr, dev);
        return -ENOMEM;
}

/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
        struct mace_data *mp = netdev_priv(dev);
        volatile struct mace *mb = mp->mace;

        mb->maccc = 0;          /* disable rx and tx     */
        mb->imr = 0xFF;         /* disable all irqs      */
        mace_dma_off(dev);      /* disable rx and tx dma */

        return 0;
}

/*
 * Transmit a frame
 */

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
        struct mace_data *mp = netdev_priv(dev);
        unsigned long flags;

        /* Stop the queue since there's only the one buffer */

        local_irq_save(flags);
        netif_stop_queue(dev);
        if (!mp->tx_count) {
                printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
                local_irq_restore(flags);
                return NETDEV_TX_BUSY;
        }
        mp->tx_count--;
        local_irq_restore(flags);

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;

        /* We need to copy into our xmit buffer to take care of alignment and caching issues */
        skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

        /* load the Tx DMA and fire it off */

        psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
        psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
        psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

        mp->tx_slot ^= 0x10;

        dev_kfree_skb(skb);

        dev->trans_start = jiffies;
        return NETDEV_TX_OK;
}

static void mace_set_multicast(struct net_device *dev)
{
        struct mace_data *mp = netdev_priv(dev);
        volatile struct mace *mb = mp->mace;
        int i, j;
        u32 crc;
        u8 maccc;
        unsigned long flags;

        local_irq_save(flags);
        maccc = mb->maccc;
        mb->maccc &= ~PROM;

        if (dev->flags & IFF_PROMISC) {
                mb->maccc |= PROM;
        } else {
                unsigned char multicast_filter[8];
                struct dev_mc_list *dmi = dev->mc_list;

                if (dev->flags & IFF_ALLMULTI) {
                        for (i = 0; i < 8; i++) {
                                multicast_filter[i] = 0xFF;
                        }
                } else {
                        for (i = 0; i < 8; i++)
                                multicast_filter[i] = 0;
                        for (i = 0; i < dev->mc_count; i++) {
                                crc = ether_crc_le(6, dmi->dmi_addr);
                                j = crc >> 26;  /* bit number in multicast_filter */
                                multicast_filter[j >> 3] |= 1 << (j & 7);
                                dmi = dmi->next;
                        }
                }

                if (mp->chipid == BROKEN_ADDRCHG_REV)
                        mb->iac = LOGADDR;
                else {
                        mb->iac = ADDRCHG | LOGADDR;
                        while ((mb->iac & ADDRCHG) != 0)
                                ;
                }
                for (i = 0; i < 8; ++i)
                        mb->ladrf = multicast_filter[i];
                if (mp->chipid != BROKEN_ADDRCHG_REV)
                        mb->iac = 0;
        }

        mb->maccc = maccc;
        local_irq_restore(flags);
}

static void mace_handle_misc_intrs(struct net_device *dev, int intr)
{
        struct mace_data *mp = netdev_priv(dev);
        volatile struct mace *mb = mp->mace;
        static int mace_babbles, mace_jabbers;

        if (intr & MPCO)
                dev->stats.rx_missed_errors += 256;
        dev->stats.rx_missed_errors += mb->mpc;   /* reading clears it */
        if (intr & RNTPCO)
                dev->stats.rx_length_errors += 256;
        dev->stats.rx_length_errors += mb->rntpc; /* reading clears it */
        if (intr & CERR)
                ++dev->stats.tx_heartbeat_errors;
        if (intr & BABBLE)
                if (mace_babbles++ < 4)
                        printk(KERN_DEBUG "macmace: babbling transmitter\n");
        if (intr & JABBER)
                if (mace_jabbers++ < 4)
                        printk(KERN_DEBUG "macmace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct mace_data *mp = netdev_priv(dev);
        volatile struct mace *mb = mp->mace;
        int intr, fs;
        unsigned long flags;

        /* don't want the dma interrupt handler to fire */
        local_irq_save(flags);

        intr = mb->ir; /* read interrupt register */
        mace_handle_misc_intrs(dev, intr);

        if (intr & XMTINT) {
                fs = mb->xmtfs;
                if ((fs & XMTSV) == 0) {
                        printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
                        mace_reset(dev);
                        /*
                         * XXX mace likes to hang the machine after an xmtfs
                         * error. This is hard to reproduce; resetting *may*
                         * help.
                         */
                }
                /* dma should have finished */
                if (!mp->tx_count) {
                        printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
                }
                /* Update stats */
                if (fs & (UFLO|LCOL|LCAR|RTRY)) {
                        ++dev->stats.tx_errors;
                        if (fs & LCAR)
                                ++dev->stats.tx_carrier_errors;
                        else if (fs & (UFLO|LCOL|RTRY)) {
                                ++dev->stats.tx_aborted_errors;
                                if (mb->xmtfs & UFLO) {
                                        printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
                                        dev->stats.tx_fifo_errors++;
                                        mace_txdma_reset(dev);
                                }
                        }
                }
        }

        if (mp->tx_count)
                netif_wake_queue(dev);

        local_irq_restore(flags);

        return IRQ_HANDLED;
}

static void mace_tx_timeout(struct net_device *dev)
{
        struct mace_data *mp = netdev_priv(dev);
        volatile struct mace *mb = mp->mace;
        unsigned long flags;

        local_irq_save(flags);

        /* turn off both tx and rx and reset the chip */
        mb->maccc = 0;
        printk(KERN_ERR "macmace: transmit timeout - resetting\n");
        mace_txdma_reset(dev);
        mace_reset(dev);

        /* restart rx dma */
        mace_rxdma_reset(dev);

        mp->tx_count = N_TX_RING;
        netif_wake_queue(dev);

        /* turn it on! */
        mb->maccc = ENXMT | ENRCV;
        /* enable all interrupts except receive interrupts */
        mb->imr = RCVINT;

        local_irq_restore(flags);
}

/*
 * Handle a newly arrived frame
 */

static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
        struct sk_buff *skb;
        unsigned int frame_status = mf->rcvsts;

        if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
                dev->stats.rx_errors++;
                if (frame_status & RS_OFLO) {
                        printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name);
                        dev->stats.rx_fifo_errors++;
                }
                if (frame_status & RS_CLSN)
                        dev->stats.collisions++;
                if (frame_status & RS_FRAMERR)
                        dev->stats.rx_frame_errors++;
                if (frame_status & RS_FCSERR)
                        dev->stats.rx_crc_errors++;
        } else {
                unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8);

                skb = dev_alloc_skb(frame_length + 2);
                if (!skb) {
                        dev->stats.rx_dropped++;
                        return;
                }
                skb_reserve(skb, 2);
                memcpy(skb_put(skb, frame_length), mf->data, frame_length);

                skb->protocol = eth_type_trans(skb, dev);
                netif_rx(skb);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += frame_length;
        }
}

/*
 * The PSC has passed us a DMA interrupt event.
 */

static irqreturn_t mace_dma_intr(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct mace_data *mp = netdev_priv(dev);
        int left, head;
        u16 status;
        u32 baka;

        /* Not sure what this does */

        while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
        if (!(baka & 0x60000000)) return IRQ_NONE;

        /*
         * Process the read queue
         */

        status = psc_read_word(PSC_ENETRD_CTL);

        if (status & 0x2000) {
                mace_rxdma_reset(dev);
        } else if (status & 0x0100) {
                psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

                left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
                head = N_RX_RING - left;

                /* Loop through the ring buffer and process new frames */

                while (mp->rx_tail < head) {
                        mace_dma_rx_frame(dev, (struct mace_frame *) (mp->rx_ring
                                + (mp->rx_tail * MACE_BUFF_SIZE)));
                        mp->rx_tail++;
                }

                /* If we're out of buffers in this ring then switch to */
                /* the other set, otherwise just reactivate this one.  */

                if (!left) {
                        mace_load_rxdma_base(dev, mp->rx_slot);
                        mp->rx_slot ^= 0x10;
                } else {
                        psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
                }
        }

        /*
         * Process the write queue
         */

        status = psc_read_word(PSC_ENETWR_CTL);

        if (status & 0x2000) {
                mace_txdma_reset(dev);
        } else if (status & 0x0100) {
                psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
                mp->tx_sloti ^= 0x10;
                mp->tx_count++;
        }
        return IRQ_HANDLED;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");

static int __devexit mac_mace_device_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct mace_data *mp = netdev_priv(dev);

        unregister_netdev(dev);

        free_irq(dev->irq, dev);
        free_irq(IRQ_MAC_MACE_DMA, dev);

        dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
                          mp->rx_ring, mp->rx_ring_phys);
        dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
                          mp->tx_ring, mp->tx_ring_phys);

        free_netdev(dev);

        return 0;
}

static struct platform_driver mac_mace_driver = {
        .probe  = mace_probe,
        .remove = __devexit_p(mac_mace_device_remove),
        .driver = {
                .name = mac_mace_string,
        },
};

static int __init mac_mace_init_module(void)
{
        int err;

        if (!MACH_IS_MAC)
                return -ENODEV;

        if ((err = platform_driver_register(&mac_mace_driver))) {
                printk(KERN_ERR "Driver registration failed\n");
                return err;
        }

        mac_mace_device = platform_device_alloc(mac_mace_string, 0);
        if (!mac_mace_device)
                goto out_unregister;

        if (platform_device_add(mac_mace_device)) {
                platform_device_put(mac_mace_device);
                mac_mace_device = NULL;
        }

        return 0;

out_unregister:
        platform_driver_unregister(&mac_mace_driver);

        return -ENOMEM;
}

static void __exit mac_mace_cleanup_module(void)
{
        platform_driver_unregister(&mac_mace_driver);

        if (mac_mace_device) {
                platform_device_unregister(mac_mace_device);
                mac_mace_device = NULL;
        }
}

module_init(mac_mace_init_module);
module_exit(mac_mace_cleanup_module);
