/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/sbus.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include "sunqe.h"

#define DRV_NAME        "sunqe"
#define DRV_VERSION     "4.0"
#define DRV_RELDATE     "June 23, 2006"
#define DRV_AUTHOR      "David S. Miller (davem@davemloft.net)"

static char version[] =
        DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");

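/* Every QEC controller found at probe time is chained onto this list
 * (see get_qec()); module unload walks it to free each controller's
 * shared IRQ and global register mapping.
 */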
static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

static inline int qec_global_reset(void __iomem *gregs)
{
        int tries = QEC_RESET_TRIES;

        sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
        while (--tries) {
                u32 tmp = sbus_readl(gregs + GLOB_CTRL);
                if (tmp & GLOB_CTRL_RESET) {
                        udelay(20);
                        continue;
                }
                break;
        }
        if (tries)
                return 0;
        printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
        return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200

static inline int qe_stop(struct sunqe *qep)
{
        void __iomem *cregs = qep->qcregs;
        void __iomem *mregs = qep->mregs;
        int tries;

        /* Reset the MACE, then the QEC channel. */
        sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
        tries = MACE_RESET_RETRIES;
        while (--tries) {
                u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
                if (tmp & MREGS_BCONFIG_RESET) {
                        udelay(20);
                        continue;
                }
                break;
        }
        if (!tries) {
                printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
                return -1;
        }

        sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
        tries = QE_RESET_RETRIES;
        while (--tries) {
                u32 tmp = sbus_readl(cregs + CREG_CTRL);
                if (tmp & CREG_CTRL_RESET) {
                        udelay(20);
                        continue;
                }
                break;
        }
        if (!tries) {
                printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
                return -1;
        }
        return 0;
}

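/* (Re)build the descriptor rings: each RX descriptor is pointed at its
 * slot in the channel's fixed receive buffer area and handed to the chip
 * by setting RXD_OWN; the TX ring is simply cleared and is filled by
 * qe_start_xmit() as packets are queued.
 */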
static void qe_init_rings(struct sunqe *qep)
{
        struct qe_init_block *qb = qep->qe_block;
        struct sunqe_buffers *qbufs = qep->buffers;
        __u32 qbufs_dvma = qep->buffers_dvma;
        int i;

        qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
        memset(qb, 0, sizeof(struct qe_init_block));
        memset(qbufs, 0, sizeof(struct sunqe_buffers));
        for (i = 0; i < RX_RING_SIZE; i++) {
                qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
                qb->qe_rxd[i].rx_flags =
                        (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
        }
}

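/* Bring one QE channel (and its MACE) to a fully working state: reset it,
 * program the descriptor ring bases and the local-memory FIFO pointers,
 * configure the MACE, load the station address, clear the logical address
 * filter, initialize the rings, wait briefly for link, and finally reload
 * the multicast state, which also re-enables the receiver and transmitter.
 */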
static int qe_init(struct sunqe *qep, int from_irq)
{
        struct sunqec *qecp = qep->parent;
        void __iomem *cregs = qep->qcregs;
        void __iomem *mregs = qep->mregs;
        void __iomem *gregs = qecp->gregs;
        unsigned char *e = &qep->dev->dev_addr[0];
        u32 tmp;
        int i;

        /* Shut it up. */
        if (qe_stop(qep))
                return -EAGAIN;

        /* Setup initial rx/tx init block pointers. */
        sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
        sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

        /* Enable/mask the various irq's. */
        sbus_writel(0, cregs + CREG_RIMASK);
        sbus_writel(1, cregs + CREG_TIMASK);

        sbus_writel(0, cregs + CREG_QMASK);
        sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

        /* Setup the FIFO pointers into QEC local memory. */
        tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
        sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
        sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

        tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
                sbus_readl(gregs + GLOB_RSIZE);
        sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
        sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

        /* Clear the channel collision counter. */
        sbus_writel(0, cregs + CREG_CCNT);

        /* For 10baseT, neither inter-frame spacing nor throttling seems to be necessary. */
        sbus_writel(0, cregs + CREG_PIPG);

        /* Now dork with the AMD MACE. */
        sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
        sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
        sbus_writeb(0, mregs + MREGS_RXFCNTL);

        /* The QEC dma's the rx'd packets from local memory out to main memory,
         * and therefore it interrupts when the packet reception is "complete".
         * So don't listen for the MACE talking about it.
         */
        sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
        sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
        sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
                     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
                    mregs + MREGS_FCONFIG);

        /* Only usable interface on QuadEther is twisted pair. */
        sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

        /* Tell MACE we are changing the ether address. */
        sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
                    mregs + MREGS_IACONFIG);
        while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                barrier();
        sbus_writeb(e[0], mregs + MREGS_ETHADDR);
        sbus_writeb(e[1], mregs + MREGS_ETHADDR);
        sbus_writeb(e[2], mregs + MREGS_ETHADDR);
        sbus_writeb(e[3], mregs + MREGS_ETHADDR);
        sbus_writeb(e[4], mregs + MREGS_ETHADDR);
        sbus_writeb(e[5], mregs + MREGS_ETHADDR);

        /* Clear out the address filter. */
        sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
                    mregs + MREGS_IACONFIG);
        while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                barrier();
        for (i = 0; i < 8; i++)
                sbus_writeb(0, mregs + MREGS_FILTER);

        /* Address changes are now complete. */
        sbus_writeb(0, mregs + MREGS_IACONFIG);

        qe_init_rings(qep);

        /* Wait a little bit for the link to come up... */
        mdelay(5);
        if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
                int tries = 50;

                while (tries--) {
                        u8 tmp;

                        mdelay(5);
                        barrier();
                        tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
                        if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
                                break;
                }
                /* The post-decrement leaves tries at -1 when the loop times out. */
                if (tries < 0)
                        printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
        }

        /* Missed packet counter is cleared on a read. */
        sbus_readb(mregs + MREGS_MPCNT);

        /* Reload multicast information; this will enable the receiver
         * and transmitter.
         */
        qe_set_multicast(qep->dev);

        /* QEC should now start to show interrupts. */
        return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
        struct net_device *dev = qep->dev;
        int mace_hwbug_workaround = 0;

        if (qe_status & CREG_STAT_EDEFER) {
                printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
                dev->stats.tx_errors++;
        }

        if (qe_status & CREG_STAT_CLOSS) {
                printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.tx_carrier_errors++;
        }

        if (qe_status & CREG_STAT_ERETRIES) {
                printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
                dev->stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_LCOLL) {
                printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.collisions++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_FUFLOW) {
                printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
                dev->stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_JERROR) {
                printk(KERN_ERR "%s: Jabber error.\n", dev->name);
        }

        if (qe_status & CREG_STAT_BERROR) {
                printk(KERN_ERR "%s: Babble error.\n", dev->name);
        }

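        /* The *COFLOW status bits report that one of the chip's internal
         * event counters wrapped around; each wrap is accounted for below
         * as 256 events (presumably the counters are 8 bits wide).
         */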
        if (qe_status & CREG_STAT_CCOFLOW) {
                dev->stats.tx_errors += 256;
                dev->stats.collisions += 256;
        }

        if (qe_status & CREG_STAT_TXDERROR) {
                printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXLERR) {
                printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
                dev->stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXPERR) {
                printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXSERR) {
                printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
                dev->stats.tx_errors++;
                dev->stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RCCOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.collisions += 256;
        }

        if (qe_status & CREG_STAT_RUOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.rx_over_errors += 256;
        }

        if (qe_status & CREG_STAT_MCOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.rx_missed_errors += 256;
        }

        if (qe_status & CREG_STAT_RXFOFLOW) {
                printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_over_errors++;
        }

        if (qe_status & CREG_STAT_RLCOLL) {
                printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.collisions++;
        }

        if (qe_status & CREG_STAT_FCOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.rx_frame_errors += 256;
        }

        if (qe_status & CREG_STAT_CECOFLOW) {
                dev->stats.rx_errors += 256;
                dev->stats.rx_crc_errors += 256;
        }

        if (qe_status & CREG_STAT_RXDROP) {
                printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_dropped++;
                dev->stats.rx_missed_errors++;
        }

        if (qe_status & CREG_STAT_RXSMALL) {
                printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_length_errors++;
        }

        if (qe_status & CREG_STAT_RXLERR) {
                printk(KERN_ERR "%s: Receive late error.\n", dev->name);
                dev->stats.rx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RXPERR) {
                printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_missed_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RXSERR) {
                printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
                dev->stats.rx_errors++;
                dev->stats.rx_missed_errors++;
                mace_hwbug_workaround = 1;
        }

        if (mace_hwbug_workaround)
                qe_init(qep, 1);
        return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine.  The QEC has already DMA'd
 * each frame into its fixed ring buffer, so we copy it out into a
 * freshly allocated skb and hand it up the stack.
 */
static void qe_rx(struct sunqe *qep)
{
        struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
        struct net_device *dev = qep->dev;
        struct qe_rxd *this;
        struct sunqe_buffers *qbufs = qep->buffers;
        __u32 qbufs_dvma = qep->buffers_dvma;
        int elem = qep->rx_new, drops = 0;
        u32 flags;

        this = &rxbase[elem];
        while (!((flags = this->rx_flags) & RXD_OWN)) {
                struct sk_buff *skb;
                unsigned char *this_qbuf =
                        &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
                __u32 this_qbuf_dvma = qbufs_dvma +
                        qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
                struct qe_rxd *end_rxd =
                        &rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
                int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

                /* Check for errors. */
                if (len < ETH_ZLEN) {
                        dev->stats.rx_errors++;
                        dev->stats.rx_length_errors++;
                        dev->stats.rx_dropped++;
                } else {
                        skb = dev_alloc_skb(len + 2);
                        if (skb == NULL) {
                                drops++;
                                dev->stats.rx_dropped++;
                        } else {
                                skb_reserve(skb, 2);
                                skb_put(skb, len);
                                skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
                                                        len);
                                skb->protocol = eth_type_trans(skb, qep->dev);
                                netif_rx(skb);
                                qep->dev->last_rx = jiffies;
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += len;
                        }
                }
                end_rxd->rx_addr = this_qbuf_dvma;
                end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

                elem = NEXT_RX(elem);
                this = &rxbase[elem];
        }
        qep->rx_new = elem;
        if (drops)
                printk(KERN_NOTICE "%s: Memory squeeze, dropping packets.\n", qep->dev->name);
}

static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
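/* The loop below treats the QEC global status word as one four-bit status
 * nibble per channel, channel 0 in the low bits: it tests the low nibble
 * and shifts right by four for each successive channel.
 */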
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
        struct sunqec *qecp = dev_id;
        u32 qec_status;
        int channel = 0;

        /* Latch the status now. */
        qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
        while (channel < 4) {
                if (qec_status & 0xf) {
                        struct sunqe *qep = qecp->qes[channel];
                        u32 qe_status;

                        qe_status = sbus_readl(qep->qcregs + CREG_STAT);
                        if (qe_status & CREG_STAT_ERRORS) {
                                if (qe_is_bolixed(qep, qe_status))
                                        goto next;
                        }
                        if (qe_status & CREG_STAT_RXIRQ)
                                qe_rx(qep);
                        if (netif_queue_stopped(qep->dev) &&
                            (qe_status & CREG_STAT_TXIRQ)) {
                                spin_lock(&qep->lock);
                                qe_tx_reclaim(qep);
                                if (TX_BUFFS_AVAIL(qep) > 0) {
                                        /* Wake net queue and return to
                                         * lazy tx reclaim.
                                         */
                                        netif_wake_queue(qep->dev);
                                        sbus_writel(1, qep->qcregs + CREG_TIMASK);
                                }
                                spin_unlock(&qep->lock);
                        }
        next:
                        ;
                }
                qec_status >>= 4;
                channel++;
        }

        return IRQ_HANDLED;
}

static int qe_open(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);

        qep->mconfig = (MREGS_MCONFIG_TXENAB |
                        MREGS_MCONFIG_RXENAB |
                        MREGS_MCONFIG_MBAENAB);
        return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);

        qe_stop(qep);
        return 0;
}

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
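/* TX completion is handled lazily: the TX interrupt is normally masked
 * (CREG_TIMASK is written with 1 in qe_init() and from the interrupt
 * handler) and the ring is reclaimed opportunistically from
 * qe_start_xmit() and qe_tx_timeout().  The interrupt is only unmasked
 * (CREG_TIMASK written with 0) once the ring fills up in qe_start_xmit().
 */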
static void qe_tx_reclaim(struct sunqe *qep)
{
        struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
        int elem = qep->tx_old;

        while (elem != qep->tx_new) {
                u32 flags = txbase[elem].tx_flags;

                if (flags & TXD_OWN)
                        break;
                elem = NEXT_TX(elem);
        }
        qep->tx_old = elem;
}

static void qe_tx_timeout(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        int tx_full;

        spin_lock_irq(&qep->lock);

        /* Try to reclaim, if that frees up some tx
         * entries, we're fine.
         */
        qe_tx_reclaim(qep);
        tx_full = TX_BUFFS_AVAIL(qep) <= 0;

        spin_unlock_irq(&qep->lock);

        if (!tx_full)
                goto out;

        printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
        qe_init(qep, 1);

out:
        netif_wake_queue(dev);
}

/* Get a packet queued to go onto the wire. */
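/* The whole packet is copied into a fixed per-entry DMA buffer (the skb
 * itself is never mapped for DMA), so the skb can be freed as soon as the
 * descriptor has been handed to the chip.
 */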
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        struct sunqe_buffers *qbufs = qep->buffers;
        __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
        unsigned char *txbuf;
        int len, entry;

        spin_lock_irq(&qep->lock);

        qe_tx_reclaim(qep);

        len = skb->len;
        entry = qep->tx_new;

        txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
        txbuf_dvma = qbufs_dvma +
                qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

        /* Avoid a race... */
        qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

        skb_copy_from_linear_data(skb, txbuf, len);

        qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
        qep->qe_block->qe_txd[entry].tx_flags =
                (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
        qep->tx_new = NEXT_TX(entry);

        /* Get it going. */
        dev->trans_start = jiffies;
        sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += len;

        if (TX_BUFFS_AVAIL(qep) <= 0) {
                /* Halt the net queue and enable tx interrupts.
                 * When the tx queue empties the tx irq handler
                 * will wake up the queue and return us back to
                 * the lazy tx reclaim scheme.
                 */
                netif_stop_queue(dev);
                sbus_writel(0, qep->qcregs + CREG_TIMASK);
        }
        spin_unlock_irq(&qep->lock);

        dev_kfree_skb(skb);

        return 0;
}

static void qe_set_multicast(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        struct dev_mc_list *dmi = dev->mc_list;
        u8 new_mconfig = qep->mconfig;
        char *addrs;
        int i;
        u32 crc;

        /* Lock out others. */
        netif_stop_queue(dev);

        if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
                sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
                            qep->mregs + MREGS_IACONFIG);
                while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                        barrier();
                for (i = 0; i < 8; i++)
                        sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
                sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
        } else if (dev->flags & IFF_PROMISC) {
                new_mconfig |= MREGS_MCONFIG_PROMISC;
        } else {
                u16 hash_table[4];
                u8 *hbytes = (unsigned char *) &hash_table[0];

                for (i = 0; i < 4; i++)
                        hash_table[i] = 0;

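                /* Build the 64-bit logical address filter: the top six bits
                 * of the little-endian CRC of each multicast address select
                 * one of 64 filter bits; the top two of those pick one of the
                 * four 16-bit words, the low four pick the bit within it
                 * (e.g. a hash of 0x2a sets bit 10 of hash_table[2]).
                 */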
                for (i = 0; i < dev->mc_count; i++) {
                        addrs = dmi->dmi_addr;
                        dmi = dmi->next;

                        if (!(*addrs & 1))
                                continue;
                        crc = ether_crc_le(6, addrs);
                        crc >>= 26;
                        hash_table[crc >> 4] |= 1 << (crc & 0xf);
                }
                /* Program the qe with the new filter value. */
                sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
                            qep->mregs + MREGS_IACONFIG);
                while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                        barrier();
                for (i = 0; i < 8; i++) {
                        u8 tmp = *hbytes++;
                        sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
                }
                sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
        }

        /* Any change of the logical address filter, the physical address,
         * or enabling/disabling promiscuous mode causes the MACE to disable
         * the receiver.  So we must re-enable them here or else the MACE
         * refuses to listen to anything on the network.  Sheesh, took
         * me a day or two to find this bug.
         */
        qep->mconfig = new_mconfig;
        sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

        /* Let us get going again. */
        netif_wake_queue(dev);
}

/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct sunqe *qep = netdev_priv(dev);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        sprintf(info->bus_info, "SBUS:%d",
                qep->qe_sdev->slot);
}

static u32 qe_get_link(struct net_device *dev)
{
        struct sunqe *qep = netdev_priv(dev);
        void __iomem *mregs = qep->mregs;
        u8 phyconfig;

        spin_lock_irq(&qep->lock);
        phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
        spin_unlock_irq(&qep->lock);

        return (phyconfig & MREGS_PHYCONFIG_LSTAT);
}

static const struct ethtool_ops qe_ethtool_ops = {
        .get_drvinfo            = qe_get_drvinfo,
        .get_link               = qe_get_link,
};

/* This is only called once at boot time for each card probed. */
static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev)
{
        u8 bsizes = qecp->qec_bursts;

        if (sbus_can_burst64(qsdev) && (bsizes & DMA_BURST64)) {
                sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
        } else if (bsizes & DMA_BURST32) {
                sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
        } else {
                sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
        }

        /* Packet size is only used in 100baseT BigMAC configurations;
         * program the 2048 byte default here just to be on the safe side.
         */
        sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

        /* Set the local memsize register; each of the four QE channels
         * gets one quarter of the QEC local memory.
         */
        sbus_writel((qsdev->reg_addrs[1].reg_size >> 2),
                    qecp->gregs + GLOB_MSIZE);

        /* Divide up the local QEC memory amongst the 4 QE receiver and
         * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
         */
        sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
                    qecp->gregs + GLOB_TSIZE);
        sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
                    qecp->gregs + GLOB_RSIZE);
}

static u8 __init qec_get_burst(struct device_node *dp)
{
        u8 bsizes, bsizes_more;

        /* Find and set the burst sizes for the QEC, since it
         * does the actual dma for all 4 channels.
         */
        bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
        bsizes &= 0xff;
        bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

        if (bsizes_more != 0xff)
                bsizes &= bsizes_more;
        if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
            (bsizes & DMA_BURST32) == 0)
                bsizes = (DMA_BURST32 - 1);

        return bsizes;
}

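/* Find the sunqec structure for the QEC controller that is the SBUS parent
 * of this QE channel, allocating, resetting and initializing it the first
 * time any of its channels is probed.
 */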
static struct sunqec * __init get_qec(struct sbus_dev *child_sdev)
{
        struct sbus_dev *qec_sdev = child_sdev->parent;
        struct sunqec *qecp;

        for (qecp = root_qec_dev; qecp; qecp = qecp->next_module) {
                if (qecp->qec_sdev == qec_sdev)
                        break;
        }
        if (!qecp) {
                qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
                if (qecp) {
                        u32 ctrl;

                        qecp->qec_sdev = qec_sdev;
                        qecp->gregs = sbus_ioremap(&qec_sdev->resource[0], 0,
                                                   GLOB_REG_SIZE,
                                                   "QEC Global Registers");
                        if (!qecp->gregs)
                                goto fail;

                        /* Make sure the QEC is in MACE mode. */
                        ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
                        ctrl &= 0xf0000000;
                        if (ctrl != GLOB_CTRL_MMODE) {
                                printk(KERN_ERR "qec: Not in MACE mode!\n");
                                goto fail;
                        }

                        if (qec_global_reset(qecp->gregs))
                                goto fail;

                        qecp->qec_bursts = qec_get_burst(qec_sdev->ofdev.node);

                        qec_init_once(qecp, qec_sdev);

                        if (request_irq(qec_sdev->irqs[0], &qec_interrupt,
                                        IRQF_SHARED, "qec", (void *) qecp)) {
                                printk(KERN_ERR "qec: Can't register irq.\n");
                                goto fail;
                        }

                        qecp->next_module = root_qec_dev;
                        root_qec_dev = qecp;
                }
        }

        return qecp;

fail:
        if (qecp->gregs)
                sbus_iounmap(qecp->gregs, GLOB_REG_SIZE);
        kfree(qecp);
        return NULL;
}

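/* Probe one QE channel: allocate the net_device, work out the channel
 * number, attach to (or create) the parent QEC, map the channel and MACE
 * registers, allocate the descriptor block and packet buffers, and
 * register the network device.
 */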
static int __init qec_ether_init(struct sbus_dev *sdev)
{
        static unsigned version_printed;
        struct net_device *dev;
        struct sunqe *qe;
        struct sunqec *qecp;
        int i, res;

        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        dev = alloc_etherdev(sizeof(struct sunqe));
        if (!dev)
                return -ENOMEM;

        memcpy(dev->dev_addr, idprom->id_ethaddr, 6);

        qe = netdev_priv(dev);

        i = of_getintprop_default(sdev->ofdev.node, "channel#", -1);
        if (i == -1) {
                struct sbus_dev *td = sdev->parent->child;
                i = 0;
                while (td != sdev) {
                        td = td->next;
                        i++;
                }
        }
        qe->channel = i;
        spin_lock_init(&qe->lock);

        res = -ENODEV;
        qecp = get_qec(sdev);
        if (!qecp)
                goto fail;

        qecp->qes[qe->channel] = qe;
        qe->dev = dev;
        qe->parent = qecp;
        qe->qe_sdev = sdev;

        res = -ENOMEM;
        qe->qcregs = sbus_ioremap(&qe->qe_sdev->resource[0], 0,
                                  CREG_REG_SIZE, "QEC Channel Registers");
        if (!qe->qcregs) {
                printk(KERN_ERR "qe: Cannot map channel registers.\n");
                goto fail;
        }

        qe->mregs = sbus_ioremap(&qe->qe_sdev->resource[1], 0,
                                 MREGS_REG_SIZE, "QE MACE Registers");
        if (!qe->mregs) {
                printk(KERN_ERR "qe: Cannot map MACE registers.\n");
                goto fail;
        }

        qe->qe_block = sbus_alloc_consistent(qe->qe_sdev,
                                             PAGE_SIZE,
                                             &qe->qblock_dvma);
        qe->buffers = sbus_alloc_consistent(qe->qe_sdev,
                                            sizeof(struct sunqe_buffers),
                                            &qe->buffers_dvma);
        if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
            qe->buffers == NULL || qe->buffers_dvma == 0)
                goto fail;

        /* Stop this QE. */
        qe_stop(qe);

        SET_NETDEV_DEV(dev, &sdev->ofdev.dev);

        dev->open = qe_open;
        dev->stop = qe_close;
        dev->hard_start_xmit = qe_start_xmit;
        dev->set_multicast_list = qe_set_multicast;
        dev->tx_timeout = qe_tx_timeout;
        dev->watchdog_timeo = 5*HZ;
        dev->irq = sdev->irqs[0];
        dev->dma = 0;
        dev->ethtool_ops = &qe_ethtool_ops;

        res = register_netdev(dev);
        if (res)
                goto fail;

        dev_set_drvdata(&sdev->ofdev.dev, qe);

        printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel);
        for (i = 0; i < 6; i++)
                printk("%2.2x%c",
                       dev->dev_addr[i],
                       i == 5 ? ' ' : ':');
        printk("\n");

        return 0;

fail:
        if (qe->qcregs)
                sbus_iounmap(qe->qcregs, CREG_REG_SIZE);
        if (qe->mregs)
                sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
        if (qe->qe_block)
                sbus_free_consistent(qe->qe_sdev,
                                     PAGE_SIZE,
                                     qe->qe_block,
                                     qe->qblock_dvma);
        if (qe->buffers)
                sbus_free_consistent(qe->qe_sdev,
                                     sizeof(struct sunqe_buffers),
                                     qe->buffers,
                                     qe->buffers_dvma);

        free_netdev(dev);

        return res;
}

static int __devinit qec_sbus_probe(struct of_device *dev, const struct of_device_id *match)
{
        struct sbus_dev *sdev = to_sbus_device(&dev->dev);

        return qec_ether_init(sdev);
}

static int __devexit qec_sbus_remove(struct of_device *dev)
{
        struct sunqe *qp = dev_get_drvdata(&dev->dev);
        struct net_device *net_dev = qp->dev;

        unregister_netdev(net_dev);

        sbus_iounmap(qp->qcregs, CREG_REG_SIZE);
        sbus_iounmap(qp->mregs, MREGS_REG_SIZE);
        sbus_free_consistent(qp->qe_sdev,
                             PAGE_SIZE,
                             qp->qe_block,
                             qp->qblock_dvma);
        sbus_free_consistent(qp->qe_sdev,
                             sizeof(struct sunqe_buffers),
                             qp->buffers,
                             qp->buffers_dvma);

        free_netdev(net_dev);

        dev_set_drvdata(&dev->dev, NULL);

        return 0;
}

static struct of_device_id qec_sbus_match[] = {
        {
                .name = "qe",
        },
        {},
};

MODULE_DEVICE_TABLE(of, qec_sbus_match);

static struct of_platform_driver qec_sbus_driver = {
        .name           = "qec",
        .match_table    = qec_sbus_match,
        .probe          = qec_sbus_probe,
        .remove         = __devexit_p(qec_sbus_remove),
};

static int __init qec_init(void)
{
        return of_register_driver(&qec_sbus_driver, &sbus_bus_type);
}

static void __exit qec_exit(void)
{
        of_unregister_driver(&qec_sbus_driver);

        while (root_qec_dev) {
                struct sunqec *next = root_qec_dev->next_module;

                free_irq(root_qec_dev->qec_sdev->irqs[0],
                         (void *) root_qec_dev);
                sbus_iounmap(root_qec_dev->gregs, GLOB_REG_SIZE);

                kfree(root_qec_dev);

                root_qec_dev = next;
        }
}

module_init(qec_init);
module_exit(qec_exit);