linux/drivers/net/sunqe.c
   1/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
   2 *          Once again I am out to prove that every ethernet
   3 *          controller out there can be most efficiently programmed
   4 *          if you make it look like a LANCE.
   5 *
   6 * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
   7 */
   8
   9#include <linux/module.h>
  10#include <linux/kernel.h>
  11#include <linux/types.h>
  12#include <linux/errno.h>
  13#include <linux/fcntl.h>
  14#include <linux/interrupt.h>
  15#include <linux/ioport.h>
  16#include <linux/in.h>
  17#include <linux/slab.h>
  18#include <linux/string.h>
  19#include <linux/delay.h>
  20#include <linux/init.h>
  21#include <linux/crc32.h>
  22#include <linux/netdevice.h>
  23#include <linux/etherdevice.h>
  24#include <linux/skbuff.h>
  25#include <linux/ethtool.h>
  26#include <linux/bitops.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/of.h>
  29#include <linux/of_device.h>
  30
  31#include <asm/system.h>
  32#include <asm/io.h>
  33#include <asm/dma.h>
  34#include <asm/byteorder.h>
  35#include <asm/idprom.h>
  36#include <asm/openprom.h>
  37#include <asm/oplib.h>
  38#include <asm/auxio.h>
  39#include <asm/pgtable.h>
  40#include <asm/irq.h>
  41
  42#include "sunqe.h"
  43
  44#define DRV_NAME        "sunqe"
  45#define DRV_VERSION     "4.1"
  46#define DRV_RELDATE     "August 27, 2008"
  47#define DRV_AUTHOR      "David S. Miller (davem@davemloft.net)"
  48
  49static char version[] =
  50        DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
  51
  52MODULE_VERSION(DRV_VERSION);
  53MODULE_AUTHOR(DRV_AUTHOR);
  54MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
  55MODULE_LICENSE("GPL");
  56
  57static struct sunqec *root_qec_dev;
  58
  59static void qe_set_multicast(struct net_device *dev);
  60
  61#define QEC_RESET_TRIES 200
  62
  63static inline int qec_global_reset(void __iomem *gregs)
  64{
  65        int tries = QEC_RESET_TRIES;
  66
  67        sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
  68        while (--tries) {
  69                u32 tmp = sbus_readl(gregs + GLOB_CTRL);
  70                if (tmp & GLOB_CTRL_RESET) {
  71                        udelay(20);
  72                        continue;
  73                }
  74                break;
  75        }
  76        if (tries)
  77                return 0;
  78        printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
  79        return -1;
  80}
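    /* A note on the poll above: QEC_RESET_TRIES (200) iterations with a
     * 20us delay between reads gives the QEC roughly 4ms to clear
     * GLOB_CTRL_RESET before we give up.
     */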
  81
  82#define MACE_RESET_RETRIES 200
  83#define QE_RESET_RETRIES   200
  84
  85static inline int qe_stop(struct sunqe *qep)
  86{
  87        void __iomem *cregs = qep->qcregs;
  88        void __iomem *mregs = qep->mregs;
  89        int tries;
  90
  91        /* Reset the MACE, then the QEC channel. */
  92        sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
  93        tries = MACE_RESET_RETRIES;
  94        while (--tries) {
  95                u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
  96                if (tmp & MREGS_BCONFIG_RESET) {
  97                        udelay(20);
  98                        continue;
  99                }
 100                break;
 101        }
 102        if (!tries) {
 103                printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
 104                return -1;
 105        }
 106
 107        sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
 108        tries = QE_RESET_RETRIES;
 109        while (--tries) {
 110                u32 tmp = sbus_readl(cregs + CREG_CTRL);
 111                if (tmp & CREG_CTRL_RESET) {
 112                        udelay(20);
 113                        continue;
 114                }
 115                break;
 116        }
 117        if (!tries) {
 118                printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
 119                return -1;
 120        }
 121        return 0;
 122}
 123
 124static void qe_init_rings(struct sunqe *qep)
 125{
 126        struct qe_init_block *qb = qep->qe_block;
 127        struct sunqe_buffers *qbufs = qep->buffers;
 128        __u32 qbufs_dvma = qep->buffers_dvma;
 129        int i;
 130
 131        qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
 132        memset(qb, 0, sizeof(struct qe_init_block));
 133        memset(qbufs, 0, sizeof(struct sunqe_buffers));
 134        for (i = 0; i < RX_RING_SIZE; i++) {
 135                qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
 136                qb->qe_rxd[i].rx_flags =
 137                        (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
 138        }
 139}
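    /* Note on the ring setup above: only the receive side is primed here.
     * Each RX descriptor is handed to the chip by setting RXD_OWN with the
     * buffer size (RXD_PKT_SZ) in the length field; the TX ring is left
     * zeroed and descriptors are built one at a time in qe_start_xmit().
     */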
 140
 141static int qe_init(struct sunqe *qep, int from_irq)
 142{
 143        struct sunqec *qecp = qep->parent;
 144        void __iomem *cregs = qep->qcregs;
 145        void __iomem *mregs = qep->mregs;
 146        void __iomem *gregs = qecp->gregs;
 147        unsigned char *e = &qep->dev->dev_addr[0];
 148        u32 tmp;
 149        int i;
 150
 151        /* Shut it up. */
 152        if (qe_stop(qep))
 153                return -EAGAIN;
 154
 155        /* Setup initial rx/tx init block pointers. */
 156        sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
 157        sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
 158
 159        /* Enable/mask the various irq's. */
 160        sbus_writel(0, cregs + CREG_RIMASK);
 161        sbus_writel(1, cregs + CREG_TIMASK);
 162
 163        sbus_writel(0, cregs + CREG_QMASK);
 164        sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);
 165
 166        /* Setup the FIFO pointers into QEC local memory. */
 167        tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
 168        sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
 169        sbus_writel(tmp, cregs + CREG_RXWBUFPTR);
 170
 171        tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
 172                sbus_readl(gregs + GLOB_RSIZE);
 173        sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
 174        sbus_writel(tmp, cregs + CREG_TXWBUFPTR);
 175
 176        /* Clear the channel collision counter. */
 177        sbus_writel(0, cregs + CREG_CCNT);
 178
 179        /* For 10baseT, neither inter-frame spacing nor transmit throttling seems to be necessary. */
 180        sbus_writel(0, cregs + CREG_PIPG);
 181
 182        /* Now dork with the AMD MACE. */
 183        sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
 184        sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
 185        sbus_writeb(0, mregs + MREGS_RXFCNTL);
 186
 187        /* The QEC dma's the rx'd packets from local memory out to main memory,
 188         * and therefore it interrupts when the packet reception is "complete".
 189         * So don't listen for the MACE talking about it.
 190         */
 191        sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
 192        sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
 193        sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
 194                     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
 195                    mregs + MREGS_FCONFIG);
 196
 197        /* Only usable interface on QuadEther is twisted pair. */
 198        sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);
 199
 200        /* Tell MACE we are changing the ether address. */
 201        sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
 202                    mregs + MREGS_IACONFIG);
 203        while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
 204                barrier();
 205        sbus_writeb(e[0], mregs + MREGS_ETHADDR);
 206        sbus_writeb(e[1], mregs + MREGS_ETHADDR);
 207        sbus_writeb(e[2], mregs + MREGS_ETHADDR);
 208        sbus_writeb(e[3], mregs + MREGS_ETHADDR);
 209        sbus_writeb(e[4], mregs + MREGS_ETHADDR);
 210        sbus_writeb(e[5], mregs + MREGS_ETHADDR);
 211
 212        /* Clear out the address filter. */
 213        sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
 214                    mregs + MREGS_IACONFIG);
 215        while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
 216                barrier();
 217        for (i = 0; i < 8; i++)
 218                sbus_writeb(0, mregs + MREGS_FILTER);
 219
 220        /* Address changes are now complete. */
 221        sbus_writeb(0, mregs + MREGS_IACONFIG);
 222
 223        qe_init_rings(qep);
 224
 225        /* Wait a little bit for the link to come up... */
 226        mdelay(5);
 227        if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
 228                int tries = 50;
 229
 230                while (--tries) {
 231                        u8 tmp;
 232
 233                        mdelay(5);
 234                        barrier();
 235                        tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
 236                        if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
 237                                break;
 238                }
 239                if (tries == 0)
 240                        printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
 241        }
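            /* (The polling above gives the link roughly 250ms to come up:
             * 50 polls at 5ms apiece, on top of the initial 5ms settle delay.)
             */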
 242
 243        /* Missed packet counter is cleared on a read. */
 244        sbus_readb(mregs + MREGS_MPCNT);
 245
 246        /* Reload multicast information, this will enable the receiver
 247         * and transmitter.
 248         */
 249        qe_set_multicast(qep->dev);
 250
 251        /* QEC should now start to show interrupts. */
 252        return 0;
 253}
 254
 255/* Grrr, certain error conditions completely lock up the AMD MACE,
 256 * so when we get these we _must_ reset the chip.
 257 */
 258static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
 259{
 260        struct net_device *dev = qep->dev;
 261        int mace_hwbug_workaround = 0;
 262
 263        if (qe_status & CREG_STAT_EDEFER) {
 264                printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
 265                dev->stats.tx_errors++;
 266        }
 267
 268        if (qe_status & CREG_STAT_CLOSS) {
 269                printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
 270                dev->stats.tx_errors++;
 271                dev->stats.tx_carrier_errors++;
 272        }
 273
 274        if (qe_status & CREG_STAT_ERETRIES) {
 275                printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
 276                dev->stats.tx_errors++;
 277                mace_hwbug_workaround = 1;
 278        }
 279
 280        if (qe_status & CREG_STAT_LCOLL) {
 281                printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
 282                dev->stats.tx_errors++;
 283                dev->stats.collisions++;
 284                mace_hwbug_workaround = 1;
 285        }
 286
 287        if (qe_status & CREG_STAT_FUFLOW) {
 288                printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
 289                dev->stats.tx_errors++;
 290                mace_hwbug_workaround = 1;
 291        }
 292
 293        if (qe_status & CREG_STAT_JERROR) {
 294                printk(KERN_ERR "%s: Jabber error.\n", dev->name);
 295        }
 296
 297        if (qe_status & CREG_STAT_BERROR) {
 298                printk(KERN_ERR "%s: Babble error.\n", dev->name);
 299        }
 300
 301        if (qe_status & CREG_STAT_CCOFLOW) {
 302                dev->stats.tx_errors += 256;
 303                dev->stats.collisions += 256;
 304        }
 305
 306        if (qe_status & CREG_STAT_TXDERROR) {
 307                printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
 308                dev->stats.tx_errors++;
 309                dev->stats.tx_aborted_errors++;
 310                mace_hwbug_workaround = 1;
 311        }
 312
 313        if (qe_status & CREG_STAT_TXLERR) {
 314                printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
 315                dev->stats.tx_errors++;
 316                mace_hwbug_workaround = 1;
 317        }
 318
 319        if (qe_status & CREG_STAT_TXPERR) {
 320                printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
 321                dev->stats.tx_errors++;
 322                dev->stats.tx_aborted_errors++;
 323                mace_hwbug_workaround = 1;
 324        }
 325
 326        if (qe_status & CREG_STAT_TXSERR) {
 327                printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
 328                dev->stats.tx_errors++;
 329                dev->stats.tx_aborted_errors++;
 330                mace_hwbug_workaround = 1;
 331        }
 332
 333        if (qe_status & CREG_STAT_RCCOFLOW) {
 334                dev->stats.rx_errors += 256;
 335                dev->stats.collisions += 256;
 336        }
 337
 338        if (qe_status & CREG_STAT_RUOFLOW) {
 339                dev->stats.rx_errors += 256;
 340                dev->stats.rx_over_errors += 256;
 341        }
 342
 343        if (qe_status & CREG_STAT_MCOFLOW) {
 344                dev->stats.rx_errors += 256;
 345                dev->stats.rx_missed_errors += 256;
 346        }
 347
 348        if (qe_status & CREG_STAT_RXFOFLOW) {
 349                printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
 350                dev->stats.rx_errors++;
 351                dev->stats.rx_over_errors++;
 352        }
 353
 354        if (qe_status & CREG_STAT_RLCOLL) {
 355                printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
 356                dev->stats.rx_errors++;
 357                dev->stats.collisions++;
 358        }
 359
 360        if (qe_status & CREG_STAT_FCOFLOW) {
 361                dev->stats.rx_errors += 256;
 362                dev->stats.rx_frame_errors += 256;
 363        }
 364
 365        if (qe_status & CREG_STAT_CECOFLOW) {
 366                dev->stats.rx_errors += 256;
 367                dev->stats.rx_crc_errors += 256;
 368        }
 369
 370        if (qe_status & CREG_STAT_RXDROP) {
 371                printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
 372                dev->stats.rx_errors++;
 373                dev->stats.rx_dropped++;
 374                dev->stats.rx_missed_errors++;
 375        }
 376
 377        if (qe_status & CREG_STAT_RXSMALL) {
 378                printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
 379                dev->stats.rx_errors++;
 380                dev->stats.rx_length_errors++;
 381        }
 382
 383        if (qe_status & CREG_STAT_RXLERR) {
 384                printk(KERN_ERR "%s: Receive late error.\n", dev->name);
 385                dev->stats.rx_errors++;
 386                mace_hwbug_workaround = 1;
 387        }
 388
 389        if (qe_status & CREG_STAT_RXPERR) {
 390                printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
 391                dev->stats.rx_errors++;
 392                dev->stats.rx_missed_errors++;
 393                mace_hwbug_workaround = 1;
 394        }
 395
 396        if (qe_status & CREG_STAT_RXSERR) {
 397                printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
 398                dev->stats.rx_errors++;
 399                dev->stats.rx_missed_errors++;
 400                mace_hwbug_workaround = 1;
 401        }
 402
 403        if (mace_hwbug_workaround)
 404                qe_init(qep, 1);
 405        return mace_hwbug_workaround;
 406}
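    /* The nonzero return value means qe_init() was re-run to recover a
     * wedged MACE; qec_interrupt() uses it to skip further RX/TX handling
     * for this channel on the current pass, since the rings were just
     * reinitialized.
     */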
 407
 408/* Per-QE receive interrupt service routine.  Unlike the Happy Meal, the
 409 * QE receives into fixed DMA buffers and copies each packet into a skb.
 410 */
 411static void qe_rx(struct sunqe *qep)
 412{
 413        struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
 414        struct net_device *dev = qep->dev;
 415        struct qe_rxd *this;
 416        struct sunqe_buffers *qbufs = qep->buffers;
 417        __u32 qbufs_dvma = qep->buffers_dvma;
 418        int elem = qep->rx_new, drops = 0;
 419        u32 flags;
 420
 421        this = &rxbase[elem];
 422        while (!((flags = this->rx_flags) & RXD_OWN)) {
 423                struct sk_buff *skb;
 424                unsigned char *this_qbuf =
 425                        &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
 426                __u32 this_qbuf_dvma = qbufs_dvma +
 427                        qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
 428                struct qe_rxd *end_rxd =
 429                        &rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
 430                int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */
 431
 432                /* Check for errors. */
 433                if (len < ETH_ZLEN) {
 434                        dev->stats.rx_errors++;
 435                        dev->stats.rx_length_errors++;
 436                        dev->stats.rx_dropped++;
 437                } else {
 438                        skb = dev_alloc_skb(len + 2);
 439                        if (skb == NULL) {
 440                                drops++;
 441                                dev->stats.rx_dropped++;
 442                        } else {
 443                                skb_reserve(skb, 2);
 444                                skb_put(skb, len);
 445                                skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
 446                                                 len);
 447                                skb->protocol = eth_type_trans(skb, qep->dev);
 448                                netif_rx(skb);
 449                                dev->stats.rx_packets++;
 450                                dev->stats.rx_bytes += len;
 451                        }
 452                }
 453                end_rxd->rx_addr = this_qbuf_dvma;
 454                end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
 455
 456                elem = NEXT_RX(elem);
 457                this = &rxbase[elem];
 458        }
 459        qep->rx_new = elem;
 460        if (drops)
 461                printk(KERN_NOTICE "%s: Memory squeeze, dropping packets.\n", qep->dev->name);
 462}
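    /* Ring arithmetic used above, assuming the usual constants from
     * sunqe.h (a small pool of RX_RING_SIZE fixed DMA buffers behind a
     * larger ring of RX_RING_MAXSIZE descriptors): elem walks the full
     * descriptor ring via NEXT_RX(), "elem & (RX_RING_SIZE - 1)" reuses
     * the buffer pool, and the just-drained buffer is re-queued
     * RX_RING_SIZE slots ahead so the chip always sees a window of ready
     * descriptors in front of it.
     */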
 463
 464static void qe_tx_reclaim(struct sunqe *qep);
 465
 466/* Interrupts for all QE's get filtered out via the QEC master controller,
 467 * so we just run through each qe and check to see who is signaling
 468 * and thus needs to be serviced.
 469 */
 470static irqreturn_t qec_interrupt(int irq, void *dev_id)
 471{
 472        struct sunqec *qecp = dev_id;
 473        u32 qec_status;
 474        int channel = 0;
 475
 476        /* Latch the status now. */
 477        qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
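            /* GLOB_STAT packs four status bits per channel into successive
             * nibbles, channel 0 in the lowest nibble; each pass of the loop
             * below consumes one nibble and moves on to the next channel.
             */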
 478        while (channel < 4) {
 479                if (qec_status & 0xf) {
 480                        struct sunqe *qep = qecp->qes[channel];
 481                        u32 qe_status;
 482
 483                        qe_status = sbus_readl(qep->qcregs + CREG_STAT);
 484                        if (qe_status & CREG_STAT_ERRORS) {
 485                                if (qe_is_bolixed(qep, qe_status))
 486                                        goto next;
 487                        }
 488                        if (qe_status & CREG_STAT_RXIRQ)
 489                                qe_rx(qep);
 490                        if (netif_queue_stopped(qep->dev) &&
 491                            (qe_status & CREG_STAT_TXIRQ)) {
 492                                spin_lock(&qep->lock);
 493                                qe_tx_reclaim(qep);
 494                                if (TX_BUFFS_AVAIL(qep) > 0) {
 495                                        /* Wake net queue and return to
 496                                         * lazy tx reclaim.
 497                                         */
 498                                        netif_wake_queue(qep->dev);
 499                                        sbus_writel(1, qep->qcregs + CREG_TIMASK);
 500                                }
 501                                spin_unlock(&qep->lock);
 502                        }
 503        next:
 504                        ;
 505                }
 506                qec_status >>= 4;
 507                channel++;
 508        }
 509
 510        return IRQ_HANDLED;
 511}
 512
 513static int qe_open(struct net_device *dev)
 514{
 515        struct sunqe *qep = netdev_priv(dev);
 516
 517        qep->mconfig = (MREGS_MCONFIG_TXENAB |
 518                        MREGS_MCONFIG_RXENAB |
 519                        MREGS_MCONFIG_MBAENAB);
 520        return qe_init(qep, 0);
 521}
 522
 523static int qe_close(struct net_device *dev)
 524{
 525        struct sunqe *qep = netdev_priv(dev);
 526
 527        qe_stop(qep);
 528        return 0;
 529}
 530
 531/* Reclaim TX'd frames from the ring.  This must always run under
 532 * the IRQ protected qep->lock.
 533 */
 534static void qe_tx_reclaim(struct sunqe *qep)
 535{
 536        struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
 537        int elem = qep->tx_old;
 538
 539        while (elem != qep->tx_new) {
 540                u32 flags = txbase[elem].tx_flags;
 541
 542                if (flags & TXD_OWN)
 543                        break;
 544                elem = NEXT_TX(elem);
 545        }
 546        qep->tx_old = elem;
 547}
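    /* Note on the "lazy" reclaim scheme: qe_init() leaves the per-channel
     * TX interrupt masked (CREG_TIMASK written with 1), so completed frames
     * are normally reclaimed opportunistically from qe_start_xmit() and the
     * timeout handler.  Only when the ring fills does qe_start_xmit() stop
     * the queue and unmask the TX interrupt, letting qec_interrupt() reclaim
     * and wake the queue back up.
     */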
 548
 549static void qe_tx_timeout(struct net_device *dev)
 550{
 551        struct sunqe *qep = netdev_priv(dev);
 552        int tx_full;
 553
 554        spin_lock_irq(&qep->lock);
 555
 556        /* Try to reclaim, if that frees up some tx
 557         * entries, we're fine.
 558         */
 559        qe_tx_reclaim(qep);
 560        tx_full = TX_BUFFS_AVAIL(qep) <= 0;
 561
 562        spin_unlock_irq(&qep->lock);
 563
 564        if (!tx_full)
 565                goto out;
 566
 567        printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
 568        qe_init(qep, 1);
 569
 570out:
 571        netif_wake_queue(dev);
 572}
 573
 574/* Get a packet queued to go onto the wire. */
 575static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
 576{
 577        struct sunqe *qep = netdev_priv(dev);
 578        struct sunqe_buffers *qbufs = qep->buffers;
 579        __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
 580        unsigned char *txbuf;
 581        int len, entry;
 582
 583        spin_lock_irq(&qep->lock);
 584
 585        qe_tx_reclaim(qep);
 586
 587        len = skb->len;
 588        entry = qep->tx_new;
 589
 590        txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
 591        txbuf_dvma = qbufs_dvma +
 592                qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));
 593
 594        /* Claim the slot with a non-OWN value first so the chip cannot see a half-built descriptor (avoid a race). */
 595        qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
 596
 597        skb_copy_from_linear_data(skb, txbuf, len);
 598
 599        qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
 600        qep->qe_block->qe_txd[entry].tx_flags =
 601                (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
 602        qep->tx_new = NEXT_TX(entry);
 603
 604        /* Get it going. */
 605        dev->trans_start = jiffies;
 606        sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
 607
 608        dev->stats.tx_packets++;
 609        dev->stats.tx_bytes += len;
 610
 611        if (TX_BUFFS_AVAIL(qep) <= 0) {
 612                /* Halt the net queue and enable tx interrupts.
 613                 * When the tx queue empties the tx irq handler
 614                 * will wake up the queue and return us back to
 615                 * the lazy tx reclaim scheme.
 616                 */
 617                netif_stop_queue(dev);
 618                sbus_writel(0, qep->qcregs + CREG_TIMASK);
 619        }
 620        spin_unlock_irq(&qep->lock);
 621
 622        dev_kfree_skb(skb);
 623
 624        return NETDEV_TX_OK;
 625}
 626
 627static void qe_set_multicast(struct net_device *dev)
 628{
 629        struct sunqe *qep = netdev_priv(dev);
 630        struct dev_mc_list *dmi = dev->mc_list;
 631        u8 new_mconfig = qep->mconfig;
 632        char *addrs;
 633        int i;
 634        u32 crc;
 635
 636        /* Lock out others. */
 637        netif_stop_queue(dev);
 638
 639        if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
 640                sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
 641                            qep->mregs + MREGS_IACONFIG);
 642                while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
 643                        barrier();
 644                for (i = 0; i < 8; i++)
 645                        sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
 646                sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
 647        } else if (dev->flags & IFF_PROMISC) {
 648                new_mconfig |= MREGS_MCONFIG_PROMISC;
 649        } else {
 650                u16 hash_table[4];
 651                u8 *hbytes = (unsigned char *) &hash_table[0];
 652
 653                for (i = 0; i < 4; i++)
 654                        hash_table[i] = 0;
 655
 656                for (i = 0; i < dev->mc_count; i++) {
 657                        addrs = dmi->dmi_addr;
 658                        dmi = dmi->next;
 659
 660                        if (!(*addrs & 1))
 661                                continue;
 662                        crc = ether_crc_le(6, addrs);
 663                        crc >>= 26;
 664                        hash_table[crc >> 4] |= 1 << (crc & 0xf);
 665                }
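                    /* A worked example of the mapping above: ether_crc_le()
                     * yields a 32-bit CRC, ">>= 26" keeps its top six bits as
                     * an index 0-63 into the 64-bit logical address filter;
                     * "crc >> 4" then selects one of the four 16-bit words and
                     * "1 << (crc & 0xf)" the bit within it.
                     */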
 666                /* Program the qe with the new filter value. */
 667                sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
 668                            qep->mregs + MREGS_IACONFIG);
 669                while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
 670                        barrier();
 671                for (i = 0; i < 8; i++) {
 672                        u8 tmp = *hbytes++;
 673                        sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
 674                }
 675                sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
 676        }
 677
 678        /* Any change of the logical address filter, the physical address,
 679         * or enabling/disabling promiscuous mode causes the MACE to disable
 680         * the receiver.  So we must re-enable them here or else the MACE
 681         * refuses to listen to anything on the network.  Sheesh, took
 682         * me a day or two to find this bug.
 683         */
 684        qep->mconfig = new_mconfig;
 685        sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);
 686
 687        /* Let us get going again. */
 688        netif_wake_queue(dev);
 689}
 690
 691/* Ethtool support... */
 692static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 693{
 694        const struct linux_prom_registers *regs;
 695        struct sunqe *qep = netdev_priv(dev);
 696        struct of_device *op;
 697
 698        strcpy(info->driver, "sunqe");
 699        strcpy(info->version, DRV_VERSION);
 700
 701        op = qep->op;
 702        regs = of_get_property(op->node, "reg", NULL);
 703        if (regs)
 704                sprintf(info->bus_info, "SBUS:%d", regs->which_io);
 705
 706}
 707
 708static u32 qe_get_link(struct net_device *dev)
 709{
 710        struct sunqe *qep = netdev_priv(dev);
 711        void __iomem *mregs = qep->mregs;
 712        u8 phyconfig;
 713
 714        spin_lock_irq(&qep->lock);
 715        phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
 716        spin_unlock_irq(&qep->lock);
 717
 718        return (phyconfig & MREGS_PHYCONFIG_LSTAT);
 719}
 720
 721static const struct ethtool_ops qe_ethtool_ops = {
 722        .get_drvinfo            = qe_get_drvinfo,
 723        .get_link               = qe_get_link,
 724};
 725
 726/* This is only called once at boot time for each card probed. */
 727static void qec_init_once(struct sunqec *qecp, struct of_device *op)
 728{
 729        u8 bsizes = qecp->qec_bursts;
 730
 731        if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
 732                sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
 733        } else if (bsizes & DMA_BURST32) {
 734                sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
 735        } else {
 736                sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
 737        }
 738
 739        /* Packet size is only used in 100baseT BigMAC configurations;
 740         * program a sane 2K value here just to be on the safe side.
 741         */
 742        sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);
 743
 744        /* Set the local memsize register, divided up to one piece per QE channel. */
 745        sbus_writel((resource_size(&op->resource[1]) >> 2),
 746                    qecp->gregs + GLOB_MSIZE);
 747
 748        /* Divide up the local QEC memory amongst the 4 QE receiver and
 749         * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
 750         */
 751        sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
 752                    qecp->gregs + GLOB_TSIZE);
 753        sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
 754                    qecp->gregs + GLOB_RSIZE);
 755}
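    /* A worked example of the split above, assuming the common case of 64K
     * of QEC local memory in resource[1] (the real size comes from the
     * firmware): GLOB_MSIZE gets 64K / 4 = 16K per channel, and each
     * channel's transmit and receive FIFO (GLOB_TSIZE/GLOB_RSIZE) gets half
     * of that, 8K.
     */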
 756
 757static u8 __devinit qec_get_burst(struct device_node *dp)
 758{
 759        u8 bsizes, bsizes_more;
 760
 761        /* Find and set the burst sizes for the QEC, since it
 762         * does the actual dma for all 4 channels.
 763         */
 764        bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
 765        bsizes &= 0xff;
 766        bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);
 767
 768        if (bsizes_more != 0xff)
 769                bsizes &= bsizes_more;
 770        if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
 771            (bsizes & DMA_BURST32) == 0)
 772                bsizes = (DMA_BURST32 - 1);
 773
 774        return bsizes;
 775}
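    /* Assuming the usual one-hot DMA_BURSTxx encoding from the sparc
     * asm/dma.h headers, the (DMA_BURST32 - 1) fallback above selects the
     * set of burst sizes below 32 bytes whenever the firmware properties
     * are missing or do not advertise both 16- and 32-byte bursts.
     */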
 776
 777static struct sunqec * __devinit get_qec(struct of_device *child)
 778{
 779        struct of_device *op = to_of_device(child->dev.parent);
 780        struct sunqec *qecp;
 781
 782        qecp = dev_get_drvdata(&op->dev);
 783        if (!qecp) {
 784                qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
 785                if (qecp) {
 786                        u32 ctrl;
 787
 788                        qecp->op = op;
 789                        qecp->gregs = of_ioremap(&op->resource[0], 0,
 790                                                 GLOB_REG_SIZE,
 791                                                 "QEC Global Registers");
 792                        if (!qecp->gregs)
 793                                goto fail;
 794
 795                        /* Make sure the QEC is in MACE mode. */
 796                        ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
 797                        ctrl &= 0xf0000000;
 798                        if (ctrl != GLOB_CTRL_MMODE) {
 799                                printk(KERN_ERR "qec: Not in MACE mode!\n");
 800                                goto fail;
 801                        }
 802
 803                        if (qec_global_reset(qecp->gregs))
 804                                goto fail;
 805
 806                        qecp->qec_bursts = qec_get_burst(op->node);
 807
 808                        qec_init_once(qecp, op);
 809
 810                        if (request_irq(op->irqs[0], &qec_interrupt,
 811                                        IRQF_SHARED, "qec", (void *) qecp)) {
 812                                printk(KERN_ERR "qec: Can't register irq.\n");
 813                                goto fail;
 814                        }
 815
 816                        dev_set_drvdata(&op->dev, qecp);
 817
 818                        qecp->next_module = root_qec_dev;
 819                        root_qec_dev = qecp;
 820                }
 821        }
 822
 823        return qecp;
 824
 825fail:
 826        if (qecp->gregs)
 827                of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
 828        kfree(qecp);
 829        return NULL;
 830}
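    /* The QEC parent is shared by up to four QE channels.  The first child
     * to probe allocates the sunqec, maps the global registers and installs
     * the shared IRQ handler; later children simply find it through the
     * parent's drvdata and hook themselves into qecp->qes[].
     */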
 831
 832static const struct net_device_ops qec_ops = {
 833        .ndo_open               = qe_open,
 834        .ndo_stop               = qe_close,
 835        .ndo_start_xmit         = qe_start_xmit,
 836        .ndo_set_multicast_list = qe_set_multicast,
 837        .ndo_tx_timeout         = qe_tx_timeout,
 838        .ndo_change_mtu         = eth_change_mtu,
 839        .ndo_set_mac_address    = eth_mac_addr,
 840        .ndo_validate_addr      = eth_validate_addr,
 841};
 842
 843static int __devinit qec_ether_init(struct of_device *op)
 844{
 845        static unsigned version_printed;
 846        struct net_device *dev;
 847        struct sunqec *qecp;
 848        struct sunqe *qe;
 849        int i, res;
 850
 851        if (version_printed++ == 0)
 852                printk(KERN_INFO "%s", version);
 853
 854        dev = alloc_etherdev(sizeof(struct sunqe));
 855        if (!dev)
 856                return -ENOMEM;
 857
 858        memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
 859
 860        qe = netdev_priv(dev);
 861
 862        res = -ENODEV;
 863
 864        i = of_getintprop_default(op->node, "channel#", -1);
 865        if (i == -1)
 866                goto fail;
 867        qe->channel = i;
 868        spin_lock_init(&qe->lock);
 869
 870        qecp = get_qec(op);
 871        if (!qecp)
 872                goto fail;
 873
 874        qecp->qes[qe->channel] = qe;
 875        qe->dev = dev;
 876        qe->parent = qecp;
 877        qe->op = op;
 878
 879        res = -ENOMEM;
 880        qe->qcregs = of_ioremap(&op->resource[0], 0,
 881                                CREG_REG_SIZE, "QEC Channel Registers");
 882        if (!qe->qcregs) {
 883                printk(KERN_ERR "qe: Cannot map channel registers.\n");
 884                goto fail;
 885        }
 886
 887        qe->mregs = of_ioremap(&op->resource[1], 0,
 888                               MREGS_REG_SIZE, "QE MACE Registers");
 889        if (!qe->mregs) {
 890                printk(KERN_ERR "qe: Cannot map MACE registers.\n");
 891                goto fail;
 892        }
 893
 894        qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
 895                                          &qe->qblock_dvma, GFP_ATOMIC);
 896        qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
 897                                         &qe->buffers_dvma, GFP_ATOMIC);
 898        if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
 899            qe->buffers == NULL || qe->buffers_dvma == 0)
 900                goto fail;
 901
 902        /* Stop this QE. */
 903        qe_stop(qe);
 904
 905        SET_NETDEV_DEV(dev, &op->dev);
 906
 907        dev->watchdog_timeo = 5*HZ;
 908        dev->irq = op->irqs[0];
 909        dev->dma = 0;
 910        dev->ethtool_ops = &qe_ethtool_ops;
 911        dev->netdev_ops = &qec_ops;
 912
 913        res = register_netdev(dev);
 914        if (res)
 915                goto fail;
 916
 917        dev_set_drvdata(&op->dev, qe);
 918
 919        printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel);
 920        for (i = 0; i < 6; i++)
 921                printk("%2.2x%c",
 922                        dev->dev_addr[i],
 923                        i == 5 ? ' ' : ':');
 924        printk("\n");
 925
 926
 927        return 0;
 928
 929fail:
 930        if (qe->qcregs)
 931                of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
 932        if (qe->mregs)
 933                of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
 934        if (qe->qe_block)
 935                dma_free_coherent(&op->dev, PAGE_SIZE,
 936                                  qe->qe_block, qe->qblock_dvma);
 937        if (qe->buffers)
 938                dma_free_coherent(&op->dev,
 939                                  sizeof(struct sunqe_buffers),
 940                                  qe->buffers,
 941                                  qe->buffers_dvma);
 942
 943        free_netdev(dev);
 944
 945        return res;
 946}
 947
 948static int __devinit qec_sbus_probe(struct of_device *op, const struct of_device_id *match)
 949{
 950        return qec_ether_init(op);
 951}
 952
 953static int __devexit qec_sbus_remove(struct of_device *op)
 954{
 955        struct sunqe *qp = dev_get_drvdata(&op->dev);
 956        struct net_device *net_dev = qp->dev;
 957
 958        unregister_netdev(net_dev);
 959
 960        of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
 961        of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
 962        dma_free_coherent(&op->dev, PAGE_SIZE,
 963                          qp->qe_block, qp->qblock_dvma);
 964        dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
 965                          qp->buffers, qp->buffers_dvma);
 966
 967        free_netdev(net_dev);
 968
 969        dev_set_drvdata(&op->dev, NULL);
 970
 971        return 0;
 972}
 973
 974static const struct of_device_id qec_sbus_match[] = {
 975        {
 976                .name = "qe",
 977        },
 978        {},
 979};
 980
 981MODULE_DEVICE_TABLE(of, qec_sbus_match);
 982
 983static struct of_platform_driver qec_sbus_driver = {
 984        .name           = "qec",
 985        .match_table    = qec_sbus_match,
 986        .probe          = qec_sbus_probe,
 987        .remove         = __devexit_p(qec_sbus_remove),
 988};
 989
 990static int __init qec_init(void)
 991{
 992        return of_register_driver(&qec_sbus_driver, &of_bus_type);
 993}
 994
 995static void __exit qec_exit(void)
 996{
 997        of_unregister_driver(&qec_sbus_driver);
 998
 999        while (root_qec_dev) {
1000                struct sunqec *next = root_qec_dev->next_module;
1001                struct of_device *op = root_qec_dev->op;
1002
1003                free_irq(op->irqs[0], (void *) root_qec_dev);
1004                of_iounmap(&op->resource[0], root_qec_dev->gregs,
1005                           GLOB_REG_SIZE);
1006                kfree(root_qec_dev);
1007
1008                root_qec_dev = next;
1009        }
1010}
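    /* Per-channel resources (register mappings, DMA blocks, net_devices)
     * are released in qec_sbus_remove(); the loop above only tears down the
     * shared QEC parents that get_qec() chained onto root_qec_dev.
     */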
1011
1012module_init(qec_init);
1013module_exit(qec_exit);
1014