/*
 * linux/drivers/net/ethoc.c
 *
 * Copyright (C) 2007-2008 Avionic Design Development GmbH
 * Copyright (C) 2008-2009 Avionic Design GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Written by Thierry Reding <thierry.reding@avionic-design.de>
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/io.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <net/ethoc.h>

static int buffer_size = 0x8000; /* 32 KBytes */
module_param(buffer_size, int, 0);
MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
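/* Note: the DMA buffer size can be tuned when loading the module, e.g.
 * with "modprobe ethoc buffer_size=0x4000".  It is only used when the
 * platform does not provide a dedicated buffer memory region.
 */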

/* register offsets */
#define MODER           0x00
#define INT_SOURCE      0x04
#define INT_MASK        0x08
#define IPGT            0x0c
#define IPGR1           0x10
#define IPGR2           0x14
#define PACKETLEN       0x18
#define COLLCONF        0x1c
#define TX_BD_NUM       0x20
#define CTRLMODER       0x24
#define MIIMODER        0x28
#define MIICOMMAND      0x2c
#define MIIADDRESS      0x30
#define MIITX_DATA      0x34
#define MIIRX_DATA      0x38
#define MIISTATUS       0x3c
#define MAC_ADDR0       0x40
#define MAC_ADDR1       0x44
#define ETH_HASH0       0x48
#define ETH_HASH1       0x4c
#define ETH_TXCTRL      0x50

/* mode register */
#define MODER_RXEN      (1 <<  0) /* receive enable */
#define MODER_TXEN      (1 <<  1) /* transmit enable */
#define MODER_NOPRE     (1 <<  2) /* no preamble */
#define MODER_BRO       (1 <<  3) /* broadcast address */
#define MODER_IAM       (1 <<  4) /* individual address mode */
#define MODER_PRO       (1 <<  5) /* promiscuous mode */
#define MODER_IFG       (1 <<  6) /* interframe gap for incoming frames */
#define MODER_LOOP      (1 <<  7) /* loopback */
#define MODER_NBO       (1 <<  8) /* no back-off */
#define MODER_EDE       (1 <<  9) /* excess defer enable */
#define MODER_FULLD     (1 << 10) /* full duplex */
#define MODER_RESET     (1 << 11) /* FIXME: reset (undocumented) */
#define MODER_DCRC      (1 << 12) /* delayed CRC enable */
#define MODER_CRC       (1 << 13) /* CRC enable */
#define MODER_HUGE      (1 << 14) /* huge packets enable */
#define MODER_PAD       (1 << 15) /* padding enabled */
#define MODER_RSM       (1 << 16) /* receive small packets */

/* interrupt source and mask registers */
#define INT_MASK_TXF    (1 << 0) /* transmit frame */
#define INT_MASK_TXE    (1 << 1) /* transmit error */
#define INT_MASK_RXF    (1 << 2) /* receive frame */
#define INT_MASK_RXE    (1 << 3) /* receive error */
#define INT_MASK_BUSY   (1 << 4)
#define INT_MASK_TXC    (1 << 5) /* transmit control frame */
#define INT_MASK_RXC    (1 << 6) /* receive control frame */

#define INT_MASK_TX     (INT_MASK_TXF | INT_MASK_TXE)
#define INT_MASK_RX     (INT_MASK_RXF | INT_MASK_RXE)

#define INT_MASK_ALL ( \
                INT_MASK_TXF | INT_MASK_TXE | \
                INT_MASK_RXF | INT_MASK_RXE | \
                INT_MASK_TXC | INT_MASK_RXC | \
                INT_MASK_BUSY \
        )

/* packet length register */
#define PACKETLEN_MIN(min)              (((min) & 0xffff) << 16)
#define PACKETLEN_MAX(max)              (((max) & 0xffff) <<  0)
#define PACKETLEN_MIN_MAX(min, max)     (PACKETLEN_MIN(min) | \
                                        PACKETLEN_MAX(max))

/* transmit buffer number register */
#define TX_BD_NUM_VAL(x)        (((x) <= 0x80) ? (x) : 0x80)

/* control module mode register */
#define CTRLMODER_PASSALL       (1 << 0) /* pass all receive frames */
#define CTRLMODER_RXFLOW        (1 << 1) /* receive control flow */
#define CTRLMODER_TXFLOW        (1 << 2) /* transmit control flow */

/* MII mode register */
#define MIIMODER_CLKDIV(x)      ((x) & 0xfe) /* needs to be an even number */
#define MIIMODER_NOPRE          (1 << 8) /* no preamble */

/* MII command register */
#define MIICOMMAND_SCAN         (1 << 0) /* scan status */
#define MIICOMMAND_READ         (1 << 1) /* read status */
#define MIICOMMAND_WRITE        (1 << 2) /* write control data */

/* MII address register */
#define MIIADDRESS_FIAD(x)              (((x) & 0x1f) << 0)
#define MIIADDRESS_RGAD(x)              (((x) & 0x1f) << 8)
#define MIIADDRESS_ADDR(phy, reg)       (MIIADDRESS_FIAD(phy) | \
                                        MIIADDRESS_RGAD(reg))

/* MII transmit data register */
#define MIITX_DATA_VAL(x)       ((x) & 0xffff)

/* MII receive data register */
#define MIIRX_DATA_VAL(x)       ((x) & 0xffff)

/* MII status register */
#define MIISTATUS_LINKFAIL      (1 << 0)
#define MIISTATUS_BUSY          (1 << 1)
#define MIISTATUS_INVALID       (1 << 2)

/* TX buffer descriptor */
#define TX_BD_CS                (1 <<  0) /* carrier sense lost */
#define TX_BD_DF                (1 <<  1) /* defer indication */
#define TX_BD_LC                (1 <<  2) /* late collision */
#define TX_BD_RL                (1 <<  3) /* retransmission limit */
#define TX_BD_RETRY_MASK        (0x00f0)
#define TX_BD_RETRY(x)          (((x) & 0x00f0) >>  4)
#define TX_BD_UR                (1 <<  8) /* transmitter underrun */
#define TX_BD_CRC               (1 << 11) /* TX CRC enable */
#define TX_BD_PAD               (1 << 12) /* pad enable for short packets */
#define TX_BD_WRAP              (1 << 13)
#define TX_BD_IRQ               (1 << 14) /* interrupt request enable */
#define TX_BD_READY             (1 << 15) /* TX buffer ready */
#define TX_BD_LEN(x)            (((x) & 0xffff) << 16)
#define TX_BD_LEN_MASK          (0xffff << 16)

#define TX_BD_STATS             (TX_BD_CS | TX_BD_DF | TX_BD_LC | \
                                TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)

/* RX buffer descriptor */
#define RX_BD_LC        (1 <<  0) /* late collision */
#define RX_BD_CRC       (1 <<  1) /* RX CRC error */
#define RX_BD_SF        (1 <<  2) /* short frame */
#define RX_BD_TL        (1 <<  3) /* too long */
#define RX_BD_DN        (1 <<  4) /* dribble nibble */
#define RX_BD_IS        (1 <<  5) /* invalid symbol */
#define RX_BD_OR        (1 <<  6) /* receiver overrun */
#define RX_BD_MISS      (1 <<  7)
#define RX_BD_CF        (1 <<  8) /* control frame */
#define RX_BD_WRAP      (1 << 13)
#define RX_BD_IRQ       (1 << 14) /* interrupt request enable */
#define RX_BD_EMPTY     (1 << 15)
#define RX_BD_LEN(x)    (((x) & 0xffff) << 16)

#define RX_BD_STATS     (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
                        RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)

#define ETHOC_BUFSIZ            1536
#define ETHOC_ZLEN              64
#define ETHOC_BD_BASE           0x400
#define ETHOC_TIMEOUT           (HZ / 2)
#define ETHOC_MII_TIMEOUT       (1 + (HZ / 5))

/**
 * struct ethoc - driver-private device structure
 * @iobase:     pointer to I/O memory region
 * @membase:    pointer to buffer memory region
 * @dma_alloc:  dma allocated buffer size
 * @io_region_size:     I/O memory region size
 * @num_tx:     number of send buffers
 * @cur_tx:     last send buffer written
 * @dty_tx:     last buffer actually sent
 * @num_rx:     number of receive buffers
 * @cur_rx:     current receive buffer
 * @vma:        pointer to array of virtual memory addresses for buffers
 * @netdev:     pointer to network device structure
 * @napi:       NAPI structure
 * @msg_enable: device state flags
 * @lock:       device lock
 * @phy:        attached PHY
 * @mdio:       MDIO bus for PHY access
 * @phy_id:     address of attached PHY
 */
struct ethoc {
        void __iomem *iobase;
        void __iomem *membase;
        int dma_alloc;
        resource_size_t io_region_size;

        unsigned int num_tx;
        unsigned int cur_tx;
        unsigned int dty_tx;

        unsigned int num_rx;
        unsigned int cur_rx;

        void **vma;

        struct net_device *netdev;
        struct napi_struct napi;
        u32 msg_enable;

        spinlock_t lock;

        struct phy_device *phy;
        struct mii_bus *mdio;
        s8 phy_id;
};

/**
 * struct ethoc_bd - buffer descriptor
 * @stat:       buffer statistics
 * @addr:       physical memory address
 */
struct ethoc_bd {
        u32 stat;
        u32 addr;
};

static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
{
        return ioread32(dev->iobase + offset);
}

static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
{
        iowrite32(data, dev->iobase + offset);
}

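/* Buffer descriptors are not kept in host memory: they live in the MAC's
 * own address space starting at ETHOC_BD_BASE, two 32-bit words (status,
 * buffer address) each, and are accessed through the MMIO helpers above.
 */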
static inline void ethoc_read_bd(struct ethoc *dev, int index,
                struct ethoc_bd *bd)
{
        loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
        bd->stat = ethoc_read(dev, offset + 0);
        bd->addr = ethoc_read(dev, offset + 4);
}

static inline void ethoc_write_bd(struct ethoc *dev, int index,
                const struct ethoc_bd *bd)
{
        loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
        ethoc_write(dev, offset + 0, bd->stat);
        ethoc_write(dev, offset + 4, bd->addr);
}

static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
{
        u32 imask = ethoc_read(dev, INT_MASK);
        imask |= mask;
        ethoc_write(dev, INT_MASK, imask);
}

static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
{
        u32 imask = ethoc_read(dev, INT_MASK);
        imask &= ~mask;
        ethoc_write(dev, INT_MASK, imask);
}

static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
{
        ethoc_write(dev, INT_SOURCE, mask);
}

static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
{
        u32 mode = ethoc_read(dev, MODER);
        mode |= MODER_RXEN | MODER_TXEN;
        ethoc_write(dev, MODER, mode);
}

static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
{
        u32 mode = ethoc_read(dev, MODER);
        mode &= ~(MODER_RXEN | MODER_TXEN);
        ethoc_write(dev, MODER, mode);
}

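/* Lay out the descriptor rings: TX descriptors occupy slots 0..num_tx-1
 * and RX descriptors follow at num_tx..num_tx+num_rx-1.  Packet buffers
 * are carved out of the buffer memory in ETHOC_BUFSIZ chunks, and the
 * last descriptor of each ring gets the WRAP bit so the MAC cycles back
 * to the first one.
 */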
static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
{
        struct ethoc_bd bd;
        int i;
        void *vma;

        dev->cur_tx = 0;
        dev->dty_tx = 0;
        dev->cur_rx = 0;

        ethoc_write(dev, TX_BD_NUM, dev->num_tx);

        /* setup transmission buffers */
        bd.addr = mem_start;
        bd.stat = TX_BD_IRQ | TX_BD_CRC;
        vma = dev->membase;

        for (i = 0; i < dev->num_tx; i++) {
                if (i == dev->num_tx - 1)
                        bd.stat |= TX_BD_WRAP;

                ethoc_write_bd(dev, i, &bd);
                bd.addr += ETHOC_BUFSIZ;

                dev->vma[i] = vma;
                vma += ETHOC_BUFSIZ;
        }

        bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

        for (i = 0; i < dev->num_rx; i++) {
                if (i == dev->num_rx - 1)
                        bd.stat |= RX_BD_WRAP;

                ethoc_write_bd(dev, dev->num_tx + i, &bd);
                bd.addr += ETHOC_BUFSIZ;

                dev->vma[dev->num_tx + i] = vma;
                vma += ETHOC_BUFSIZ;
        }

        return 0;
}

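/* Note: IPGT is programmed to 0x15 below, which appears to be the
 * back-to-back inter-packet gap value recommended by the OpenCores
 * ethmac documentation for full-duplex operation.
 */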
static int ethoc_reset(struct ethoc *dev)
{
        u32 mode;

        /* TODO: reset controller? */

        ethoc_disable_rx_and_tx(dev);

        /* TODO: setup registers */

        /* enable FCS generation and automatic padding */
        mode = ethoc_read(dev, MODER);
        mode |= MODER_CRC | MODER_PAD;
        ethoc_write(dev, MODER, mode);

        /* set full-duplex mode */
        mode = ethoc_read(dev, MODER);
        mode |= MODER_FULLD;
        ethoc_write(dev, MODER, mode);
        ethoc_write(dev, IPGT, 0x15);

        ethoc_ack_irq(dev, INT_MASK_ALL);
        ethoc_enable_irq(dev, INT_MASK_ALL);
        ethoc_enable_rx_and_tx(dev);
        return 0;
}

static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
                struct ethoc_bd *bd)
{
        struct net_device *netdev = dev->netdev;
        unsigned int ret = 0;

        if (bd->stat & RX_BD_TL) {
                dev_err(&netdev->dev, "RX: frame too long\n");
                netdev->stats.rx_length_errors++;
                ret++;
        }

        if (bd->stat & RX_BD_SF) {
                dev_err(&netdev->dev, "RX: frame too short\n");
                netdev->stats.rx_length_errors++;
                ret++;
        }

        if (bd->stat & RX_BD_DN) {
                dev_err(&netdev->dev, "RX: dribble nibble\n");
                netdev->stats.rx_frame_errors++;
        }

        if (bd->stat & RX_BD_CRC) {
                dev_err(&netdev->dev, "RX: wrong CRC\n");
                netdev->stats.rx_crc_errors++;
                ret++;
        }

        if (bd->stat & RX_BD_OR) {
                dev_err(&netdev->dev, "RX: overrun\n");
                netdev->stats.rx_over_errors++;
                ret++;
        }

        if (bd->stat & RX_BD_MISS)
                netdev->stats.rx_missed_errors++;

        if (bd->stat & RX_BD_LC) {
                dev_err(&netdev->dev, "RX: late collision\n");
                netdev->stats.collisions++;
                ret++;
        }

        return ret;
}

static int ethoc_rx(struct net_device *dev, int limit)
{
        struct ethoc *priv = netdev_priv(dev);
        int count;

        for (count = 0; count < limit; ++count) {
                unsigned int entry;
                struct ethoc_bd bd;

                entry = priv->num_tx + priv->cur_rx;
                ethoc_read_bd(priv, entry, &bd);
                if (bd.stat & RX_BD_EMPTY) {
                        ethoc_ack_irq(priv, INT_MASK_RX);
                        /* If a packet (and its interrupt) came in between
                         * checking RX_BD_EMPTY and clearing the interrupt
                         * source, we risk missing the packet as the RX
                         * interrupt won't trigger right away when we
                         * re-enable it; hence, check RX_BD_EMPTY here again
                         * to make sure there isn't such a packet waiting
                         * for us...
                         */
                        ethoc_read_bd(priv, entry, &bd);
                        if (bd.stat & RX_BD_EMPTY)
                                break;
                }

                if (ethoc_update_rx_stats(priv, &bd) == 0) {
                        int size = bd.stat >> 16;
                        struct sk_buff *skb;

                        size -= 4; /* strip the CRC */
                        skb = netdev_alloc_skb_ip_align(dev, size);

                        if (likely(skb)) {
                                void *src = priv->vma[entry];
                                memcpy_fromio(skb_put(skb, size), src, size);
                                skb->protocol = eth_type_trans(skb, dev);
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += size;
                                netif_receive_skb(skb);
                        } else {
                                if (net_ratelimit())
                                        dev_warn(&dev->dev,
                                                "low on memory - packet dropped\n");

                                dev->stats.rx_dropped++;
                                break;
                        }
                }

                /* clear the buffer descriptor so it can be reused */
                bd.stat &= ~RX_BD_STATS;
                bd.stat |=  RX_BD_EMPTY;
                ethoc_write_bd(priv, entry, &bd);
                if (++priv->cur_rx == priv->num_rx)
                        priv->cur_rx = 0;
        }

        return count;
}

static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
{
        struct net_device *netdev = dev->netdev;

        if (bd->stat & TX_BD_LC) {
                dev_err(&netdev->dev, "TX: late collision\n");
                netdev->stats.tx_window_errors++;
        }

        if (bd->stat & TX_BD_RL) {
                dev_err(&netdev->dev, "TX: retransmit limit\n");
                netdev->stats.tx_aborted_errors++;
        }

        if (bd->stat & TX_BD_UR) {
                dev_err(&netdev->dev, "TX: underrun\n");
                netdev->stats.tx_fifo_errors++;
        }

        if (bd->stat & TX_BD_CS) {
                dev_err(&netdev->dev, "TX: carrier sense lost\n");
                netdev->stats.tx_carrier_errors++;
        }

        if (bd->stat & TX_BD_STATS)
                netdev->stats.tx_errors++;

        netdev->stats.collisions += TX_BD_RETRY(bd->stat);
        netdev->stats.tx_bytes += bd->stat >> 16;
        netdev->stats.tx_packets++;
}

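/* TX reclaim: cur_tx counts descriptors handed to the hardware, dty_tx
 * counts descriptors already completed and accounted for.  Both counters
 * are free-running; masking with (num_tx - 1) yields the ring index,
 * which relies on num_tx being a power of two (ensured in ethoc_probe()).
 */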
static int ethoc_tx(struct net_device *dev, int limit)
{
        struct ethoc *priv = netdev_priv(dev);
        int count;
        struct ethoc_bd bd;

        for (count = 0; count < limit; ++count) {
                unsigned int entry;

                entry = priv->dty_tx & (priv->num_tx - 1);

                ethoc_read_bd(priv, entry, &bd);

                if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
                        ethoc_ack_irq(priv, INT_MASK_TX);
                        /* If an interrupt came in between reading the BD
                         * and clearing the interrupt source, we risk missing
                         * the event as the TX interrupt won't trigger right
                         * away when we re-enable it; hence, re-check
                         * TX_BD_READY here again to make sure there isn't
                         * such an event pending...
                         */
                        ethoc_read_bd(priv, entry, &bd);
                        if (bd.stat & TX_BD_READY ||
                            (priv->dty_tx == priv->cur_tx))
                                break;
                }

                ethoc_update_tx_stats(priv, &bd);
                priv->dty_tx++;
        }

        if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
                netif_wake_queue(dev);

        return count;
}

static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct ethoc *priv = netdev_priv(dev);
        u32 pending;
        u32 mask;

        /* Figure out what triggered the interrupt...
         * The tricky bit here is that the interrupt source bits get
         * set in INT_SOURCE for an event regardless of whether that
         * event is masked or not.  Thus, in order to figure out what
         * triggered the interrupt, we need to remove the sources
         * for all events that are currently masked.  This behaviour
         * is not particularly well documented but reasonable...
         */
        mask = ethoc_read(priv, INT_MASK);
        pending = ethoc_read(priv, INT_SOURCE);
        pending &= mask;

        if (unlikely(pending == 0))
                return IRQ_NONE;

        ethoc_ack_irq(priv, pending);

        /* We always handle the dropped packet interrupt */
        if (pending & INT_MASK_BUSY) {
                dev_err(&dev->dev, "packet dropped\n");
                dev->stats.rx_dropped++;
        }

        /* Handle receive/transmit event by switching to polling */
        if (pending & (INT_MASK_TX | INT_MASK_RX)) {
                ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
                napi_schedule(&priv->napi);
        }

        return IRQ_HANDLED;
}

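/* The hardware splits the station address across two registers: MAC_ADDR1
 * holds the two most significant bytes, MAC_ADDR0 the remaining four, so
 * the six bytes are reassembled here (and split up again when the address
 * is programmed).
 */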
static int ethoc_get_mac_address(struct net_device *dev, void *addr)
{
        struct ethoc *priv = netdev_priv(dev);
        u8 *mac = (u8 *)addr;
        u32 reg;

        reg = ethoc_read(priv, MAC_ADDR0);
        mac[2] = (reg >> 24) & 0xff;
        mac[3] = (reg >> 16) & 0xff;
        mac[4] = (reg >>  8) & 0xff;
        mac[5] = (reg >>  0) & 0xff;

        reg = ethoc_read(priv, MAC_ADDR1);
        mac[0] = (reg >>  8) & 0xff;
        mac[1] = (reg >>  0) & 0xff;

        return 0;
}

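/* NAPI poll handler: process completed RX and TX descriptors within the
 * given budget and, once both rings are drained below that budget,
 * re-enable the RX/TX interrupts that ethoc_interrupt() masked off.
 */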
static int ethoc_poll(struct napi_struct *napi, int budget)
{
        struct ethoc *priv = container_of(napi, struct ethoc, napi);
        int rx_work_done = 0;
        int tx_work_done = 0;

        rx_work_done = ethoc_rx(priv->netdev, budget);
        tx_work_done = ethoc_tx(priv->netdev, budget);

        if (rx_work_done < budget && tx_work_done < budget) {
                napi_complete(napi);
                ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
        }

        return rx_work_done;
}

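/* MDIO bus accessors: a transaction is started via MIICOMMAND and has
 * completed once the BUSY flag in MIISTATUS clears.  Rather than spin,
 * poll a few times with a short sleep in between and give up with
 * -EBUSY if the PHY never responds.
 */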
static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
{
        struct ethoc *priv = bus->priv;
        int i;

        ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
        ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);

        for (i = 0; i < 5; i++) {
                u32 status = ethoc_read(priv, MIISTATUS);
                if (!(status & MIISTATUS_BUSY)) {
                        u32 data = ethoc_read(priv, MIIRX_DATA);
                        /* reset MII command register */
                        ethoc_write(priv, MIICOMMAND, 0);
                        return data;
                }
                usleep_range(100, 200);
        }

        return -EBUSY;
}

static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
        struct ethoc *priv = bus->priv;
        int i;

        ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
        ethoc_write(priv, MIITX_DATA, val);
        ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);

        for (i = 0; i < 5; i++) {
                u32 stat = ethoc_read(priv, MIISTATUS);
                if (!(stat & MIISTATUS_BUSY)) {
                        /* reset MII command register */
                        ethoc_write(priv, MIICOMMAND, 0);
                        return 0;
                }
                usleep_range(100, 200);
        }

        return -EBUSY;
}

static int ethoc_mdio_reset(struct mii_bus *bus)
{
        return 0;
}

static void ethoc_mdio_poll(struct net_device *dev)
{
}

static int __devinit ethoc_mdio_probe(struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);
        struct phy_device *phy;
        int err;

        if (priv->phy_id != -1)
                phy = priv->mdio->phy_map[priv->phy_id];
        else
                phy = phy_find_first(priv->mdio);

        if (!phy) {
                dev_err(&dev->dev, "no PHY found\n");
                return -ENXIO;
        }

        err = phy_connect_direct(dev, phy, ethoc_mdio_poll, 0,
                        PHY_INTERFACE_MODE_GMII);
        if (err) {
                dev_err(&dev->dev, "could not attach to PHY\n");
                return err;
        }

        priv->phy = phy;
        return 0;
}

static int ethoc_open(struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);
        int ret;

        ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
                        dev->name, dev);
        if (ret)
                return ret;

        ethoc_init_ring(priv, dev->mem_start);
        ethoc_reset(priv);

        if (netif_queue_stopped(dev)) {
                dev_dbg(&dev->dev, " resuming queue\n");
                netif_wake_queue(dev);
        } else {
                dev_dbg(&dev->dev, " starting queue\n");
                netif_start_queue(dev);
        }

        phy_start(priv->phy);
        napi_enable(&priv->napi);

        if (netif_msg_ifup(priv)) {
                dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
                                dev->base_addr, dev->mem_start, dev->mem_end);
        }

        return 0;
}

static int ethoc_stop(struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);

        napi_disable(&priv->napi);

        if (priv->phy)
                phy_stop(priv->phy);

        ethoc_disable_rx_and_tx(priv);
        free_irq(dev->irq, dev);

        if (!netif_queue_stopped(dev))
                netif_stop_queue(dev);

        return 0;
}

static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct ethoc *priv = netdev_priv(dev);
        struct mii_ioctl_data *mdio = if_mii(ifr);
        struct phy_device *phy = NULL;

        if (!netif_running(dev))
                return -EINVAL;

        if (cmd != SIOCGMIIPHY) {
                if (mdio->phy_id >= PHY_MAX_ADDR)
                        return -ERANGE;

                phy = priv->mdio->phy_map[mdio->phy_id];
                if (!phy)
                        return -ENODEV;
        } else {
                phy = priv->phy;
        }

        return phy_mii_ioctl(phy, ifr, cmd);
}

static int ethoc_config(struct net_device *dev, struct ifmap *map)
{
        return -ENOSYS;
}

static void ethoc_do_set_mac_address(struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);
        unsigned char *mac = dev->dev_addr;

        ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
                                     (mac[4] <<  8) | (mac[5] <<  0));
        ethoc_write(priv, MAC_ADDR1, (mac[0] <<  8) | (mac[1] <<  0));
}

static int ethoc_set_mac_address(struct net_device *dev, void *p)
{
        const struct sockaddr *addr = p;

        /* ndo_set_mac_address is handed a struct sockaddr, not a raw MAC;
         * validate it and update dev_addr before programming the hardware.
         */
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
        ethoc_do_set_mac_address(dev);
        return 0;
}

static void ethoc_set_multicast_list(struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);
        u32 mode = ethoc_read(priv, MODER);
        struct netdev_hw_addr *ha;
        u32 hash[2] = { 0, 0 };

        /* set loopback mode if requested */
        if (dev->flags & IFF_LOOPBACK)
                mode |=  MODER_LOOP;
        else
                mode &= ~MODER_LOOP;

        /* receive broadcast frames if requested */
        if (dev->flags & IFF_BROADCAST)
                mode &= ~MODER_BRO;
        else
                mode |=  MODER_BRO;

        /* enable promiscuous mode if requested */
        if (dev->flags & IFF_PROMISC)
                mode |=  MODER_PRO;
        else
                mode &= ~MODER_PRO;

        ethoc_write(priv, MODER, mode);

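        /* The 64-bit multicast hash filter works the way it does on most
         * MACs of this vintage: the top six bits of the CRC-32 of the
         * destination address select one of the 64 bits spread across
         * ETH_HASH0/ETH_HASH1, i.e. bit (crc >> 26) & 0x3f must be set
         * for a multicast frame to pass the filter.
         */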
        /* receive multicast frames */
        if (dev->flags & IFF_ALLMULTI) {
                hash[0] = 0xffffffff;
                hash[1] = 0xffffffff;
        } else {
                netdev_for_each_mc_addr(ha, dev) {
                        u32 crc = ether_crc(ETH_ALEN, ha->addr);
                        int bit = (crc >> 26) & 0x3f;
                        hash[bit >> 5] |= 1 << (bit & 0x1f);
                }
        }

        ethoc_write(priv, ETH_HASH0, hash[0]);
        ethoc_write(priv, ETH_HASH1, hash[1]);
}

static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
{
        return -ENOSYS;
}

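/* The netdev watchdog fired: if the controller has interrupt sources
 * pending, run the interrupt handler by hand on the assumption that the
 * stalled queue is merely the result of a missed interrupt.
 */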
static void ethoc_tx_timeout(struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);
        u32 pending = ethoc_read(priv, INT_SOURCE);
        if (likely(pending))
                ethoc_interrupt(dev->irq, dev);
}

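/* Transmit path: the packet is copied into the descriptor's buffer with
 * memcpy_toio() and the descriptor is then written in two steps: first
 * with the new length and cleared status, and only then with TX_BD_READY
 * set, so the MAC cannot pick up a descriptor whose length is stale.
 */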
static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);
        struct ethoc_bd bd;
        unsigned int entry;
        void *dest;

        if (unlikely(skb->len > ETHOC_BUFSIZ)) {
                dev->stats.tx_errors++;
                goto out;
        }

        entry = priv->cur_tx % priv->num_tx;
        spin_lock_irq(&priv->lock);
        priv->cur_tx++;

        ethoc_read_bd(priv, entry, &bd);
        if (unlikely(skb->len < ETHOC_ZLEN))
                bd.stat |=  TX_BD_PAD;
        else
                bd.stat &= ~TX_BD_PAD;

        dest = priv->vma[entry];
        memcpy_toio(dest, skb->data, skb->len);

        bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
        bd.stat |= TX_BD_LEN(skb->len);
        ethoc_write_bd(priv, entry, &bd);

        bd.stat |= TX_BD_READY;
        ethoc_write_bd(priv, entry, &bd);

        if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) {
                dev_dbg(&dev->dev, "stopping queue\n");
                netif_stop_queue(dev);
        }

        spin_unlock_irq(&priv->lock);
out:
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static const struct net_device_ops ethoc_netdev_ops = {
        .ndo_open = ethoc_open,
        .ndo_stop = ethoc_stop,
        .ndo_do_ioctl = ethoc_ioctl,
        .ndo_set_config = ethoc_config,
        .ndo_set_mac_address = ethoc_set_mac_address,
        .ndo_set_multicast_list = ethoc_set_multicast_list,
        .ndo_change_mtu = ethoc_change_mtu,
        .ndo_tx_timeout = ethoc_tx_timeout,
        .ndo_start_xmit = ethoc_start_xmit,
};

/**
 * ethoc_probe() - initialize OpenCores ethernet MAC
 * @pdev:       platform device
 */
static int __devinit ethoc_probe(struct platform_device *pdev)
{
        struct net_device *netdev = NULL;
        struct resource *res = NULL;
        struct resource *mmio = NULL;
        struct resource *mem = NULL;
        struct ethoc *priv = NULL;
        unsigned int phy;
        int num_bd;
        int ret = 0;

        /* allocate networking device */
        netdev = alloc_etherdev(sizeof(struct ethoc));
        if (!netdev) {
                dev_err(&pdev->dev, "cannot allocate network device\n");
                ret = -ENOMEM;
                goto out;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);
        platform_set_drvdata(pdev, netdev);

        /* obtain I/O memory space */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
                ret = -ENXIO;
                goto free;
        }

        mmio = devm_request_mem_region(&pdev->dev, res->start,
                        resource_size(res), res->name);
        if (!mmio) {
                dev_err(&pdev->dev, "cannot request I/O memory space\n");
                ret = -ENXIO;
                goto free;
        }

        netdev->base_addr = mmio->start;

        /* obtain buffer memory space */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res) {
                mem = devm_request_mem_region(&pdev->dev, res->start,
                        resource_size(res), res->name);
                if (!mem) {
                        dev_err(&pdev->dev, "cannot request memory space\n");
                        ret = -ENXIO;
                        goto free;
                }

                netdev->mem_start = mem->start;
                netdev->mem_end   = mem->end;
        }

        /* obtain device IRQ number */
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res) {
                dev_err(&pdev->dev, "cannot obtain IRQ\n");
                ret = -ENXIO;
                goto free;
        }

        netdev->irq = res->start;

        /* setup driver-private data */
        priv = netdev_priv(netdev);
        priv->netdev = netdev;
        priv->dma_alloc = 0;
        priv->io_region_size = resource_size(mmio);

        priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
                        resource_size(mmio));
        if (!priv->iobase) {
                dev_err(&pdev->dev, "cannot remap I/O memory space\n");
                ret = -ENXIO;
                goto free;
        }

        if (netdev->mem_end) {
                priv->membase = devm_ioremap_nocache(&pdev->dev,
                        netdev->mem_start, resource_size(mem));
                if (!priv->membase) {
                        dev_err(&pdev->dev, "cannot remap memory space\n");
                        ret = -ENXIO;
                        goto free;
                }
        } else {
                /* Allocate buffer memory */
                priv->membase = dmam_alloc_coherent(&pdev->dev,
                        buffer_size, (void *)&netdev->mem_start,
                        GFP_KERNEL);
                if (!priv->membase) {
                        dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
                                buffer_size);
                        ret = -ENOMEM;
                        goto free;
                }
                netdev->mem_end = netdev->mem_start + buffer_size;
                priv->dma_alloc = buffer_size;
        }


        /* calculate the number of TX/RX buffers, maximum 128 supported */
        num_bd = min_t(unsigned int,
                128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
        if (num_bd < 4) {
                ret = -ENODEV;
                goto free;
        }
        /* num_tx must be a power of two */
        priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
        priv->num_rx = num_bd - priv->num_tx;

        dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
                priv->num_tx, priv->num_rx);

        priv->vma = devm_kzalloc(&pdev->dev, num_bd * sizeof(void *),
                        GFP_KERNEL);
        if (!priv->vma) {
                ret = -ENOMEM;
                goto free;
        }

        /* Allow the platform setup code to pass in a MAC address. */
        if (pdev->dev.platform_data) {
                struct ethoc_platform_data *pdata = pdev->dev.platform_data;
                memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
                priv->phy_id = pdata->phy_id;
        } else {
                priv->phy_id = -1;

#ifdef CONFIG_OF
                {
                const u8 *mac;

                mac = of_get_property(pdev->dev.of_node,
                                      "local-mac-address",
                                      NULL);
                if (mac)
                        memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
                }
#endif
        }

        /* Check that the given MAC address is valid. If it isn't, read the
         * current MAC from the controller. */
        if (!is_valid_ether_addr(netdev->dev_addr))
                ethoc_get_mac_address(netdev, netdev->dev_addr);

        /* Check the MAC again for validity; if it still isn't valid,
         * choose and program a random one. */
        if (!is_valid_ether_addr(netdev->dev_addr))
                random_ether_addr(netdev->dev_addr);

        ethoc_do_set_mac_address(netdev);

        /* register MII bus */
        priv->mdio = mdiobus_alloc();
        if (!priv->mdio) {
                ret = -ENOMEM;
                goto free;
        }

        priv->mdio->name = "ethoc-mdio";
        snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d",
                        priv->mdio->name, pdev->id);
        priv->mdio->read = ethoc_mdio_read;
        priv->mdio->write = ethoc_mdio_write;
        priv->mdio->reset = ethoc_mdio_reset;
        priv->mdio->priv = priv;

        priv->mdio->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
        if (!priv->mdio->irq) {
                ret = -ENOMEM;
                goto free_mdio;
        }

        for (phy = 0; phy < PHY_MAX_ADDR; phy++)
                priv->mdio->irq[phy] = PHY_POLL;

        ret = mdiobus_register(priv->mdio);
        if (ret) {
                dev_err(&netdev->dev, "failed to register MDIO bus\n");
                goto free_mdio;
        }

        ret = ethoc_mdio_probe(netdev);
        if (ret) {
                dev_err(&netdev->dev, "failed to probe MDIO bus\n");
                goto error;
        }

        ether_setup(netdev);

        /* setup the net_device structure */
        netdev->netdev_ops = &ethoc_netdev_ops;
        netdev->watchdog_timeo = ETHOC_TIMEOUT;

        /* setup NAPI */
        netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);

        spin_lock_init(&priv->lock);

        ret = register_netdev(netdev);
        if (ret < 0) {
                dev_err(&netdev->dev, "failed to register interface\n");
                goto error2;
        }

        goto out;

error2:
        netif_napi_del(&priv->napi);
error:
        mdiobus_unregister(priv->mdio);
free_mdio:
        kfree(priv->mdio->irq);
        mdiobus_free(priv->mdio);
free:
        free_netdev(netdev);
out:
        return ret;
}

/**
 * ethoc_remove() - shutdown OpenCores ethernet MAC
 * @pdev:       platform device
 */
static int __devexit ethoc_remove(struct platform_device *pdev)
{
        struct net_device *netdev = platform_get_drvdata(pdev);
        struct ethoc *priv = netdev_priv(netdev);

        platform_set_drvdata(pdev, NULL);

        if (netdev) {
                netif_napi_del(&priv->napi);
                phy_disconnect(priv->phy);
                priv->phy = NULL;

                if (priv->mdio) {
                        mdiobus_unregister(priv->mdio);
                        kfree(priv->mdio->irq);
                        mdiobus_free(priv->mdio);
                }
                unregister_netdev(netdev);
                free_netdev(netdev);
        }

        return 0;
}

#ifdef CONFIG_PM
static int ethoc_suspend(struct platform_device *pdev, pm_message_t state)
{
        return -ENOSYS;
}

static int ethoc_resume(struct platform_device *pdev)
{
        return -ENOSYS;
}
#else
# define ethoc_suspend NULL
# define ethoc_resume  NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id ethoc_match[] = {
        {
                .compatible = "opencores,ethoc",
        },
        {},
};
MODULE_DEVICE_TABLE(of, ethoc_match);
#endif

static struct platform_driver ethoc_driver = {
        .probe   = ethoc_probe,
        .remove  = __devexit_p(ethoc_remove),
        .suspend = ethoc_suspend,
        .resume  = ethoc_resume,
        .driver  = {
                .name = "ethoc",
                .owner = THIS_MODULE,
#ifdef CONFIG_OF
                .of_match_table = ethoc_match,
#endif
        },
};

static int __init ethoc_init(void)
{
        return platform_driver_register(&ethoc_driver);
}

static void __exit ethoc_exit(void)
{
        platform_driver_unregister(&ethoc_driver);
}

module_init(ethoc_init);
module_exit(ethoc_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
MODULE_LICENSE("GPL v2");