// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/net/ethernet/ethoc.c
 *
 * Copyright (C) 2007-2008 Avionic Design Development GmbH
 * Copyright (C) 2008-2009 Avionic Design GmbH
 *
 * Written by Thierry Reding <thierry.reding@avionic-design.de>
 */

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/module.h>
#include <net/ethoc.h>

static int buffer_size = 0x8000; /* 32 KBytes */
module_param(buffer_size, int, 0);
MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
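/* The DMA buffer size is read once at module load time; something like
 * "modprobe ethoc buffer_size=0x4000" should halve the coherent
 * allocation that is made when the platform provides no dedicated
 * packet memory resource (see ethoc_probe() below).
 */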

/* register offsets */
#define MODER           0x00
#define INT_SOURCE      0x04
#define INT_MASK        0x08
#define IPGT            0x0c
#define IPGR1           0x10
#define IPGR2           0x14
#define PACKETLEN       0x18
#define COLLCONF        0x1c
#define TX_BD_NUM       0x20
#define CTRLMODER       0x24
#define MIIMODER        0x28
#define MIICOMMAND      0x2c
#define MIIADDRESS      0x30
#define MIITX_DATA      0x34
#define MIIRX_DATA      0x38
#define MIISTATUS       0x3c
#define MAC_ADDR0       0x40
#define MAC_ADDR1       0x44
#define ETH_HASH0       0x48
#define ETH_HASH1       0x4c
#define ETH_TXCTRL      0x50
#define ETH_END         0x54

/* mode register */
#define MODER_RXEN      (1 <<  0) /* receive enable */
#define MODER_TXEN      (1 <<  1) /* transmit enable */
#define MODER_NOPRE     (1 <<  2) /* no preamble */
#define MODER_BRO       (1 <<  3) /* broadcast address */
#define MODER_IAM       (1 <<  4) /* individual address mode */
#define MODER_PRO       (1 <<  5) /* promiscuous mode */
#define MODER_IFG       (1 <<  6) /* interframe gap for incoming frames */
#define MODER_LOOP      (1 <<  7) /* loopback */
#define MODER_NBO       (1 <<  8) /* no back-off */
#define MODER_EDE       (1 <<  9) /* excess defer enable */
#define MODER_FULLD     (1 << 10) /* full duplex */
#define MODER_RESET     (1 << 11) /* FIXME: reset (undocumented) */
#define MODER_DCRC      (1 << 12) /* delayed CRC enable */
#define MODER_CRC       (1 << 13) /* CRC enable */
#define MODER_HUGE      (1 << 14) /* huge packets enable */
#define MODER_PAD       (1 << 15) /* padding enabled */
#define MODER_RSM       (1 << 16) /* receive small packets */

/* interrupt source and mask registers */
#define INT_MASK_TXF    (1 << 0) /* transmit frame */
#define INT_MASK_TXE    (1 << 1) /* transmit error */
#define INT_MASK_RXF    (1 << 2) /* receive frame */
#define INT_MASK_RXE    (1 << 3) /* receive error */
#define INT_MASK_BUSY   (1 << 4)
#define INT_MASK_TXC    (1 << 5) /* transmit control frame */
#define INT_MASK_RXC    (1 << 6) /* receive control frame */

#define INT_MASK_TX     (INT_MASK_TXF | INT_MASK_TXE)
#define INT_MASK_RX     (INT_MASK_RXF | INT_MASK_RXE)

#define INT_MASK_ALL ( \
                INT_MASK_TXF | INT_MASK_TXE | \
                INT_MASK_RXF | INT_MASK_RXE | \
                INT_MASK_TXC | INT_MASK_RXC | \
                INT_MASK_BUSY \
        )

/* packet length register */
#define PACKETLEN_MIN(min)              (((min) & 0xffff) << 16)
#define PACKETLEN_MAX(max)              (((max) & 0xffff) <<  0)
#define PACKETLEN_MIN_MAX(min, max)     (PACKETLEN_MIN(min) | \
                                        PACKETLEN_MAX(max))

/* transmit buffer number register */
#define TX_BD_NUM_VAL(x)        (((x) <= 0x80) ? (x) : 0x80)

/* control module mode register */
#define CTRLMODER_PASSALL       (1 << 0) /* pass all receive frames */
#define CTRLMODER_RXFLOW        (1 << 1) /* receive control flow */
#define CTRLMODER_TXFLOW        (1 << 2) /* transmit control flow */

/* MII mode register */
#define MIIMODER_CLKDIV(x)      ((x) & 0xfe) /* needs to be an even number */
#define MIIMODER_NOPRE          (1 << 8) /* no preamble */

/* MII command register */
#define MIICOMMAND_SCAN         (1 << 0) /* scan status */
#define MIICOMMAND_READ         (1 << 1) /* read status */
#define MIICOMMAND_WRITE        (1 << 2) /* write control data */

/* MII address register */
#define MIIADDRESS_FIAD(x)              (((x) & 0x1f) << 0)
#define MIIADDRESS_RGAD(x)              (((x) & 0x1f) << 8)
#define MIIADDRESS_ADDR(phy, reg)       (MIIADDRESS_FIAD(phy) | \
                                        MIIADDRESS_RGAD(reg))

/* MII transmit data register */
#define MIITX_DATA_VAL(x)       ((x) & 0xffff)

/* MII receive data register */
#define MIIRX_DATA_VAL(x)       ((x) & 0xffff)

/* MII status register */
#define MIISTATUS_LINKFAIL      (1 << 0)
#define MIISTATUS_BUSY          (1 << 1)
#define MIISTATUS_INVALID       (1 << 2)

/* TX buffer descriptor */
#define TX_BD_CS                (1 <<  0) /* carrier sense lost */
#define TX_BD_DF                (1 <<  1) /* defer indication */
#define TX_BD_LC                (1 <<  2) /* late collision */
#define TX_BD_RL                (1 <<  3) /* retransmission limit */
#define TX_BD_RETRY_MASK        (0x00f0)
#define TX_BD_RETRY(x)          (((x) & 0x00f0) >>  4)
#define TX_BD_UR                (1 <<  8) /* transmitter underrun */
#define TX_BD_CRC               (1 << 11) /* TX CRC enable */
#define TX_BD_PAD               (1 << 12) /* pad enable for short packets */
#define TX_BD_WRAP              (1 << 13)
#define TX_BD_IRQ               (1 << 14) /* interrupt request enable */
#define TX_BD_READY             (1 << 15) /* TX buffer ready */
#define TX_BD_LEN(x)            (((x) & 0xffff) << 16)
#define TX_BD_LEN_MASK          (0xffff << 16)

#define TX_BD_STATS             (TX_BD_CS | TX_BD_DF | TX_BD_LC | \
                                TX_BD_RL | TX_BD_RETRY_MASK | TX_BD_UR)

/* RX buffer descriptor */
#define RX_BD_LC        (1 <<  0) /* late collision */
#define RX_BD_CRC       (1 <<  1) /* RX CRC error */
#define RX_BD_SF        (1 <<  2) /* short frame */
#define RX_BD_TL        (1 <<  3) /* too long */
#define RX_BD_DN        (1 <<  4) /* dribble nibble */
#define RX_BD_IS        (1 <<  5) /* invalid symbol */
#define RX_BD_OR        (1 <<  6) /* receiver overrun */
#define RX_BD_MISS      (1 <<  7)
#define RX_BD_CF        (1 <<  8) /* control frame */
#define RX_BD_WRAP      (1 << 13)
#define RX_BD_IRQ       (1 << 14) /* interrupt request enable */
#define RX_BD_EMPTY     (1 << 15)
#define RX_BD_LEN(x)    (((x) & 0xffff) << 16)

#define RX_BD_STATS     (RX_BD_LC | RX_BD_CRC | RX_BD_SF | RX_BD_TL | \
                        RX_BD_DN | RX_BD_IS | RX_BD_OR | RX_BD_MISS)

#define ETHOC_BUFSIZ            1536
#define ETHOC_ZLEN              64
#define ETHOC_BD_BASE           0x400
#define ETHOC_TIMEOUT           (HZ / 2)
#define ETHOC_MII_TIMEOUT       (1 + (HZ / 5))

/**
 * struct ethoc - driver-private device structure
 * @iobase:     pointer to I/O memory region
 * @membase:    pointer to buffer memory region
 * @big_endian: register interface is big endian
 * @num_bd:     number of buffer descriptors
 * @num_tx:     number of send buffers
 * @cur_tx:     last send buffer written
 * @dty_tx:     last buffer actually sent
 * @num_rx:     number of receive buffers
 * @cur_rx:     current receive buffer
 * @vma:        pointer to array of virtual memory addresses for buffers
 * @netdev:     pointer to network device structure
 * @napi:       NAPI structure
 * @msg_enable: device state flags
 * @lock:       device lock
 * @mdio:       MDIO bus for PHY access
 * @clk:        clock driving the MII management bus, if any
 * @phy_id:     address of attached PHY
 * @old_link:   previously recorded link state
 * @old_duplex: previously recorded duplex mode
 */
struct ethoc {
        void __iomem *iobase;
        void __iomem *membase;
        bool big_endian;

        unsigned int num_bd;
        unsigned int num_tx;
        unsigned int cur_tx;
        unsigned int dty_tx;

        unsigned int num_rx;
        unsigned int cur_rx;

        void **vma;

        struct net_device *netdev;
        struct napi_struct napi;
        u32 msg_enable;

        spinlock_t lock;

        struct mii_bus *mdio;
        struct clk *clk;
        s8 phy_id;

        int old_link;
        int old_duplex;
};

/**
 * struct ethoc_bd - buffer descriptor
 * @stat:       buffer statistics
 * @addr:       physical memory address
 */
struct ethoc_bd {
        u32 stat;
        u32 addr;
};

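/* Register accessors: the core's registers are accessed through MMIO and,
 * depending on how the IP was instantiated, may appear big- or
 * little-endian to the CPU; probe records the bus endianness (from
 * platform data or the device tree) so one binary handles both layouts.
 */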
static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
{
        if (dev->big_endian)
                return ioread32be(dev->iobase + offset);
        else
                return ioread32(dev->iobase + offset);
}

static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
{
        if (dev->big_endian)
                iowrite32be(data, dev->iobase + offset);
        else
                iowrite32(data, dev->iobase + offset);
}

static inline void ethoc_read_bd(struct ethoc *dev, int index,
                struct ethoc_bd *bd)
{
        loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
        bd->stat = ethoc_read(dev, offset + 0);
        bd->addr = ethoc_read(dev, offset + 4);
}

static inline void ethoc_write_bd(struct ethoc *dev, int index,
                const struct ethoc_bd *bd)
{
        loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
        ethoc_write(dev, offset + 0, bd->stat);
        ethoc_write(dev, offset + 4, bd->addr);
}

static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
{
        u32 imask = ethoc_read(dev, INT_MASK);
        imask |= mask;
        ethoc_write(dev, INT_MASK, imask);
}

static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
{
        u32 imask = ethoc_read(dev, INT_MASK);
        imask &= ~mask;
        ethoc_write(dev, INT_MASK, imask);
}

static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
{
        ethoc_write(dev, INT_SOURCE, mask);
}

static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
{
        u32 mode = ethoc_read(dev, MODER);
        mode |= MODER_RXEN | MODER_TXEN;
        ethoc_write(dev, MODER, mode);
}

static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
{
        u32 mode = ethoc_read(dev, MODER);
        mode &= ~(MODER_RXEN | MODER_TXEN);
        ethoc_write(dev, MODER, mode);
}

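/* Set up the descriptor rings: the first num_tx slots of the descriptor
 * table hold TX descriptors, followed by num_rx RX descriptors. Each
 * descriptor is pointed at a fixed ETHOC_BUFSIZ slice of packet memory
 * starting at mem_start, while dev->vma[] records the matching CPU-side
 * addresses used by memcpy_toio()/memcpy_fromio().
 */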
static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
{
        struct ethoc_bd bd;
        int i;
        void *vma;

        dev->cur_tx = 0;
        dev->dty_tx = 0;
        dev->cur_rx = 0;

        ethoc_write(dev, TX_BD_NUM, dev->num_tx);

        /* setup transmission buffers */
        bd.addr = mem_start;
        bd.stat = TX_BD_IRQ | TX_BD_CRC;
        vma = dev->membase;

        for (i = 0; i < dev->num_tx; i++) {
                if (i == dev->num_tx - 1)
                        bd.stat |= TX_BD_WRAP;

                ethoc_write_bd(dev, i, &bd);
                bd.addr += ETHOC_BUFSIZ;

                dev->vma[i] = vma;
                vma += ETHOC_BUFSIZ;
        }

        bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

        for (i = 0; i < dev->num_rx; i++) {
                if (i == dev->num_rx - 1)
                        bd.stat |= RX_BD_WRAP;

                ethoc_write_bd(dev, dev->num_tx + i, &bd);
                bd.addr += ETHOC_BUFSIZ;

                dev->vma[dev->num_tx + i] = vma;
                vma += ETHOC_BUFSIZ;
        }

        return 0;
}

static int ethoc_reset(struct ethoc *dev)
{
        u32 mode;

        /* TODO: reset controller? */

        ethoc_disable_rx_and_tx(dev);

        /* TODO: setup registers */

        /* enable FCS generation and automatic padding */
        mode = ethoc_read(dev, MODER);
        mode |= MODER_CRC | MODER_PAD;
        ethoc_write(dev, MODER, mode);

        /* set full-duplex mode */
        mode = ethoc_read(dev, MODER);
        mode |= MODER_FULLD;
        ethoc_write(dev, MODER, mode);
        ethoc_write(dev, IPGT, 0x15);

        ethoc_ack_irq(dev, INT_MASK_ALL);
        ethoc_enable_irq(dev, INT_MASK_ALL);
        ethoc_enable_rx_and_tx(dev);
        return 0;
}

static unsigned int ethoc_update_rx_stats(struct ethoc *dev,
                struct ethoc_bd *bd)
{
        struct net_device *netdev = dev->netdev;
        unsigned int ret = 0;

        if (bd->stat & RX_BD_TL) {
                dev_err(&netdev->dev, "RX: frame too long\n");
                netdev->stats.rx_length_errors++;
                ret++;
        }

        if (bd->stat & RX_BD_SF) {
                dev_err(&netdev->dev, "RX: frame too short\n");
                netdev->stats.rx_length_errors++;
                ret++;
        }

        if (bd->stat & RX_BD_DN) {
                dev_err(&netdev->dev, "RX: dribble nibble\n");
                netdev->stats.rx_frame_errors++;
        }

        if (bd->stat & RX_BD_CRC) {
                dev_err(&netdev->dev, "RX: wrong CRC\n");
                netdev->stats.rx_crc_errors++;
                ret++;
        }

        if (bd->stat & RX_BD_OR) {
                dev_err(&netdev->dev, "RX: overrun\n");
                netdev->stats.rx_over_errors++;
                ret++;
        }

        if (bd->stat & RX_BD_MISS)
                netdev->stats.rx_missed_errors++;

        if (bd->stat & RX_BD_LC) {
                dev_err(&netdev->dev, "RX: late collision\n");
                netdev->stats.collisions++;
                ret++;
        }

        return ret;
}

static int ethoc_rx(struct net_device *dev, int limit)
{
        struct ethoc *priv = netdev_priv(dev);
        int count;

        for (count = 0; count < limit; ++count) {
                unsigned int entry;
                struct ethoc_bd bd;

                entry = priv->num_tx + priv->cur_rx;
                ethoc_read_bd(priv, entry, &bd);
                if (bd.stat & RX_BD_EMPTY) {
                        ethoc_ack_irq(priv, INT_MASK_RX);
                        /* If a packet (and its interrupt) arrived between
                         * checking RX_BD_EMPTY and clearing the interrupt
                         * source, we risk missing it: the RX interrupt won't
                         * trigger right away when we reenable it. Hence,
                         * check RX_BD_EMPTY here again to make sure there
                         * isn't such a packet waiting for us...
                         */
                        ethoc_read_bd(priv, entry, &bd);
                        if (bd.stat & RX_BD_EMPTY)
                                break;
                }

                if (ethoc_update_rx_stats(priv, &bd) == 0) {
                        int size = bd.stat >> 16;
                        struct sk_buff *skb;

                        size -= 4; /* strip the CRC */
                        skb = netdev_alloc_skb_ip_align(dev, size);

                        if (likely(skb)) {
                                void *src = priv->vma[entry];
                                memcpy_fromio(skb_put(skb, size), src, size);
                                skb->protocol = eth_type_trans(skb, dev);
                                dev->stats.rx_packets++;
                                dev->stats.rx_bytes += size;
                                netif_receive_skb(skb);
                        } else {
                                if (net_ratelimit())
                                        dev_warn(&dev->dev,
                                            "low on memory - packet dropped\n");

                                dev->stats.rx_dropped++;
                                break;
                        }
                }

                /* clear the buffer descriptor so it can be reused */
                bd.stat &= ~RX_BD_STATS;
                bd.stat |=  RX_BD_EMPTY;
                ethoc_write_bd(priv, entry, &bd);
                if (++priv->cur_rx == priv->num_rx)
                        priv->cur_rx = 0;
        }

        return count;
}

static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
{
        struct net_device *netdev = dev->netdev;

        if (bd->stat & TX_BD_LC) {
                dev_err(&netdev->dev, "TX: late collision\n");
                netdev->stats.tx_window_errors++;
        }

        if (bd->stat & TX_BD_RL) {
                dev_err(&netdev->dev, "TX: retransmit limit\n");
                netdev->stats.tx_aborted_errors++;
        }

        if (bd->stat & TX_BD_UR) {
                dev_err(&netdev->dev, "TX: underrun\n");
                netdev->stats.tx_fifo_errors++;
        }

        if (bd->stat & TX_BD_CS) {
                dev_err(&netdev->dev, "TX: carrier sense lost\n");
                netdev->stats.tx_carrier_errors++;
        }

        if (bd->stat & TX_BD_STATS)
                netdev->stats.tx_errors++;

        netdev->stats.collisions += (bd->stat >> 4) & 0xf;
        netdev->stats.tx_bytes += bd->stat >> 16;
        netdev->stats.tx_packets++;
}

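/* Reclaim completed TX descriptors between dty_tx and cur_tx. Masking
 * with (num_tx - 1) assumes num_tx is a power of two, which the probe
 * and set_ringparam paths guarantee via rounddown_pow_of_two().
 */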
static int ethoc_tx(struct net_device *dev, int limit)
{
        struct ethoc *priv = netdev_priv(dev);
        int count;
        struct ethoc_bd bd;

        for (count = 0; count < limit; ++count) {
                unsigned int entry;

                entry = priv->dty_tx & (priv->num_tx-1);

                ethoc_read_bd(priv, entry, &bd);

                if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
                        ethoc_ack_irq(priv, INT_MASK_TX);
                        /* If the interrupt came in between reading in the BD
                         * and clearing the interrupt source, then we risk
                         * missing the event as the TX interrupt won't trigger
                         * right away when we reenable it; hence, check
                         * TX_BD_READY here again to make sure there isn't
                         * such an event pending...
                         */
                        ethoc_read_bd(priv, entry, &bd);
                        if (bd.stat & TX_BD_READY ||
                            (priv->dty_tx == priv->cur_tx))
                                break;
                }

                ethoc_update_tx_stats(priv, &bd);
                priv->dty_tx++;
        }

        if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
                netif_wake_queue(dev);

        return count;
}

static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct ethoc *priv = netdev_priv(dev);
        u32 pending;
        u32 mask;

        /* Figure out what triggered the interrupt...
         * The tricky bit here is that the interrupt source bits get
         * set in INT_SOURCE for an event regardless of whether that
         * event is masked or not.  Thus, in order to figure out what
         * triggered the interrupt, we need to remove the sources
         * for all events that are currently masked.  This behaviour
         * is not particularly well documented but reasonable...
         */
        mask = ethoc_read(priv, INT_MASK);
        pending = ethoc_read(priv, INT_SOURCE);
        pending &= mask;

        if (unlikely(pending == 0))
                return IRQ_NONE;

        ethoc_ack_irq(priv, pending);

        /* We always handle the dropped packet interrupt */
        if (pending & INT_MASK_BUSY) {
                dev_dbg(&dev->dev, "packet dropped\n");
                dev->stats.rx_dropped++;
        }

        /* Handle receive/transmit event by switching to polling */
        if (pending & (INT_MASK_TX | INT_MASK_RX)) {
                ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
                napi_schedule(&priv->napi);
        }

        return IRQ_HANDLED;
}

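/* The station address is split across two registers: MAC_ADDR0 holds the
 * four least significant bytes, MAC_ADDR1 the two most significant ones,
 * so byte 0 of the address lives in bits 15:8 of MAC_ADDR1.
 */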
static int ethoc_get_mac_address(struct net_device *dev, void *addr)
{
        struct ethoc *priv = netdev_priv(dev);
        u8 *mac = (u8 *)addr;
        u32 reg;

        reg = ethoc_read(priv, MAC_ADDR0);
        mac[2] = (reg >> 24) & 0xff;
        mac[3] = (reg >> 16) & 0xff;
        mac[4] = (reg >>  8) & 0xff;
        mac[5] = (reg >>  0) & 0xff;

        reg = ethoc_read(priv, MAC_ADDR1);
        mac[0] = (reg >>  8) & 0xff;
        mac[1] = (reg >>  0) & 0xff;

        return 0;
}

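/* NAPI poll handler: perform up to a full budget of RX and TX work and,
 * only when both fall short of the budget, report completion and re-arm
 * the RX/TX interrupts.
 */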
static int ethoc_poll(struct napi_struct *napi, int budget)
{
        struct ethoc *priv = container_of(napi, struct ethoc, napi);
        int rx_work_done = 0;
        int tx_work_done = 0;

        rx_work_done = ethoc_rx(priv->netdev, budget);
        tx_work_done = ethoc_tx(priv->netdev, budget);

        if (rx_work_done < budget && tx_work_done < budget) {
                napi_complete_done(napi, rx_work_done);
                ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
        }

        return rx_work_done;
}

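/* MDIO read handshake: latch the PHY and register addresses, issue a READ
 * command, then poll MIISTATUS until the controller clears its busy flag
 * (bounded here to five 100-200us sleeps) before fetching the data.
 */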
static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
{
        struct ethoc *priv = bus->priv;
        int i;

        ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
        ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);

        for (i = 0; i < 5; i++) {
                u32 status = ethoc_read(priv, MIISTATUS);
                if (!(status & MIISTATUS_BUSY)) {
                        u32 data = ethoc_read(priv, MIIRX_DATA);
                        /* reset MII command register */
                        ethoc_write(priv, MIICOMMAND, 0);
                        return data;
                }
                usleep_range(100, 200);
        }

        return -EBUSY;
}

static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
        struct ethoc *priv = bus->priv;
        int i;

        ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
        ethoc_write(priv, MIITX_DATA, val);
        ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);

        for (i = 0; i < 5; i++) {
                u32 stat = ethoc_read(priv, MIISTATUS);
                if (!(stat & MIISTATUS_BUSY)) {
                        /* reset MII command register */
                        ethoc_write(priv, MIICOMMAND, 0);
                        return 0;
                }
                usleep_range(100, 200);
        }

        return -EBUSY;
}

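/* Link-change callback handed to phy_connect_direct(): whenever the PHY
 * reports a new link or duplex state, mirror the duplex setting into
 * MODER and log the transition.
 */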
static void ethoc_mdio_poll(struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        bool changed = false;
        u32 mode;

        if (priv->old_link != phydev->link) {
                changed = true;
                priv->old_link = phydev->link;
        }

        if (priv->old_duplex != phydev->duplex) {
                changed = true;
                priv->old_duplex = phydev->duplex;
        }

        if (!changed)
                return;

        mode = ethoc_read(priv, MODER);
        if (phydev->duplex == DUPLEX_FULL)
                mode |= MODER_FULLD;
        else
                mode &= ~MODER_FULLD;
        ethoc_write(priv, MODER, mode);

        phy_print_status(phydev);
}

static int ethoc_mdio_probe(struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);
        struct phy_device *phy;
        int err;

        if (priv->phy_id != -1)
                phy = mdiobus_get_phy(priv->mdio, priv->phy_id);
        else
                phy = phy_find_first(priv->mdio);

        if (!phy) {
                dev_err(&dev->dev, "no PHY found\n");
                return -ENXIO;
        }

        priv->old_duplex = -1;
        priv->old_link = -1;

        err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
                                 PHY_INTERFACE_MODE_GMII);
        if (err) {
                dev_err(&dev->dev, "could not attach to PHY\n");
                return err;
        }

        phy_set_max_speed(phy, SPEED_100);

        return 0;
}

static int ethoc_open(struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);
        int ret;

        ret = request_irq(dev->irq, ethoc_interrupt, IRQF_SHARED,
                        dev->name, dev);
        if (ret)
                return ret;

        napi_enable(&priv->napi);

        ethoc_init_ring(priv, dev->mem_start);
        ethoc_reset(priv);

        if (netif_queue_stopped(dev)) {
                dev_dbg(&dev->dev, " resuming queue\n");
                netif_wake_queue(dev);
        } else {
                dev_dbg(&dev->dev, " starting queue\n");
                netif_start_queue(dev);
        }

        priv->old_link = -1;
        priv->old_duplex = -1;

        phy_start(dev->phydev);

        if (netif_msg_ifup(priv)) {
                dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
                                dev->base_addr, dev->mem_start, dev->mem_end);
        }

        return 0;
}

static int ethoc_stop(struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);

        napi_disable(&priv->napi);

        if (dev->phydev)
                phy_stop(dev->phydev);

        ethoc_disable_rx_and_tx(priv);
        free_irq(dev->irq, dev);

        if (!netif_queue_stopped(dev))
                netif_stop_queue(dev);

        return 0;
}

static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct ethoc *priv = netdev_priv(dev);
        struct mii_ioctl_data *mdio = if_mii(ifr);
        struct phy_device *phy = NULL;

        if (!netif_running(dev))
                return -EINVAL;

        if (cmd != SIOCGMIIPHY) {
                if (mdio->phy_id >= PHY_MAX_ADDR)
                        return -ERANGE;

                phy = mdiobus_get_phy(priv->mdio, mdio->phy_id);
                if (!phy)
                        return -ENODEV;
        } else {
                phy = dev->phydev;
        }

        return phy_mii_ioctl(phy, ifr, cmd);
}

static void ethoc_do_set_mac_address(struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);
        unsigned char *mac = dev->dev_addr;

        ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
                                     (mac[4] <<  8) | (mac[5] <<  0));
        ethoc_write(priv, MAC_ADDR1, (mac[0] <<  8) | (mac[1] <<  0));
}

static int ethoc_set_mac_address(struct net_device *dev, void *p)
{
        const struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
        ethoc_do_set_mac_address(dev);
        return 0;
}

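/* Multicast filtering uses a 64-bit hash: the top six bits of each
 * address's Ethernet CRC select one bit in the ETH_HASH0/ETH_HASH1 pair.
 * For example, a CRC of 0xf8000000 yields bit index 0x3e and thus sets
 * bit 30 of ETH_HASH1.
 */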
static void ethoc_set_multicast_list(struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);
        u32 mode = ethoc_read(priv, MODER);
        struct netdev_hw_addr *ha;
        u32 hash[2] = { 0, 0 };

        /* set loopback mode if requested */
        if (dev->flags & IFF_LOOPBACK)
                mode |=  MODER_LOOP;
        else
                mode &= ~MODER_LOOP;

        /* receive broadcast frames if requested */
        if (dev->flags & IFF_BROADCAST)
                mode &= ~MODER_BRO;
        else
                mode |=  MODER_BRO;

        /* enable promiscuous mode if requested */
        if (dev->flags & IFF_PROMISC)
                mode |=  MODER_PRO;
        else
                mode &= ~MODER_PRO;

        ethoc_write(priv, MODER, mode);

        /* receive multicast frames */
        if (dev->flags & IFF_ALLMULTI) {
                hash[0] = 0xffffffff;
                hash[1] = 0xffffffff;
        } else {
                netdev_for_each_mc_addr(ha, dev) {
                        u32 crc = ether_crc(ETH_ALEN, ha->addr);
                        int bit = (crc >> 26) & 0x3f;
                        hash[bit >> 5] |= 1 << (bit & 0x1f);
                }
        }

        ethoc_write(priv, ETH_HASH0, hash[0]);
        ethoc_write(priv, ETH_HASH1, hash[1]);
}

static int ethoc_change_mtu(struct net_device *dev, int new_mtu)
{
        return -ENOSYS;
}

static void ethoc_tx_timeout(struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);
        u32 pending = ethoc_read(priv, INT_SOURCE);
        if (likely(pending))
                ethoc_interrupt(dev->irq, dev);
}

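/* Transmit path: the MAC has no scatter/gather support, so each skb is
 * copied whole into its descriptor's buffer with memcpy_toio() and handed
 * over by setting TX_BD_READY. The queue is stopped once every TX
 * descriptor is in flight and woken again from ethoc_tx().
 */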
static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ethoc *priv = netdev_priv(dev);
        struct ethoc_bd bd;
        unsigned int entry;
        void *dest;

        if (skb_put_padto(skb, ETHOC_ZLEN)) {
                dev->stats.tx_errors++;
                goto out_no_free;
        }

        if (unlikely(skb->len > ETHOC_BUFSIZ)) {
                dev->stats.tx_errors++;
                goto out;
        }

        entry = priv->cur_tx % priv->num_tx;
        spin_lock_irq(&priv->lock);
        priv->cur_tx++;

        ethoc_read_bd(priv, entry, &bd);
        if (unlikely(skb->len < ETHOC_ZLEN))
                bd.stat |=  TX_BD_PAD;
        else
                bd.stat &= ~TX_BD_PAD;

        dest = priv->vma[entry];
        memcpy_toio(dest, skb->data, skb->len);

        bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK);
        bd.stat |= TX_BD_LEN(skb->len);
        ethoc_write_bd(priv, entry, &bd);

        bd.stat |= TX_BD_READY;
        ethoc_write_bd(priv, entry, &bd);

        if (priv->cur_tx == (priv->dty_tx + priv->num_tx)) {
                dev_dbg(&dev->dev, "stopping queue\n");
                netif_stop_queue(dev);
        }

        spin_unlock_irq(&priv->lock);
        skb_tx_timestamp(skb);
out:
        dev_kfree_skb(skb);
out_no_free:
        return NETDEV_TX_OK;
}

static int ethoc_get_regs_len(struct net_device *netdev)
{
        return ETH_END;
}

static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                           void *p)
{
        struct ethoc *priv = netdev_priv(dev);
        u32 *regs_buff = p;
        unsigned int i;

        regs->version = 0;
        for (i = 0; i < ETH_END / sizeof(u32); ++i)
                regs_buff[i] = ethoc_read(priv, i * sizeof(u32));
}

static void ethoc_get_ringparam(struct net_device *dev,
                                struct ethtool_ringparam *ring)
{
        struct ethoc *priv = netdev_priv(dev);

        ring->rx_max_pending = priv->num_bd - 1;
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
        ring->tx_max_pending = priv->num_bd - 1;

        ring->rx_pending = priv->num_rx;
        ring->rx_mini_pending = 0;
        ring->rx_jumbo_pending = 0;
        ring->tx_pending = priv->num_tx;
}

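/* Resizing the rings requires quiescing the device: stop the TX queue and
 * the MAC, mask RX/TX interrupts, rebuild the descriptor rings, then bring
 * everything back up if the interface was running.
 */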
static int ethoc_set_ringparam(struct net_device *dev,
                               struct ethtool_ringparam *ring)
{
        struct ethoc *priv = netdev_priv(dev);

        if (ring->tx_pending < 1 || ring->rx_pending < 1 ||
            ring->tx_pending + ring->rx_pending > priv->num_bd)
                return -EINVAL;
        if (ring->rx_mini_pending || ring->rx_jumbo_pending)
                return -EINVAL;

        if (netif_running(dev)) {
                netif_tx_disable(dev);
                ethoc_disable_rx_and_tx(priv);
                ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
                synchronize_irq(dev->irq);
        }

        priv->num_tx = rounddown_pow_of_two(ring->tx_pending);
        priv->num_rx = ring->rx_pending;
        ethoc_init_ring(priv, dev->mem_start);

        if (netif_running(dev)) {
                ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
                ethoc_enable_rx_and_tx(priv);
                netif_wake_queue(dev);
        }
        return 0;
}

static const struct ethtool_ops ethoc_ethtool_ops = {
        .get_regs_len = ethoc_get_regs_len,
        .get_regs = ethoc_get_regs,
        .nway_reset = phy_ethtool_nway_reset,
        .get_link = ethtool_op_get_link,
        .get_ringparam = ethoc_get_ringparam,
        .set_ringparam = ethoc_set_ringparam,
        .get_ts_info = ethtool_op_get_ts_info,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops ethoc_netdev_ops = {
        .ndo_open = ethoc_open,
        .ndo_stop = ethoc_stop,
        .ndo_do_ioctl = ethoc_ioctl,
        .ndo_set_mac_address = ethoc_set_mac_address,
        .ndo_set_rx_mode = ethoc_set_multicast_list,
        .ndo_change_mtu = ethoc_change_mtu,
        .ndo_tx_timeout = ethoc_tx_timeout,
        .ndo_start_xmit = ethoc_start_xmit,
};

/**
 * ethoc_probe - initialize OpenCores ethernet MAC
 * @pdev:       platform device
 */
static int ethoc_probe(struct platform_device *pdev)
{
        struct net_device *netdev = NULL;
        struct resource *res = NULL;
        struct resource *mmio = NULL;
        struct resource *mem = NULL;
        struct ethoc *priv = NULL;
        int num_bd;
        int ret = 0;
        struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
        u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;

        /* allocate networking device */
        netdev = alloc_etherdev(sizeof(struct ethoc));
        if (!netdev) {
                ret = -ENOMEM;
                goto out;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);
        platform_set_drvdata(pdev, netdev);

        /* obtain I/O memory space */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "cannot obtain I/O memory space\n");
                ret = -ENXIO;
                goto free;
        }

        mmio = devm_request_mem_region(&pdev->dev, res->start,
                        resource_size(res), res->name);
        if (!mmio) {
                dev_err(&pdev->dev, "cannot request I/O memory space\n");
                ret = -ENXIO;
                goto free;
        }

        netdev->base_addr = mmio->start;

        /* obtain buffer memory space */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res) {
                mem = devm_request_mem_region(&pdev->dev, res->start,
                        resource_size(res), res->name);
                if (!mem) {
                        dev_err(&pdev->dev, "cannot request memory space\n");
                        ret = -ENXIO;
                        goto free;
                }

                netdev->mem_start = mem->start;
                netdev->mem_end   = mem->end;
        }

        /* obtain device IRQ number */
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res) {
                dev_err(&pdev->dev, "cannot obtain IRQ\n");
                ret = -ENXIO;
                goto free;
        }

        netdev->irq = res->start;

        /* setup driver-private data */
        priv = netdev_priv(netdev);
        priv->netdev = netdev;

        priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
                        resource_size(mmio));
        if (!priv->iobase) {
                dev_err(&pdev->dev, "cannot remap I/O memory space\n");
                ret = -ENXIO;
                goto free;
        }

        if (netdev->mem_end) {
                priv->membase = devm_ioremap_nocache(&pdev->dev,
                        netdev->mem_start, resource_size(mem));
                if (!priv->membase) {
                        dev_err(&pdev->dev, "cannot remap memory space\n");
                        ret = -ENXIO;
                        goto free;
                }
        } else {
                /* Allocate buffer memory */
                priv->membase = dmam_alloc_coherent(&pdev->dev,
                        buffer_size, (void *)&netdev->mem_start,
                        GFP_KERNEL);
                if (!priv->membase) {
                        dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
                                buffer_size);
                        ret = -ENOMEM;
                        goto free;
                }
                netdev->mem_end = netdev->mem_start + buffer_size;
        }

        priv->big_endian = pdata ? pdata->big_endian :
                of_device_is_big_endian(pdev->dev.of_node);

        /* calculate the number of TX/RX buffers, maximum 128 supported */
        num_bd = min_t(unsigned int,
                128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
        if (num_bd < 4) {
                ret = -ENODEV;
                goto free;
        }
        priv->num_bd = num_bd;
        /* num_tx must be a power of two */
        priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
        priv->num_rx = num_bd - priv->num_tx;

        dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
                priv->num_tx, priv->num_rx);

        priv->vma = devm_kcalloc(&pdev->dev, num_bd, sizeof(void *),
                                 GFP_KERNEL);
        if (!priv->vma) {
                ret = -ENOMEM;
                goto free;
        }

        /* Allow the platform setup code to pass in a MAC address. */
        if (pdata) {
                ether_addr_copy(netdev->dev_addr, pdata->hwaddr);
                priv->phy_id = pdata->phy_id;
        } else {
                const void *mac;

                mac = of_get_mac_address(pdev->dev.of_node);
                if (!IS_ERR(mac))
                        ether_addr_copy(netdev->dev_addr, mac);
                priv->phy_id = -1;
        }

        /* Check that the given MAC address is valid. If it isn't, read the
         * current MAC from the controller.
         */
        if (!is_valid_ether_addr(netdev->dev_addr))
                ethoc_get_mac_address(netdev, netdev->dev_addr);

        /* Check the MAC again for validity; if it still isn't valid, choose
         * and program a random one.
         */
        if (!is_valid_ether_addr(netdev->dev_addr))
                eth_hw_addr_random(netdev);

        ethoc_do_set_mac_address(netdev);

        /* Allow the platform setup code to adjust MII management bus clock. */
        if (!eth_clkfreq) {
                struct clk *clk = devm_clk_get(&pdev->dev, NULL);

                if (!IS_ERR(clk)) {
                        priv->clk = clk;
                        clk_prepare_enable(clk);
                        eth_clkfreq = clk_get_rate(clk);
                }
        }
        if (eth_clkfreq) {
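                /* Presumably the "/ 2500000 + 1" keeps the MII management
                 * clock at or below the standard 2.5 MHz MDC ceiling;
                 * MIIMODER_CLKDIV() then rounds the divider down to an
                 * even value as the core requires.
                 */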
                u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);

                if (!clkdiv)
                        clkdiv = 2;
                dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv);
                ethoc_write(priv, MIIMODER,
                            (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) |
                            clkdiv);
        }

        /* register MII bus */
        priv->mdio = mdiobus_alloc();
        if (!priv->mdio) {
                ret = -ENOMEM;
                goto free2;
        }

        priv->mdio->name = "ethoc-mdio";
        snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "%s-%d",
                        priv->mdio->name, pdev->id);
        priv->mdio->read = ethoc_mdio_read;
        priv->mdio->write = ethoc_mdio_write;
        priv->mdio->priv = priv;

        ret = mdiobus_register(priv->mdio);
        if (ret) {
                dev_err(&netdev->dev, "failed to register MDIO bus\n");
                goto free2;
        }

        ret = ethoc_mdio_probe(netdev);
        if (ret) {
                dev_err(&netdev->dev, "failed to probe MDIO bus\n");
                goto error;
        }

        /* setup the net_device structure */
        netdev->netdev_ops = &ethoc_netdev_ops;
        netdev->watchdog_timeo = ETHOC_TIMEOUT;
        netdev->features |= 0;
        netdev->ethtool_ops = &ethoc_ethtool_ops;

        /* setup NAPI */
        netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);

        spin_lock_init(&priv->lock);

        ret = register_netdev(netdev);
        if (ret < 0) {
                dev_err(&netdev->dev, "failed to register interface\n");
                goto error2;
        }

        goto out;

error2:
        netif_napi_del(&priv->napi);
error:
        mdiobus_unregister(priv->mdio);
        mdiobus_free(priv->mdio);
free2:
        clk_disable_unprepare(priv->clk);
free:
        free_netdev(netdev);
out:
        return ret;
}

/**
 * ethoc_remove - shutdown OpenCores ethernet MAC
 * @pdev:       platform device
 */
static int ethoc_remove(struct platform_device *pdev)
{
        struct net_device *netdev = platform_get_drvdata(pdev);
        struct ethoc *priv = netdev_priv(netdev);

        if (netdev) {
                netif_napi_del(&priv->napi);
                phy_disconnect(netdev->phydev);

                if (priv->mdio) {
                        mdiobus_unregister(priv->mdio);
                        mdiobus_free(priv->mdio);
                }
                clk_disable_unprepare(priv->clk);
                unregister_netdev(netdev);
                free_netdev(netdev);
        }

        return 0;
}

#ifdef CONFIG_PM
static int ethoc_suspend(struct platform_device *pdev, pm_message_t state)
{
        return -ENOSYS;
}

static int ethoc_resume(struct platform_device *pdev)
{
        return -ENOSYS;
}
#else
# define ethoc_suspend NULL
# define ethoc_resume  NULL
#endif

static const struct of_device_id ethoc_match[] = {
        { .compatible = "opencores,ethoc", },
        {},
};
MODULE_DEVICE_TABLE(of, ethoc_match);

static struct platform_driver ethoc_driver = {
        .probe   = ethoc_probe,
        .remove  = ethoc_remove,
        .suspend = ethoc_suspend,
        .resume  = ethoc_resume,
        .driver  = {
                .name = "ethoc",
                .of_match_table = ethoc_match,
        },
};

module_platform_driver(ethoc_driver);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
MODULE_LICENSE("GPL v2");