linux/drivers/net/ethernet/korina.c
   1/*
   2 *  Driver for the IDT RC32434 (Korina) on-chip ethernet controller.
   3 *
   4 *  Copyright 2004 IDT Inc. (rischelp@idt.com)
   5 *  Copyright 2006 Felix Fietkau <nbd@openwrt.org>
   6 *  Copyright 2008 Florian Fainelli <florian@openwrt.org>
   7 *
   8 *  This program is free software; you can redistribute  it and/or modify it
   9 *  under  the terms of  the GNU General  Public License as published by the
  10 *  Free Software Foundation;  either version 2 of the  License, or (at your
  11 *  option) any later version.
  12 *
  13 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
  14 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
  15 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
  16 *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
  17 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  18 *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
  19 *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  20 *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
  21 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  22 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  23 *
  24 *  You should have received a copy of the  GNU General Public License along
  25 *  with this program; if not, write  to the Free Software Foundation, Inc.,
  26 *  675 Mass Ave, Cambridge, MA 02139, USA.
  27 *
  28 *  Writing to a DMA status register:
  29 *
  30 *  When writing to the status register, you should mask the bit you have
  31 *  been testing the status register with. Both Tx and Rx DMA registers
  32 *  should stick to this procedure.
  33 */
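    /*
     * Illustrative sketch of that rule (it mirrors what korina_rx() does
     * further down; nothing here is an addition to the hardware API):
     * after testing DMA_STAT_DONE in the Rx status register, acknowledge
     * only that bit by writing its complement back:
     *
     *        dmas = readl(&lp->rx_dma_regs->dmas);
     *        if (dmas & DMA_STAT_DONE)
     *                writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
     */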
  34
  35#include <linux/module.h>
  36#include <linux/kernel.h>
  37#include <linux/moduleparam.h>
  38#include <linux/sched.h>
  39#include <linux/ctype.h>
  40#include <linux/types.h>
  41#include <linux/interrupt.h>
  42#include <linux/init.h>
  43#include <linux/ioport.h>
  44#include <linux/in.h>
  45#include <linux/slab.h>
  46#include <linux/string.h>
  47#include <linux/delay.h>
  48#include <linux/netdevice.h>
  49#include <linux/etherdevice.h>
  50#include <linux/skbuff.h>
  51#include <linux/errno.h>
  52#include <linux/platform_device.h>
  53#include <linux/mii.h>
  54#include <linux/ethtool.h>
  55#include <linux/crc32.h>
  56
  57#include <asm/bootinfo.h>
  58#include <asm/bitops.h>
  59#include <asm/pgtable.h>
  60#include <asm/io.h>
  61#include <asm/dma.h>
  62
  63#include <asm/mach-rc32434/rb.h>
  64#include <asm/mach-rc32434/rc32434.h>
  65#include <asm/mach-rc32434/eth.h>
  66#include <asm/mach-rc32434/dma_v.h>
  67
  68#define DRV_NAME        "korina"
  69#define DRV_VERSION     "0.10"
  70#define DRV_RELDATE     "04Mar2008"
  71
  72#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
  73                                   ((dev)->dev_addr[1]))
  74#define STATION_ADDRESS_LOW(dev)  (((dev)->dev_addr[2] << 24) | \
  75                                   ((dev)->dev_addr[3] << 16) | \
  76                                   ((dev)->dev_addr[4] << 8)  | \
  77                                   ((dev)->dev_addr[5]))
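    /* Illustrative example only (not an address used anywhere in this
     * driver): for a MAC of 02:00:11:22:33:44 the macros above yield
     * STATION_ADDRESS_HIGH = 0x0200 and STATION_ADDRESS_LOW = 0x11223344. */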
  78
  79#define MII_CLOCK 1250000       /* no more than 2.5MHz */
  80
  81/* the following must be powers of two */
  82#define KORINA_NUM_RDS  64  /* number of receive descriptors */
  83#define KORINA_NUM_TDS  64  /* number of transmit descriptors */
  84
  85/* KORINA_RBSIZE is the hardware's default maximum receive
  86 * frame size in bytes. Having this hardcoded means that there
  87 * is no support for MTU sizes greater than 1500. */
  88#define KORINA_RBSIZE   1536 /* size of one receive buffer */
  89#define KORINA_RDS_MASK (KORINA_NUM_RDS - 1)
  90#define KORINA_TDS_MASK (KORINA_NUM_TDS - 1)
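    /* The masks make ring indexing wrap cheaply, e.g.
     * (lp->rx_next_done + 1) & KORINA_RDS_MASK turns 63 back into 0,
     * which is why the descriptor counts above must be powers of two. */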
  91#define RD_RING_SIZE    (KORINA_NUM_RDS * sizeof(struct dma_desc))
  92#define TD_RING_SIZE    (KORINA_NUM_TDS * sizeof(struct dma_desc))
  93
  94#define TX_TIMEOUT      (6000 * HZ / 1000)
  95
  96enum chain_status { desc_filled, desc_empty };
  97#define IS_DMA_FINISHED(X)   (((X) & (DMA_DESC_FINI)) != 0)
  98#define IS_DMA_DONE(X)   (((X) & (DMA_DESC_DONE)) != 0)
  99#define RCVPKT_LENGTH(X)     (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)
 100
 101/* Information that needs to be kept for each board. */
 102struct korina_private {
 103        struct eth_regs *eth_regs;
 104        struct dma_reg *rx_dma_regs;
 105        struct dma_reg *tx_dma_regs;
 106        struct dma_desc *td_ring; /* transmit descriptor ring */
 107        struct dma_desc *rd_ring; /* receive descriptor ring  */
 108
 109        struct sk_buff *tx_skb[KORINA_NUM_TDS];
 110        struct sk_buff *rx_skb[KORINA_NUM_RDS];
 111
 112        int rx_next_done;
 113        int rx_chain_head;
 114        int rx_chain_tail;
 115        enum chain_status rx_chain_status;
 116
 117        int tx_next_done;
 118        int tx_chain_head;
 119        int tx_chain_tail;
 120        enum chain_status tx_chain_status;
 121        int tx_count;
 122        int tx_full;
 123
 124        int rx_irq;
 125        int tx_irq;
 126        int ovr_irq;
 127        int und_irq;
 128
 129        spinlock_t lock;        /* NIC xmit lock */
 130
 131        int dma_halt_cnt;
 132        int dma_run_cnt;
 133        struct napi_struct napi;
 134        struct timer_list media_check_timer;
 135        struct mii_if_info mii_if;
 136        struct work_struct restart_task;
 137        struct net_device *dev;
 138        int phy_addr;
 139};
 140
 141extern unsigned int idt_cpu_freq;
 142
 143static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr)
 144{
 145        writel(0, &ch->dmandptr);
 146        writel(dma_addr, &ch->dmadptr);
 147}
 148
 149static inline void korina_abort_dma(struct net_device *dev,
 150                                        struct dma_reg *ch)
 151{
 152        if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
 153                writel(0x10, &ch->dmac);
 154
 155                while (!(readl(&ch->dmas) & DMA_STAT_HALT))
 156                        dev->trans_start = jiffies;
 157
 158                writel(0, &ch->dmas);
 159        }
 160
 161        writel(0, &ch->dmadptr);
 162        writel(0, &ch->dmandptr);
 163}
 164
 165static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr)
 166{
 167        writel(dma_addr, &ch->dmandptr);
 168}
 169
 170static void korina_abort_tx(struct net_device *dev)
 171{
 172        struct korina_private *lp = netdev_priv(dev);
 173
 174        korina_abort_dma(dev, lp->tx_dma_regs);
 175}
 176
 177static void korina_abort_rx(struct net_device *dev)
 178{
 179        struct korina_private *lp = netdev_priv(dev);
 180
 181        korina_abort_dma(dev, lp->rx_dma_regs);
 182}
 183
 184static void korina_start_rx(struct korina_private *lp,
 185                                        struct dma_desc *rd)
 186{
 187        korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
 188}
 189
 190static void korina_chain_rx(struct korina_private *lp,
 191                                        struct dma_desc *rd)
 192{
 193        korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
 194}
 195
 196/* transmit packet */
 197static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
 198{
 199        struct korina_private *lp = netdev_priv(dev);
 200        unsigned long flags;
 201        u32 length;
 202        u32 chain_prev, chain_next;
 203        struct dma_desc *td;
 204
 205        spin_lock_irqsave(&lp->lock, flags);
 206
 207        td = &lp->td_ring[lp->tx_chain_tail];
 208
 209        /* stop queue when full, drop pkts if queue already full */
 210        if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
 211                lp->tx_full = 1;
 212
 213                if (lp->tx_count == (KORINA_NUM_TDS - 2))
 214                        netif_stop_queue(dev);
 215                else {
 216                        dev->stats.tx_dropped++;
 217                        dev_kfree_skb_any(skb);
 218                        spin_unlock_irqrestore(&lp->lock, flags);
 219
 220                        return NETDEV_TX_BUSY;
 221                }
 222        }
 223
 224        lp->tx_count++;
 225
 226        lp->tx_skb[lp->tx_chain_tail] = skb;
 227
 228        length = skb->len;
 229        dma_cache_wback((u32)skb->data, skb->len);
 230
 231        /* Setup the transmit descriptor. */
 232        dma_cache_inv((u32) td, sizeof(*td));
 233        td->ca = CPHYSADDR(skb->data);
 234        chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
 235        chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;
 236
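            /* Four cases follow: when the Tx DMA channel is idle
             * (dmandptr == 0), the chain ending with this descriptor is
             * handed to the hardware by writing the chain head to NDPTR;
             * when it is busy, the descriptor is only queued (and linked
             * to its predecessor if one is pending) and the channel is
             * kicked later from the Tx DMA interrupt handler. */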
 237        if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
 238                if (lp->tx_chain_status == desc_empty) {
 239                        /* Update tail */
 240                        td->control = DMA_COUNT(length) |
 241                                        DMA_DESC_COF | DMA_DESC_IOF;
 242                        /* Move tail */
 243                        lp->tx_chain_tail = chain_next;
 244                        /* Write to NDPTR */
 245                        writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
 246                                        &lp->tx_dma_regs->dmandptr);
 247                        /* Move head to tail */
 248                        lp->tx_chain_head = lp->tx_chain_tail;
 249                } else {
 250                        /* Update tail */
 251                        td->control = DMA_COUNT(length) |
 252                                        DMA_DESC_COF | DMA_DESC_IOF;
 253                        /* Link to prev */
 254                        lp->td_ring[chain_prev].control &=
 255                                        ~DMA_DESC_COF;
 256                        /* Link to prev */
 257                        lp->td_ring[chain_prev].link =  CPHYSADDR(td);
 258                        /* Move tail */
 259                        lp->tx_chain_tail = chain_next;
 260                        /* Write to NDPTR */
 261                        writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
 262                                        &(lp->tx_dma_regs->dmandptr));
 263                        /* Move head to tail */
 264                        lp->tx_chain_head = lp->tx_chain_tail;
 265                        lp->tx_chain_status = desc_empty;
 266                }
 267        } else {
 268                if (lp->tx_chain_status == desc_empty) {
 269                        /* Update tail */
 270                        td->control = DMA_COUNT(length) |
 271                                        DMA_DESC_COF | DMA_DESC_IOF;
 272                        /* Move tail */
 273                        lp->tx_chain_tail = chain_next;
 274                        lp->tx_chain_status = desc_filled;
 275                } else {
 276                        /* Update tail */
 277                        td->control = DMA_COUNT(length) |
 278                                        DMA_DESC_COF | DMA_DESC_IOF;
 279                        lp->td_ring[chain_prev].control &=
 280                                        ~DMA_DESC_COF;
 281                        lp->td_ring[chain_prev].link =  CPHYSADDR(td);
 282                        lp->tx_chain_tail = chain_next;
 283                }
 284        }
 285        dma_cache_wback((u32) td, sizeof(*td));
 286
 287        dev->trans_start = jiffies;
 288        spin_unlock_irqrestore(&lp->lock, flags);
 289
 290        return NETDEV_TX_OK;
 291}
 292
 293static int mdio_read(struct net_device *dev, int mii_id, int reg)
 294{
 295        struct korina_private *lp = netdev_priv(dev);
 296        int ret;
 297
 298        mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
 299
 300        writel(0, &lp->eth_regs->miimcfg);
 301        writel(0, &lp->eth_regs->miimcmd);
 302        writel(mii_id | reg, &lp->eth_regs->miimaddr);
 303        writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
 304
 305        ret = (int)(readl(&lp->eth_regs->miimrdd));
 306        return ret;
 307}
 308
 309static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
 310{
 311        struct korina_private *lp = netdev_priv(dev);
 312
 313        mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
 314
 315        writel(0, &lp->eth_regs->miimcfg);
 316        writel(1, &lp->eth_regs->miimcmd);
 317        writel(mii_id | reg, &lp->eth_regs->miimaddr);
 318        writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
 319        writel(val, &lp->eth_regs->miimwtd);
 320}
 321
 322/* Ethernet Rx DMA interrupt */
 323static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
 324{
 325        struct net_device *dev = dev_id;
 326        struct korina_private *lp = netdev_priv(dev);
 327        u32 dmas, dmasm;
 328        irqreturn_t retval;
 329
 330        dmas = readl(&lp->rx_dma_regs->dmas);
 331        if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
 332                dmasm = readl(&lp->rx_dma_regs->dmasm);
 333                writel(dmasm | (DMA_STAT_DONE |
 334                                DMA_STAT_HALT | DMA_STAT_ERR),
 335                                &lp->rx_dma_regs->dmasm);
 336
 337                napi_schedule(&lp->napi);
 338
 339                if (dmas & DMA_STAT_ERR)
 340                        printk(KERN_ERR "%s: DMA error\n", dev->name);
 341
 342                retval = IRQ_HANDLED;
 343        } else
 344                retval = IRQ_NONE;
 345
 346        return retval;
 347}
 348
 349static int korina_rx(struct net_device *dev, int limit)
 350{
 351        struct korina_private *lp = netdev_priv(dev);
 352        struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
 353        struct sk_buff *skb, *skb_new;
 354        u8 *pkt_buf;
 355        u32 devcs, pkt_len, dmas;
 356        int count;
 357
 358        dma_cache_inv((u32)rd, sizeof(*rd));
 359
 360        for (count = 0; count < limit; count++) {
 361                skb = lp->rx_skb[lp->rx_next_done];
 362                skb_new = NULL;
 363
 364                devcs = rd->devcs;
 365
 366                if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
 367                        break;
 368
 369                /* Update statistics counters */
 370                if (devcs & ETH_RX_CRC)
 371                        dev->stats.rx_crc_errors++;
 372                if (devcs & ETH_RX_LOR)
 373                        dev->stats.rx_length_errors++;
 374                if (devcs & ETH_RX_LE)
 375                        dev->stats.rx_length_errors++;
 376                if (devcs & ETH_RX_OVR)
 377                        dev->stats.rx_fifo_errors++;
 378                if (devcs & ETH_RX_CV)
 379                        dev->stats.rx_frame_errors++;
 380                if (devcs & ETH_RX_CES)
 381                        dev->stats.rx_length_errors++;
 382                if (devcs & ETH_RX_MP)
 383                        dev->stats.multicast++;
 384
 385                if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
 386                        /* check that this is a whole packet
 387                         * WARNING: DMA_FD bit incorrectly set
 388                         * in Rc32434 (errata ref #077) */
 389                        dev->stats.rx_errors++;
 390                        dev->stats.rx_dropped++;
 391                } else if ((devcs & ETH_RX_ROK)) {
 392                        pkt_len = RCVPKT_LENGTH(devcs);
 393
 394                        /* must be the (first and) last
 395                         * descriptor then */
 396                        pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
 397
 398                        /* invalidate the cache */
 399                        dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
 400
 401                        /* Malloc up new buffer. */
 402                        skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
 403
 404                        if (!skb_new)
 405                                break;
 406                        /* Do not count the CRC */
 407                        skb_put(skb, pkt_len - 4);
 408                        skb->protocol = eth_type_trans(skb, dev);
 409
 410                        /* Pass the packet to upper layers */
 411                        netif_receive_skb(skb);
 412                        dev->stats.rx_packets++;
 413                        dev->stats.rx_bytes += pkt_len;
 414
 415                        /* Update the mcast stats */
 416                        if (devcs & ETH_RX_MP)
 417                                dev->stats.multicast++;
 418
 419                        lp->rx_skb[lp->rx_next_done] = skb_new;
 420                }
 421
 422                rd->devcs = 0;
 423
 424                /* Restore descriptor's curr_addr */
 425                if (skb_new)
 426                        rd->ca = CPHYSADDR(skb_new->data);
 427                else
 428                        rd->ca = CPHYSADDR(skb->data);
 429
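                    /* Re-arm this descriptor for a new receive. Keeping
                     * DMA_DESC_COD only on the freshest recycled descriptor
                     * (and clearing it on the one before) appears to mark
                     * where the DMA must stop, so it cannot wrap into
                     * buffers the CPU has not drained yet. */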
 430                rd->control = DMA_COUNT(KORINA_RBSIZE) |
 431                        DMA_DESC_COD | DMA_DESC_IOD;
 432                lp->rd_ring[(lp->rx_next_done - 1) &
 433                        KORINA_RDS_MASK].control &=
 434                        ~DMA_DESC_COD;
 435
 436                lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
 437                dma_cache_wback((u32)rd, sizeof(*rd));
 438                rd = &lp->rd_ring[lp->rx_next_done];
 439                writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
 440        }
 441
 442        dmas = readl(&lp->rx_dma_regs->dmas);
 443
 444        if (dmas & DMA_STAT_HALT) {
 445                writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
 446                                &lp->rx_dma_regs->dmas);
 447
 448                lp->dma_halt_cnt++;
 449                rd->devcs = 0;
 450                skb = lp->rx_skb[lp->rx_next_done];
 451                rd->ca = CPHYSADDR(skb->data);
 452                dma_cache_wback((u32)rd, sizeof(*rd));
 453                korina_chain_rx(lp, rd);
 454        }
 455
 456        return count;
 457}
 458
 459static int korina_poll(struct napi_struct *napi, int budget)
 460{
 461        struct korina_private *lp =
 462                container_of(napi, struct korina_private, napi);
 463        struct net_device *dev = lp->dev;
 464        int work_done;
 465
 466        work_done = korina_rx(dev, budget);
 467        if (work_done < budget) {
 468                napi_complete(napi);
 469
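                    /* Re-enable Rx interrupts: clearing bits in dmasm
                     * unmasks the corresponding DMA status sources again. */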
 470                writel(readl(&lp->rx_dma_regs->dmasm) &
 471                        ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
 472                        &lp->rx_dma_regs->dmasm);
 473        }
 474        return work_done;
 475}
 476
 477/*
 478 * Set or clear the multicast filter for this adaptor.
 479 */
 480static void korina_multicast_list(struct net_device *dev)
 481{
 482        struct korina_private *lp = netdev_priv(dev);
 483        unsigned long flags;
 484        struct netdev_hw_addr *ha;
 485        u32 recognise = ETH_ARC_AB;     /* always accept broadcasts */
 486        int i;
 487
 488        /* Set promiscuous mode */
 489        if (dev->flags & IFF_PROMISC)
 490                recognise |= ETH_ARC_PRO;
 491
 492        else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
 493                /* All multicast and broadcast */
 494                recognise |= ETH_ARC_AM;
 495
 496        /* Build the hash table */
 497        if (netdev_mc_count(dev) > 4) {
 498                u16 hash_table[4];
 499                u32 crc;
 500
 501                for (i = 0; i < 4; i++)
 502                        hash_table[i] = 0;
 503
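                    /* A reading of the code (not vendor documentation): the
                     * top six bits of the little-endian CRC select one of 64
                     * filter bits, kept as four 16-bit words that are written
                     * to ethhash0/ethhash1 below. */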
 504                netdev_for_each_mc_addr(ha, dev) {
 505                        crc = ether_crc_le(6, ha->addr);
 506                        crc >>= 26;
 507                        hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
 508                }
 509                /* Accept filtered multicast */
 510                recognise |= ETH_ARC_AFM;
 511
 512                /* Fill the MAC hash tables with their values */
 513                writel((u32)(hash_table[1] << 16 | hash_table[0]),
 514                                        &lp->eth_regs->ethhash0);
 515                writel((u32)(hash_table[3] << 16 | hash_table[2]),
 516                                        &lp->eth_regs->ethhash1);
 517        }
 518
 519        spin_lock_irqsave(&lp->lock, flags);
 520        writel(recognise, &lp->eth_regs->etharc);
 521        spin_unlock_irqrestore(&lp->lock, flags);
 522}
 523
 524static void korina_tx(struct net_device *dev)
 525{
 526        struct korina_private *lp = netdev_priv(dev);
 527        struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
 528        u32 devcs;
 529        u32 dmas;
 530
 531        spin_lock(&lp->lock);
 532
 533        /* Process all desc that are done */
 534        while (IS_DMA_FINISHED(td->control)) {
 535                if (lp->tx_full == 1) {
 536                        netif_wake_queue(dev);
 537                        lp->tx_full = 0;
 538                }
 539
 540                devcs = lp->td_ring[lp->tx_next_done].devcs;
 541                if ((devcs & (ETH_TX_FD | ETH_TX_LD)) !=
 542                                (ETH_TX_FD | ETH_TX_LD)) {
 543                        dev->stats.tx_errors++;
 544                        dev->stats.tx_dropped++;
 545
 546                        /* Should never happen */
 547                        printk(KERN_ERR "%s: split tx ignored\n",
 548                                                        dev->name);
 549                } else if (devcs & ETH_TX_TOK) {
 550                        dev->stats.tx_packets++;
 551                        dev->stats.tx_bytes +=
 552                                        lp->tx_skb[lp->tx_next_done]->len;
 553                } else {
 554                        dev->stats.tx_errors++;
 555                        dev->stats.tx_dropped++;
 556
 557                        /* Underflow */
 558                        if (devcs & ETH_TX_UND)
 559                                dev->stats.tx_fifo_errors++;
 560
 561                        /* Oversized frame */
 562                        if (devcs & ETH_TX_OF)
 563                                dev->stats.tx_aborted_errors++;
 564
 565                        /* Excessive deferrals */
 566                        if (devcs & ETH_TX_ED)
 567                                dev->stats.tx_carrier_errors++;
 568
 569                        /* Collisions: medium busy */
 570                        if (devcs & ETH_TX_EC)
 571                                dev->stats.collisions++;
 572
 573                        /* Late collision */
 574                        if (devcs & ETH_TX_LC)
 575                                dev->stats.tx_window_errors++;
 576                }
 577
 578                /* We must always free the original skb */
 579                if (lp->tx_skb[lp->tx_next_done]) {
 580                        dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
 581                        lp->tx_skb[lp->tx_next_done] = NULL;
 582                }
 583
 584                lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF;
 585                lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD;
 586                lp->td_ring[lp->tx_next_done].link = 0;
 587                lp->td_ring[lp->tx_next_done].ca = 0;
 588                lp->tx_count--;
 589
 590                /* Go on to next transmission */
 591                lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK;
 592                td = &lp->td_ring[lp->tx_next_done];
 593
 594        }
 595
 596        /* Clear the DMA status register */
 597        dmas = readl(&lp->tx_dma_regs->dmas);
 598        writel(~dmas, &lp->tx_dma_regs->dmas);
 599
 600        writel(readl(&lp->tx_dma_regs->dmasm) &
 601                        ~(DMA_STAT_FINI | DMA_STAT_ERR),
 602                        &lp->tx_dma_regs->dmasm);
 603
 604        spin_unlock(&lp->lock);
 605}
 606
 607static irqreturn_t
 608korina_tx_dma_interrupt(int irq, void *dev_id)
 609{
 610        struct net_device *dev = dev_id;
 611        struct korina_private *lp = netdev_priv(dev);
 612        u32 dmas, dmasm;
 613        irqreturn_t retval;
 614
 615        dmas = readl(&lp->tx_dma_regs->dmas);
 616
 617        if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
 618                dmasm = readl(&lp->tx_dma_regs->dmasm);
 619                writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
 620                                &lp->tx_dma_regs->dmasm);
 621
 622                korina_tx(dev);
 623
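                    /* If descriptors were queued while the channel was busy
                     * (desc_filled) and the Tx DMA has since gone idle,
                     * restart it from the chain head. */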
 624                if (lp->tx_chain_status == desc_filled &&
 625                        (readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
 626                        writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
 627                                &(lp->tx_dma_regs->dmandptr));
 628                        lp->tx_chain_status = desc_empty;
 629                        lp->tx_chain_head = lp->tx_chain_tail;
 630                        dev->trans_start = jiffies;
 631                }
 632                if (dmas & DMA_STAT_ERR)
 633                        printk(KERN_ERR "%s: DMA error\n", dev->name);
 634
 635                retval = IRQ_HANDLED;
 636        } else
 637                retval = IRQ_NONE;
 638
 639        return retval;
 640}
 641
 642
 643static void korina_check_media(struct net_device *dev, unsigned int init_media)
 644{
 645        struct korina_private *lp = netdev_priv(dev);
 646
 647        mii_check_media(&lp->mii_if, 0, init_media);
 648
 649        if (lp->mii_if.full_duplex)
 650                writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
 651                                                &lp->eth_regs->ethmac2);
 652        else
 653                writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD,
 654                                                &lp->eth_regs->ethmac2);
 655}
 656
 657static void korina_poll_media(unsigned long data)
 658{
 659        struct net_device *dev = (struct net_device *) data;
 660        struct korina_private *lp = netdev_priv(dev);
 661
 662        korina_check_media(dev, 0);
 663        mod_timer(&lp->media_check_timer, jiffies + HZ);
 664}
 665
 666static void korina_set_carrier(struct mii_if_info *mii)
 667{
 668        if (mii->force_media) {
 669                /* autoneg is off: Link is always assumed to be up */
 670                if (!netif_carrier_ok(mii->dev))
 671                        netif_carrier_on(mii->dev);
 672        } else  /* Let MII library update carrier status */
 673                korina_check_media(mii->dev, 0);
 674}
 675
 676static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 677{
 678        struct korina_private *lp = netdev_priv(dev);
 679        struct mii_ioctl_data *data = if_mii(rq);
 680        int rc;
 681
 682        if (!netif_running(dev))
 683                return -EINVAL;
 684        spin_lock_irq(&lp->lock);
 685        rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
 686        spin_unlock_irq(&lp->lock);
 687        korina_set_carrier(&lp->mii_if);
 688
 689        return rc;
 690}
 691
 692/* ethtool helpers */
 693static void netdev_get_drvinfo(struct net_device *dev,
 694                        struct ethtool_drvinfo *info)
 695{
 696        struct korina_private *lp = netdev_priv(dev);
 697
 698        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
 699        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
 700        strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
 701}
 702
 703static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 704{
 705        struct korina_private *lp = netdev_priv(dev);
 706        int rc;
 707
 708        spin_lock_irq(&lp->lock);
 709        rc = mii_ethtool_gset(&lp->mii_if, cmd);
 710        spin_unlock_irq(&lp->lock);
 711
 712        return rc;
 713}
 714
 715static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 716{
 717        struct korina_private *lp = netdev_priv(dev);
 718        int rc;
 719
 720        spin_lock_irq(&lp->lock);
 721        rc = mii_ethtool_sset(&lp->mii_if, cmd);
 722        spin_unlock_irq(&lp->lock);
 723        korina_set_carrier(&lp->mii_if);
 724
 725        return rc;
 726}
 727
 728static u32 netdev_get_link(struct net_device *dev)
 729{
 730        struct korina_private *lp = netdev_priv(dev);
 731
 732        return mii_link_ok(&lp->mii_if);
 733}
 734
 735static const struct ethtool_ops netdev_ethtool_ops = {
 736        .get_drvinfo            = netdev_get_drvinfo,
 737        .get_settings           = netdev_get_settings,
 738        .set_settings           = netdev_set_settings,
 739        .get_link               = netdev_get_link,
 740};
 741
 742static int korina_alloc_ring(struct net_device *dev)
 743{
 744        struct korina_private *lp = netdev_priv(dev);
 745        struct sk_buff *skb;
 746        int i;
 747
 748        /* Initialize the transmit descriptors */
 749        for (i = 0; i < KORINA_NUM_TDS; i++) {
 750                lp->td_ring[i].control = DMA_DESC_IOF;
 751                lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD;
 752                lp->td_ring[i].ca = 0;
 753                lp->td_ring[i].link = 0;
 754        }
 755        lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
 756                        lp->tx_full = lp->tx_count = 0;
 757        lp->tx_chain_status = desc_empty;
 758
 759        /* Initialize the receive descriptors */
 760        for (i = 0; i < KORINA_NUM_RDS; i++) {
 761                skb = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
 762                if (!skb)
 763                        return -ENOMEM;
 764                lp->rx_skb[i] = skb;
 765                lp->rd_ring[i].control = DMA_DESC_IOD |
 766                                DMA_COUNT(KORINA_RBSIZE);
 767                lp->rd_ring[i].devcs = 0;
 768                lp->rd_ring[i].ca = CPHYSADDR(skb->data);
 769                lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
 770        }
 771
 772        /* loop back receive descriptors, so the last
 773         * descriptor points to the first one */
 774        lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
 775        lp->rd_ring[i - 1].control |= DMA_DESC_COD;
 776
 777        lp->rx_next_done  = 0;
 778        lp->rx_chain_head = 0;
 779        lp->rx_chain_tail = 0;
 780        lp->rx_chain_status = desc_empty;
 781
 782        return 0;
 783}
 784
 785static void korina_free_ring(struct net_device *dev)
 786{
 787        struct korina_private *lp = netdev_priv(dev);
 788        int i;
 789
 790        for (i = 0; i < KORINA_NUM_RDS; i++) {
 791                lp->rd_ring[i].control = 0;
 792                if (lp->rx_skb[i])
 793                        dev_kfree_skb_any(lp->rx_skb[i]);
 794                lp->rx_skb[i] = NULL;
 795        }
 796
 797        for (i = 0; i < KORINA_NUM_TDS; i++) {
 798                lp->td_ring[i].control = 0;
 799                if (lp->tx_skb[i])
 800                        dev_kfree_skb_any(lp->tx_skb[i]);
 801                lp->tx_skb[i] = NULL;
 802        }
 803}
 804
 805/*
 806 * Initialize the RC32434 ethernet controller.
 807 */
 808static int korina_init(struct net_device *dev)
 809{
 810        struct korina_private *lp = netdev_priv(dev);
 811
 812        /* Disable DMA */
 813        korina_abort_tx(dev);
 814        korina_abort_rx(dev);
 815
 816        /* reset ethernet logic */
 817        writel(0, &lp->eth_regs->ethintfc);
 818        while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
 819                dev->trans_start = jiffies;
 820
 821        /* Enable Ethernet Interface */
 822        writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);
 823
 824        /* Allocate rings */
 825        if (korina_alloc_ring(dev)) {
 826                printk(KERN_ERR "%s: descriptor allocation failed\n", dev->name);
 827                korina_free_ring(dev);
 828                return -ENOMEM;
 829        }
 830
 831        writel(0, &lp->rx_dma_regs->dmas);
 832        /* Start Rx DMA */
 833        korina_start_rx(lp, &lp->rd_ring[0]);
 834
 835        writel(readl(&lp->tx_dma_regs->dmasm) &
 836                        ~(DMA_STAT_FINI | DMA_STAT_ERR),
 837                        &lp->tx_dma_regs->dmasm);
 838        writel(readl(&lp->rx_dma_regs->dmasm) &
 839                        ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
 840                        &lp->rx_dma_regs->dmasm);
 841
 842        /* Accept packets destined for this device's address, plus broadcasts */
 843        writel(ETH_ARC_AB, &lp->eth_regs->etharc);
 844
 845        /* Set all Ether station address registers to their initial values */
 846        writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
 847        writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);
 848
 849        writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
 850        writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);
 851
 852        writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
 853        writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);
 854
 855        writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
 856        writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);
 857
 858
 859        /* Pad Enable, CRC Enable and Full Duplex set */
 860        writel(ETH_MAC2_PE | ETH_MAC2_CEN | ETH_MAC2_FD,
 861                        &lp->eth_regs->ethmac2);
 862
 863        /* Back to back inter-packet-gap */
 864        writel(0x15, &lp->eth_regs->ethipgt);
 865        /* Non - Back to back inter-packet-gap */
 866        writel(0x12, &lp->eth_regs->ethipgr);
 867
 868        /* Management Clock Prescaler Divisor
 869         * Clock independent setting */
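            /* Worked example with an assumed figure (not taken from this
             * file): for idt_cpu_freq = 200000000 this writes
             * (200000000 / 1250000 + 1) & ~1 = 160, which presumably keeps
             * the MDC clock at about 1.25 MHz, under the 2.5 MHz limit
             * noted at MII_CLOCK. */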
 870        writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
 871                       &lp->eth_regs->ethmcp);
 872
 873        /* don't transmit until fifo contains 48b */
 874        writel(48, &lp->eth_regs->ethfifott);
 875
 876        writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);
 877
 878        napi_enable(&lp->napi);
 879        netif_start_queue(dev);
 880
 881        return 0;
 882}
 883
 884/*
 885 * Restart the RC32434 ethernet controller.
 886 */
 887static void korina_restart_task(struct work_struct *work)
 888{
 889        struct korina_private *lp = container_of(work,
 890                        struct korina_private, restart_task);
 891        struct net_device *dev = lp->dev;
 892
 893        /*
 894         * Disable interrupts
 895         */
 896        disable_irq(lp->rx_irq);
 897        disable_irq(lp->tx_irq);
 898        disable_irq(lp->ovr_irq);
 899        disable_irq(lp->und_irq);
 900
 901        writel(readl(&lp->tx_dma_regs->dmasm) |
 902                                DMA_STAT_FINI | DMA_STAT_ERR,
 903                                &lp->tx_dma_regs->dmasm);
 904        writel(readl(&lp->rx_dma_regs->dmasm) |
 905                                DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
 906                                &lp->rx_dma_regs->dmasm);
 907
 908        korina_free_ring(dev);
 909
 910        napi_disable(&lp->napi);
 911
 912        if (korina_init(dev) < 0) {
 913                printk(KERN_ERR "%s: cannot restart device\n", dev->name);
 914                return;
 915        }
 916        korina_multicast_list(dev);
 917
 918        enable_irq(lp->und_irq);
 919        enable_irq(lp->ovr_irq);
 920        enable_irq(lp->tx_irq);
 921        enable_irq(lp->rx_irq);
 922}
 923
 924static void korina_clear_and_restart(struct net_device *dev, u32 value)
 925{
 926        struct korina_private *lp = netdev_priv(dev);
 927
 928        netif_stop_queue(dev);
 929        writel(value, &lp->eth_regs->ethintfc);
 930        schedule_work(&lp->restart_task);
 931}
 932
 933/* Ethernet Tx Underflow interrupt */
 934static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
 935{
 936        struct net_device *dev = dev_id;
 937        struct korina_private *lp = netdev_priv(dev);
 938        unsigned int und;
 939
 940        spin_lock(&lp->lock);
 941
 942        und = readl(&lp->eth_regs->ethintfc);
 943
 944        if (und & ETH_INT_FC_UND)
 945                korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND);
 946
 947        spin_unlock(&lp->lock);
 948
 949        return IRQ_HANDLED;
 950}
 951
 952static void korina_tx_timeout(struct net_device *dev)
 953{
 954        struct korina_private *lp = netdev_priv(dev);
 955
 956        schedule_work(&lp->restart_task);
 957}
 958
 959/* Ethernet Rx Overflow interrupt */
 960static irqreturn_t
 961korina_ovr_interrupt(int irq, void *dev_id)
 962{
 963        struct net_device *dev = dev_id;
 964        struct korina_private *lp = netdev_priv(dev);
 965        unsigned int ovr;
 966
 967        spin_lock(&lp->lock);
 968        ovr = readl(&lp->eth_regs->ethintfc);
 969
 970        if (ovr & ETH_INT_FC_OVR)
 971                korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR);
 972
 973        spin_unlock(&lp->lock);
 974
 975        return IRQ_HANDLED;
 976}
 977
 978#ifdef CONFIG_NET_POLL_CONTROLLER
 979static void korina_poll_controller(struct net_device *dev)
 980{
 981        disable_irq(dev->irq);
 982        korina_tx_dma_interrupt(dev->irq, dev);
 983        enable_irq(dev->irq);
 984}
 985#endif
 986
 987static int korina_open(struct net_device *dev)
 988{
 989        struct korina_private *lp = netdev_priv(dev);
 990        int ret;
 991
 992        /* Initialize */
 993        ret = korina_init(dev);
 994        if (ret < 0) {
 995                printk(KERN_ERR "%s: cannot open device\n", dev->name);
 996                goto out;
 997        }
 998
 999        /* Install the interrupt handlers
1000         * that handle the Done, Finished,
1001         * Ovr and Und events */
1002        ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt,
1003                        IRQF_DISABLED, "Korina ethernet Rx", dev);
1004        if (ret < 0) {
1005                printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n",
1006                    dev->name, lp->rx_irq);
1007                goto err_release;
1008        }
1009        ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt,
1010                        IRQF_DISABLED, "Korina ethernet Tx", dev);
1011        if (ret < 0) {
1012                printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n",
1013                    dev->name, lp->tx_irq);
1014                goto err_free_rx_irq;
1015        }
1016
1017        /* Install handler for overrun error. */
1018        ret = request_irq(lp->ovr_irq, korina_ovr_interrupt,
1019                        IRQF_DISABLED, "Ethernet Overflow", dev);
1020        if (ret < 0) {
1021                printk(KERN_ERR "%s: unable to get OVR IRQ %d\n",
1022                    dev->name, lp->ovr_irq);
1023                goto err_free_tx_irq;
1024        }
1025
1026        /* Install handler for underflow error. */
1027        ret = request_irq(lp->und_irq, korina_und_interrupt,
1028                        IRQF_DISABLED, "Ethernet Underflow", dev);
1029        if (ret < 0) {
1030                printk(KERN_ERR "%s: unable to get UND IRQ %d\n",
1031                    dev->name, lp->und_irq);
1032                goto err_free_ovr_irq;
1033        }
1034        mod_timer(&lp->media_check_timer, jiffies + 1);
1035out:
1036        return ret;
1037
1038err_free_ovr_irq:
1039        free_irq(lp->ovr_irq, dev);
1040err_free_tx_irq:
1041        free_irq(lp->tx_irq, dev);
1042err_free_rx_irq:
1043        free_irq(lp->rx_irq, dev);
1044err_release:
1045        korina_free_ring(dev);
1046        goto out;
1047}
1048
1049static int korina_close(struct net_device *dev)
1050{
1051        struct korina_private *lp = netdev_priv(dev);
1052        u32 tmp;
1053
1054        del_timer(&lp->media_check_timer);
1055
1056        /* Disable interrupts */
1057        disable_irq(lp->rx_irq);
1058        disable_irq(lp->tx_irq);
1059        disable_irq(lp->ovr_irq);
1060        disable_irq(lp->und_irq);
1061
1062        korina_abort_tx(dev);
1063        tmp = readl(&lp->tx_dma_regs->dmasm);
1064        tmp = tmp | DMA_STAT_FINI | DMA_STAT_ERR;
1065        writel(tmp, &lp->tx_dma_regs->dmasm);
1066
1067        korina_abort_rx(dev);
1068        tmp = readl(&lp->rx_dma_regs->dmasm);
1069        tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
1070        writel(tmp, &lp->rx_dma_regs->dmasm);
1071
1072        korina_free_ring(dev);
1073
1074        napi_disable(&lp->napi);
1075
1076        cancel_work_sync(&lp->restart_task);
1077
1078        free_irq(lp->rx_irq, dev);
1079        free_irq(lp->tx_irq, dev);
1080        free_irq(lp->ovr_irq, dev);
1081        free_irq(lp->und_irq, dev);
1082
1083        return 0;
1084}
1085
1086static const struct net_device_ops korina_netdev_ops = {
1087        .ndo_open               = korina_open,
1088        .ndo_stop               = korina_close,
1089        .ndo_start_xmit         = korina_send_packet,
1090        .ndo_set_rx_mode        = korina_multicast_list,
1091        .ndo_tx_timeout         = korina_tx_timeout,
1092        .ndo_do_ioctl           = korina_ioctl,
1093        .ndo_change_mtu         = eth_change_mtu,
1094        .ndo_validate_addr      = eth_validate_addr,
1095        .ndo_set_mac_address    = eth_mac_addr,
1096#ifdef CONFIG_NET_POLL_CONTROLLER
1097        .ndo_poll_controller    = korina_poll_controller,
1098#endif
1099};
1100
1101static int korina_probe(struct platform_device *pdev)
1102{
1103        struct korina_device *bif = platform_get_drvdata(pdev);
1104        struct korina_private *lp;
1105        struct net_device *dev;
1106        struct resource *r;
1107        int rc;
1108
1109        dev = alloc_etherdev(sizeof(struct korina_private));
1110        if (!dev)
1111                return -ENOMEM;
1112
1113        SET_NETDEV_DEV(dev, &pdev->dev);
1114        lp = netdev_priv(dev);
1115
1116        bif->dev = dev;
1117        memcpy(dev->dev_addr, bif->mac, 6);
1118
1119        lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
1120        lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
1121        lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
1122        lp->und_irq = platform_get_irq_byname(pdev, "korina_und");
1123
1124        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
1125        dev->base_addr = r->start;
1126        lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
1127        if (!lp->eth_regs) {
1128                printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
1129                rc = -ENXIO;
1130                goto probe_err_out;
1131        }
1132
1133        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
1134        lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
1135        if (!lp->rx_dma_regs) {
1136                printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
1137                rc = -ENXIO;
1138                goto probe_err_dma_rx;
1139        }
1140
1141        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
1142        lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
1143        if (!lp->tx_dma_regs) {
1144                printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
1145                rc = -ENXIO;
1146                goto probe_err_dma_tx;
1147        }
1148
1149        lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
1150        if (!lp->td_ring) {
1151                rc = -ENXIO;
1152                goto probe_err_td_ring;
1153        }
1154
1155        dma_cache_inv((unsigned long)(lp->td_ring),
1156                        TD_RING_SIZE + RD_RING_SIZE);
1157
1158        /* convert TD_RING pointer to KSEG1 (uncached) */
1159        lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring);
1160        lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS];
1161
1162        spin_lock_init(&lp->lock);
1163        /* just use the rx dma irq */
1164        dev->irq = lp->rx_irq;
1165        lp->dev = dev;
1166
1167        dev->netdev_ops = &korina_netdev_ops;
1168        dev->ethtool_ops = &netdev_ethtool_ops;
1169        dev->watchdog_timeo = TX_TIMEOUT;
1170        netif_napi_add(dev, &lp->napi, korina_poll, 64);
1171
1172        lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05);
1173        lp->mii_if.dev = dev;
1174        lp->mii_if.mdio_read = mdio_read;
1175        lp->mii_if.mdio_write = mdio_write;
1176        lp->mii_if.phy_id = lp->phy_addr;
1177        lp->mii_if.phy_id_mask = 0x1f;
1178        lp->mii_if.reg_num_mask = 0x1f;
1179
1180        rc = register_netdev(dev);
1181        if (rc < 0) {
1182                printk(KERN_ERR DRV_NAME
1183                        ": cannot register net device: %d\n", rc);
1184                goto probe_err_register;
1185        }
1186        setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev);
1187
1188        INIT_WORK(&lp->restart_task, korina_restart_task);
1189
1190        printk(KERN_INFO "%s: " DRV_NAME "-" DRV_VERSION " " DRV_RELDATE "\n",
1191                        dev->name);
1192out:
1193        return rc;
1194
1195probe_err_register:
1196        kfree(lp->td_ring);
1197probe_err_td_ring:
1198        iounmap(lp->tx_dma_regs);
1199probe_err_dma_tx:
1200        iounmap(lp->rx_dma_regs);
1201probe_err_dma_rx:
1202        iounmap(lp->eth_regs);
1203probe_err_out:
1204        free_netdev(dev);
1205        goto out;
1206}
1207
1208static int korina_remove(struct platform_device *pdev)
1209{
1210        struct korina_device *bif = platform_get_drvdata(pdev);
1211        struct korina_private *lp = netdev_priv(bif->dev);
1212
1213        iounmap(lp->eth_regs);
1214        iounmap(lp->rx_dma_regs);
1215        iounmap(lp->tx_dma_regs);
1216
1217        platform_set_drvdata(pdev, NULL);
1218        unregister_netdev(bif->dev);
1219        free_netdev(bif->dev);
1220
1221        return 0;
1222}
1223
1224static struct platform_driver korina_driver = {
1225        .driver.name = "korina",
1226        .probe = korina_probe,
1227        .remove = korina_remove,
1228};
1229
1230module_platform_driver(korina_driver);
1231
1232MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
1233MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
1234MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
1235MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver");
1236MODULE_LICENSE("GPL");
1237