/* linux/drivers/net/sh_eth.c */
   1/*
   2 *  SuperH Ethernet device driver
   3 *
   4 *  Copyright (C) 2006-2008 Nobuhiro Iwamatsu
   5 *  Copyright (C) 2008-2009 Renesas Solutions Corp.
   6 *
   7 *  This program is free software; you can redistribute it and/or modify it
   8 *  under the terms and conditions of the GNU General Public License,
   9 *  version 2, as published by the Free Software Foundation.
  10 *
  11 *  This program is distributed in the hope it will be useful, but WITHOUT
  12 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  14 *  more details.
  15 *  You should have received a copy of the GNU General Public License along with
  16 *  this program; if not, write to the Free Software Foundation, Inc.,
  17 *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  18 *
  19 *  The full GNU General Public License is included in this distribution in
  20 *  the file called "COPYING".
  21 */
  22
  23#include <linux/init.h>
  24#include <linux/dma-mapping.h>
  25#include <linux/etherdevice.h>
  26#include <linux/delay.h>
  27#include <linux/platform_device.h>
  28#include <linux/mdio-bitbang.h>
  29#include <linux/netdevice.h>
  30#include <linux/phy.h>
  31#include <linux/cache.h>
  32#include <linux/io.h>
  33#include <linux/pm_runtime.h>
  34#include <linux/slab.h>
  35#include <asm/cacheflush.h>
  36
  37#include "sh_eth.h"
  38
  39/* There is CPU dependent code */
  40#if defined(CONFIG_CPU_SUBTYPE_SH7724)
  41#define SH_ETH_RESET_DEFAULT    1
  42static void sh_eth_set_duplex(struct net_device *ndev)
  43{
  44        struct sh_eth_private *mdp = netdev_priv(ndev);
  45        u32 ioaddr = ndev->base_addr;
  46
  47        if (mdp->duplex) /* Full */
  48                writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
  49        else            /* Half */
  50                writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
  51}
  52
  53static void sh_eth_set_rate(struct net_device *ndev)
  54{
  55        struct sh_eth_private *mdp = netdev_priv(ndev);
  56        u32 ioaddr = ndev->base_addr;
  57
  58        switch (mdp->speed) {
  59        case 10: /* 10BASE */
  60                writel(readl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
  61                break;
  62        case 100:/* 100BASE */
  63                writel(readl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
  64                break;
  65        default:
  66                break;
  67        }
  68}
  69
/* SH7724: per-CPU register defaults and hardware feature flags */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	/* E-MAC status/interrupt and E-DMAC interrupt masks */
	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

	/* EESR bits treated as Tx-done vs. error conditions */
	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,	/* hardware Rx padding insertion */
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
  91#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
  92#define SH_ETH_RESET_DEFAULT    1
  93static void sh_eth_set_duplex(struct net_device *ndev)
  94{
  95        struct sh_eth_private *mdp = netdev_priv(ndev);
  96        u32 ioaddr = ndev->base_addr;
  97
  98        if (mdp->duplex) /* Full */
  99                writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
 100        else            /* Half */
 101                writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
 102}
 103
 104static void sh_eth_set_rate(struct net_device *ndev)
 105{
 106        struct sh_eth_private *mdp = netdev_priv(ndev);
 107        u32 ioaddr = ndev->base_addr;
 108
 109        switch (mdp->speed) {
 110        case 10: /* 10BASE */
 111                writel(0, ioaddr + RTRATE);
 112                break;
 113        case 100:/* 100BASE */
 114                writel(1, ioaddr + RTRATE);
 115                break;
 116        default:
 117                break;
 118        }
 119}
 120
/* SH7757: RTRATE-based rate setting; explicit RMCR value */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.set_duplex		= sh_eth_set_duplex,
	.set_rate		= sh_eth_set_rate,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.rmcr_value	= 0x00000001,

	/* EESR bits treated as Tx-done vs. error conditions */
	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,	/* skip EESR_ADE handling in sh_eth_error() */
};
 140
 141#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
 142#define SH_ETH_HAS_TSU  1
/* Whole-block chip reset via the ARSTR register (SH7763). */
static void sh_eth_chip_reset(struct net_device *ndev)
{
	/* reset device; note ARSTR is used as an absolute address here,
	 * not an offset from ndev->base_addr */
	writel(ARSTR_ARSTR, ARSTR);
	mdelay(1);
}
 149
/* Soft-reset the E-DMAC (SH7763 variant): trigger the reset, poll for
 * the self-clearing reset bits, then zero the descriptor table registers. */
static void sh_eth_reset(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;
	int cnt = 100;

	writel(EDSR_ENALL, ioaddr + EDSR);
	writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
	/* Wait up to ~100 ms for the reset bits (EDMR bits 0-1) to clear. */
	while (cnt > 0) {
		if (!(readl(ioaddr + EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt == 0)
		printk(KERN_ERR "Device reset fail\n");

	/* Table Init: clear Tx/Rx descriptor list registers */
	writel(0x0, ioaddr + TDLAR);
	writel(0x0, ioaddr + TDFAR);
	writel(0x0, ioaddr + TDFXR);
	writel(0x0, ioaddr + TDFFR);
	writel(0x0, ioaddr + RDLAR);
	writel(0x0, ioaddr + RDFAR);
	writel(0x0, ioaddr + RDFXR);
	writel(0x0, ioaddr + RDFFR);
}
 176
 177static void sh_eth_set_duplex(struct net_device *ndev)
 178{
 179        struct sh_eth_private *mdp = netdev_priv(ndev);
 180        u32 ioaddr = ndev->base_addr;
 181
 182        if (mdp->duplex) /* Full */
 183                writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
 184        else            /* Half */
 185                writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
 186}
 187
 188static void sh_eth_set_rate(struct net_device *ndev)
 189{
 190        struct sh_eth_private *mdp = netdev_priv(ndev);
 191        u32 ioaddr = ndev->base_addr;
 192
 193        switch (mdp->speed) {
 194        case 10: /* 10BASE */
 195                writel(GECMR_10, ioaddr + GECMR);
 196                break;
 197        case 100:/* 100BASE */
 198                writel(GECMR_100, ioaddr + GECMR);
 199                break;
 200        case 1000: /* 1000BASE */
 201                writel(GECMR_1000, ioaddr + GECMR);
 202                break;
 203        default:
 204                break;
 205        }
 206}
 207
/* SH7763: dedicated chip reset and GECMR-based 10/100/1000 rate setting */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate,

	/* E-MAC status/interrupt and E-DMAC interrupt masks */
	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	/* EESR bits treated as Tx-done vs. error conditions */
	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
			  EESR_ECI,
	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
			  EESR_TFE,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,	/* burst cycle register is programmed */
	.hw_swap	= 1,
	.no_trimd	= 1,	/* TRIMD register not written */
	.no_ade		= 1,	/* skip EESR_ADE handling in sh_eth_error() */
};
 233
 234#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
 235#define SH_ETH_RESET_DEFAULT    1
/* SH7619: minimal feature set; unset fields take the driver defaults
 * filled in by sh_eth_set_default_cpu_data(). */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};
 244#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
 245#define SH_ETH_RESET_DEFAULT    1
 246#define SH_ETH_HAS_TSU  1
/* SH7710/SH7712: only the E-DMAC interrupt mask differs from the
 * defaults filled in by sh_eth_set_default_cpu_data(). */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
};
 250#endif
 251
 252static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
 253{
 254        if (!cd->ecsr_value)
 255                cd->ecsr_value = DEFAULT_ECSR_INIT;
 256
 257        if (!cd->ecsipr_value)
 258                cd->ecsipr_value = DEFAULT_ECSIPR_INIT;
 259
 260        if (!cd->fcftr_value)
 261                cd->fcftr_value = DEFAULT_FIFO_F_D_RFF | \
 262                                  DEFAULT_FIFO_F_D_RFD;
 263
 264        if (!cd->fdr_value)
 265                cd->fdr_value = DEFAULT_FDR_INIT;
 266
 267        if (!cd->rmcr_value)
 268                cd->rmcr_value = DEFAULT_RMCR_VALUE;
 269
 270        if (!cd->tx_check)
 271                cd->tx_check = DEFAULT_TX_CHECK;
 272
 273        if (!cd->eesr_err_check)
 274                cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
 275
 276        if (!cd->tx_error_check)
 277                cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
 278}
 279
 280#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset (default variant for CPUs without a dedicated sequence) */
static void sh_eth_reset(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	/* Assert the software-reset bit, hold it for 3 ms, then release. */
	writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
	mdelay(3);
	writel(readl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
}
 290#endif
 291
 292#if defined(CONFIG_CPU_SH4)
 293static void sh_eth_set_receive_align(struct sk_buff *skb)
 294{
 295        int reserve;
 296
 297        reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
 298        if (reserve)
 299                skb_reserve(skb, reserve);
 300}
 301#else
/* SH2/SH3: reserve a fixed pad at the head of the receive buffer. */
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
 306#endif
 307
 308
 309/* CPU <-> EDMAC endian convert */
 310static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
 311{
 312        switch (mdp->edmac_endian) {
 313        case EDMAC_LITTLE_ENDIAN:
 314                return cpu_to_le32(x);
 315        case EDMAC_BIG_ENDIAN:
 316                return cpu_to_be32(x);
 317        }
 318        return x;
 319}
 320
 321static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
 322{
 323        switch (mdp->edmac_endian) {
 324        case EDMAC_LITTLE_ENDIAN:
 325                return le32_to_cpu(x);
 326        case EDMAC_BIG_ENDIAN:
 327                return be32_to_cpu(x);
 328        }
 329        return x;
 330}
 331
 332/*
 333 * Program the hardware MAC address from dev->dev_addr.
 334 */
 335static void update_mac_address(struct net_device *ndev)
 336{
 337        u32 ioaddr = ndev->base_addr;
 338
 339        writel((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
 340                  (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
 341                  ioaddr + MAHR);
 342        writel((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
 343                  ioaddr + MALR);
 344}
 345
 346/*
 347 * Get MAC address from SuperH MAC address register
 348 *
 349 * SuperH's Ethernet device doesn't have 'ROM' to MAC address.
 350 * This driver get MAC address that use by bootloader(U-boot or sh-ipl+g).
 351 * When you want use this device, you must set MAC address in bootloader.
 352 *
 353 */
 354static void read_mac_address(struct net_device *ndev, unsigned char *mac)
 355{
 356        u32 ioaddr = ndev->base_addr;
 357
 358        if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
 359                memcpy(ndev->dev_addr, mac, 6);
 360        } else {
 361                ndev->dev_addr[0] = (readl(ioaddr + MAHR) >> 24);
 362                ndev->dev_addr[1] = (readl(ioaddr + MAHR) >> 16) & 0xFF;
 363                ndev->dev_addr[2] = (readl(ioaddr + MAHR) >> 8) & 0xFF;
 364                ndev->dev_addr[3] = (readl(ioaddr + MAHR) & 0xFF);
 365                ndev->dev_addr[4] = (readl(ioaddr + MALR) >> 8) & 0xFF;
 366                ndev->dev_addr[5] = (readl(ioaddr + MALR) & 0xFF);
 367        }
 368}
 369
/* Bit-banged MDIO bus state: one control register address plus the
 * bit mask of each MDIO signal within that register. */
struct bb_info {
	struct mdiobb_ctrl ctrl;	/* generic mdio-bitbang handle */
	u32 addr;	/* MDIO control register address */
	u32 mmd_msk;	/* data-direction (MMD) bit */
	u32 mdo_msk;	/* data-out bit */
	u32 mdi_msk;	/* data-in bit */
	u32 mdc_msk;	/* clock bit */
};
 378
 379/* PHY bit set */
 380static void bb_set(u32 addr, u32 msk)
 381{
 382        writel(readl(addr) | msk, addr);
 383}
 384
 385/* PHY bit clear */
 386static void bb_clr(u32 addr, u32 msk)
 387{
 388        writel((readl(addr) & ~msk), addr);
 389}
 390
 391/* PHY bit read */
 392static int bb_read(u32 addr, u32 msk)
 393{
 394        return (readl(addr) & msk) != 0;
 395}
 396
 397/* Data I/O pin control */
 398static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
 399{
 400        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
 401        if (bit)
 402                bb_set(bitbang->addr, bitbang->mmd_msk);
 403        else
 404                bb_clr(bitbang->addr, bitbang->mmd_msk);
 405}
 406
 407/* Set bit data*/
 408static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
 409{
 410        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
 411
 412        if (bit)
 413                bb_set(bitbang->addr, bitbang->mdo_msk);
 414        else
 415                bb_clr(bitbang->addr, bitbang->mdo_msk);
 416}
 417
/* Get bit data: sample the current level of the MDIO data-in line. */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
	return bb_read(bitbang->addr, bitbang->mdi_msk);
}
 424
 425/* MDC pin control */
 426static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
 427{
 428        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
 429
 430        if (bit)
 431                bb_set(bitbang->addr, bitbang->mdc_msk);
 432        else
 433                bb_clr(bitbang->addr, bitbang->mdc_msk);
 434}
 435
/* mdio bus control struct: hooks wiring the generic mdio-bitbang
 * framework to the SuperH PHY interface pins. */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,		/* clock pin */
	.set_mdio_dir = sh_mmd_ctrl,	/* data-direction pin */
	.set_mdio_data = sh_set_mdio,	/* data out */
	.get_mdio_data = sh_get_mdio,	/* data in */
};
 444
 445/* free skb and descriptor buffer */
 446static void sh_eth_ring_free(struct net_device *ndev)
 447{
 448        struct sh_eth_private *mdp = netdev_priv(ndev);
 449        int i;
 450
 451        /* Free Rx skb ringbuffer */
 452        if (mdp->rx_skbuff) {
 453                for (i = 0; i < RX_RING_SIZE; i++) {
 454                        if (mdp->rx_skbuff[i])
 455                                dev_kfree_skb(mdp->rx_skbuff[i]);
 456                }
 457        }
 458        kfree(mdp->rx_skbuff);
 459
 460        /* Free Tx skb ringbuffer */
 461        if (mdp->tx_skbuff) {
 462                for (i = 0; i < TX_RING_SIZE; i++) {
 463                        if (mdp->tx_skbuff[i])
 464                                dev_kfree_skb(mdp->tx_skbuff[i]);
 465                }
 466        }
 467        kfree(mdp->tx_skbuff);
 468}
 469
 470/* format skb and descriptor buffer */
 471static void sh_eth_ring_format(struct net_device *ndev)
 472{
 473        u32 ioaddr = ndev->base_addr;
 474        struct sh_eth_private *mdp = netdev_priv(ndev);
 475        int i;
 476        struct sk_buff *skb;
 477        struct sh_eth_rxdesc *rxdesc = NULL;
 478        struct sh_eth_txdesc *txdesc = NULL;
 479        int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
 480        int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
 481
 482        mdp->cur_rx = mdp->cur_tx = 0;
 483        mdp->dirty_rx = mdp->dirty_tx = 0;
 484
 485        memset(mdp->rx_ring, 0, rx_ringsize);
 486
 487        /* build Rx ring buffer */
 488        for (i = 0; i < RX_RING_SIZE; i++) {
 489                /* skb */
 490                mdp->rx_skbuff[i] = NULL;
 491                skb = dev_alloc_skb(mdp->rx_buf_sz);
 492                mdp->rx_skbuff[i] = skb;
 493                if (skb == NULL)
 494                        break;
 495                dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
 496                                DMA_FROM_DEVICE);
 497                skb->dev = ndev; /* Mark as being used by this device. */
 498                sh_eth_set_receive_align(skb);
 499
 500                /* RX descriptor */
 501                rxdesc = &mdp->rx_ring[i];
 502                rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
 503                rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 504
 505                /* The size of the buffer is 16 byte boundary. */
 506                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 507                /* Rx descriptor address set */
 508                if (i == 0) {
 509                        writel(mdp->rx_desc_dma, ioaddr + RDLAR);
 510#if defined(CONFIG_CPU_SUBTYPE_SH7763)
 511                        writel(mdp->rx_desc_dma, ioaddr + RDFAR);
 512#endif
 513                }
 514        }
 515
 516        mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
 517
 518        /* Mark the last entry as wrapping the ring. */
 519        rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
 520
 521        memset(mdp->tx_ring, 0, tx_ringsize);
 522
 523        /* build Tx ring buffer */
 524        for (i = 0; i < TX_RING_SIZE; i++) {
 525                mdp->tx_skbuff[i] = NULL;
 526                txdesc = &mdp->tx_ring[i];
 527                txdesc->status = cpu_to_edmac(mdp, TD_TFP);
 528                txdesc->buffer_length = 0;
 529                if (i == 0) {
 530                        /* Tx descriptor address set */
 531                        writel(mdp->tx_desc_dma, ioaddr + TDLAR);
 532#if defined(CONFIG_CPU_SUBTYPE_SH7763)
 533                        writel(mdp->tx_desc_dma, ioaddr + TDFAR);
 534#endif
 535                }
 536        }
 537
 538        txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
 539}
 540
 541/* Get skb and descriptor buffer */
 542static int sh_eth_ring_init(struct net_device *ndev)
 543{
 544        struct sh_eth_private *mdp = netdev_priv(ndev);
 545        int rx_ringsize, tx_ringsize, ret = 0;
 546
 547        /*
 548         * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
 549         * card needs room to do 8 byte alignment, +2 so we can reserve
 550         * the first 2 bytes, and +16 gets room for the status word from the
 551         * card.
 552         */
 553        mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
 554                          (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
 555        if (mdp->cd->rpadir)
 556                mdp->rx_buf_sz += NET_IP_ALIGN;
 557
 558        /* Allocate RX and TX skb rings */
 559        mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
 560                                GFP_KERNEL);
 561        if (!mdp->rx_skbuff) {
 562                dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
 563                ret = -ENOMEM;
 564                return ret;
 565        }
 566
 567        mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
 568                                GFP_KERNEL);
 569        if (!mdp->tx_skbuff) {
 570                dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
 571                ret = -ENOMEM;
 572                goto skb_ring_free;
 573        }
 574
 575        /* Allocate all Rx descriptors. */
 576        rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
 577        mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
 578                        GFP_KERNEL);
 579
 580        if (!mdp->rx_ring) {
 581                dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
 582                        rx_ringsize);
 583                ret = -ENOMEM;
 584                goto desc_ring_free;
 585        }
 586
 587        mdp->dirty_rx = 0;
 588
 589        /* Allocate all Tx descriptors. */
 590        tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
 591        mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
 592                        GFP_KERNEL);
 593        if (!mdp->tx_ring) {
 594                dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
 595                        tx_ringsize);
 596                ret = -ENOMEM;
 597                goto desc_ring_free;
 598        }
 599        return ret;
 600
 601desc_ring_free:
 602        /* free DMA buffer */
 603        dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);
 604
 605skb_ring_free:
 606        /* Free Rx and Tx skb ring buffer */
 607        sh_eth_ring_free(ndev);
 608
 609        return ret;
 610}
 611
/* Bring the controller to an operational state: soft reset, descriptor
 * ring setup, E-DMAC/E-MAC register programming, interrupt unmasking,
 * and finally Rx start. The register write order follows the hardware
 * initialization sequence and must not be rearranged. Returns 0. */
static int sh_eth_dev_init(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	u_int32_t rx_int_var, tx_int_var;
	u32 val;

	/* Soft Reset */
	sh_eth_reset(ndev);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		writel(mdp->cd->rpadir_value, ioaddr + RPADIR);

	/* all sh_eth int mask: disabled until setup is complete */
	writel(0, ioaddr + EESIPR);

#if defined(__LITTLE_ENDIAN__)
	if (mdp->cd->hw_swap)
		writel(EDMR_EL, ioaddr + EDMR);
	else
#endif
		writel(0, ioaddr + EDMR);

	/* FIFO size set */
	writel(mdp->cd->fdr_value, ioaddr + FDR);
	writel(0, ioaddr + TFTR);

	/* Frame recv control */
	writel(mdp->cd->rmcr_value, ioaddr + RMCR);

	/* Per-descriptor interrupt selection (TRSCER) */
	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
	writel(rx_int_var | tx_int_var, ioaddr + TRSCER);

	if (mdp->cd->bculr)
		writel(0x800, ioaddr + BCULR);	/* Burst cycle set */

	writel(mdp->cd->fcftr_value, ioaddr + FCFTR);

	if (!mdp->cd->no_trimd)
		writel(0, ioaddr + TRIMD);

	/* Recv frame limit set register */
	writel(RFLR_VALUE, ioaddr + RFLR);

	/* Clear pending EESR status by write-back, then enable sources. */
	writel(readl(ioaddr + EESR), ioaddr + EESR);
	writel(mdp->cd->eesipr_value, ioaddr + EESIPR);

	/* PAUSE Prohibition */
	val = (readl(ioaddr + ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	writel(val, ioaddr + ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	writel(mdp->cd->ecsr_value, ioaddr + ECSR);

	/* E-MAC Interrupt Enable register */
	writel(mdp->cd->ecsipr_value, ioaddr + ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		writel(APR_AP, ioaddr + APR);
	if (mdp->cd->mpr)
		writel(MPR_MP, ioaddr + MPR);
	if (mdp->cd->tpauser)
		writel(TPAUSER_UNLIMITED, ioaddr + TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	writel(EDRRR_R, ioaddr + EDRRR);

	netif_start_queue(ndev);

	return ret;
}
 696
/* free Tx skb function
 *
 * Reclaims descriptors the DMAC has finished with, frees their skbs
 * (via dev_kfree_skb_irq, so callable from interrupt context), and
 * updates Tx statistics. Returns the number of skbs freed. */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int freeNum = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % TX_RING_SIZE;
		txdesc = &mdp->tx_ring[entry];
		/* Stop at the first descriptor the DMAC still owns. */
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			freeNum++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		/* Keep the ring-wrap flag on the last descriptor. */
		if (entry >= TX_RING_SIZE - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		mdp->stats.tx_packets++;
		mdp->stats.tx_bytes += txdesc->buffer_length;
	}
	return freeNum;
}
 725
/* Packet receive function
 *
 * Drains completed Rx descriptors: accounts errors, hands good frames
 * to the stack via netif_rx(), refills the ring with fresh skbs, and
 * restarts the Rx DMA engine if it has stopped. Always returns 0. */
static int sh_eth_rx(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % RX_RING_SIZE;
	/* Upper bound: never process more descriptors than are queued. */
	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	/* RD_RACT clear means the DMAC has handed the descriptor back. */
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		/* NOTE(review): frame_length is read without edmac_to_cpu;
		 * presumably it is stored in CPU order - confirm in sh_eth.h */
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			mdp->stats.rx_length_errors++;

		/* RD_RFS* are the per-frame receive error status bits. */
		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			mdp->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				mdp->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				mdp->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				mdp->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				mdp->stats.rx_over_errors++;
		} else {
			/* Byte-swap in software when the DMAC cannot. */
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			mdp->stats.rx_packets++;
			mdp->stats.rx_bytes += pkt_len;
		}
		/* Return the descriptor to the DMAC. */
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
		/* The size of the buffer is 16 byte boundary. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
					DMA_FROM_DEVICE);
			skb->dev = ndev;
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		/* The last descriptor also carries the ring-wrap flag. */
		if (entry >= RX_RING_SIZE - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(readl(ndev->base_addr + EDRRR) & EDRRR_R))
		writel(EDRRR_R, ndev->base_addr + EDRRR);

	return 0;
}
 819
/* error control function */
/*
 * Handle the error/status interrupt sources latched in EESR: E-MAC
 * status changes (carrier loss, link change), transmit aborts and
 * receive overflow / descriptor-empty conditions.  Called from
 * sh_eth_interrupt() with mdp->lock held.
 */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	u32 felic_stat;
	u32 link_stat;
	u32 mask;

	if (intr_status & EESR_ECI) {
		/* E-MAC status interrupt: read ECSR and write it back to
		 * write-clear the latched bits. */
		felic_stat = readl(ioaddr + ECSR);
		writel(felic_stat, ioaddr + ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			mdp->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			if (mdp->cd->no_psr || mdp->no_ether_link) {
				/* No usable PHY status input: trust the
				 * software link state maintained by
				 * sh_eth_adjust_link(). */
				if (mdp->link == PHY_DOWN)
					link_stat = 0;
				else
					link_stat = PHY_ST_LINK;
			} else {
				link_stat = (readl(ioaddr + PSR));
				if (mdp->ether_link_active_low)
					link_stat = ~link_stat;
			}
			if (!(link_stat & PHY_ST_LINK)) {
				/* Link Down : disable tx and rx */
				writel(readl(ioaddr + ECMR) &
					  ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
			} else {
				/* Link Up */
				/* Mask the ECI source while re-clearing ECSR
				 * so the clear does not retrigger the IRQ. */
				writel(readl(ioaddr + EESIPR) &
					  ~DMAC_M_ECI, ioaddr + EESIPR);
				/*clear int */
				writel(readl(ioaddr + ECSR),
					  ioaddr + ECSR);
				writel(readl(ioaddr + EESIPR) |
					  DMAC_M_ECI, ioaddr + EESIPR);
				/* enable tx and rx */
				writel(readl(ioaddr + ECMR) |
					  (ECMR_RE | ECMR_TE), ioaddr + ECMR);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Write buck end. unused write back interrupt */
		if (intr_status & EESR_TABT)	/* Transmit Abort int */
			mdp->stats.tx_aborted_errors++;
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			mdp->stats.rx_frame_errors++;
			dev_err(&ndev->dev, "Receive Frame Overflow\n");
		}
	}

	if (!mdp->cd->no_ade) {
		/* Counts a Tx FIFO error only when ADE, TDE and TFE are all
		 * set at once (& binds tighter than &&). */
		if (intr_status & EESR_ADE && intr_status & EESR_TDE &&
		    intr_status & EESR_TFE)
			mdp->stats.tx_fifo_errors++;
	}

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		mdp->stats.rx_over_errors++;

		/* Restart the Rx DMA engine if it has stopped. */
		if (readl(ioaddr + EDRRR) ^ EDRRR_R)
			writel(EDRRR_R, ioaddr + EDRRR);
		dev_err(&ndev->dev, "Receive Descriptor Empty\n");
	}
	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		mdp->stats.rx_fifo_errors++;
		dev_err(&ndev->dev, "Receive FIFO Overflow\n");
	}

	/* Tx error mask; ADE does not apply on parts that lack it. */
	mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
	if (mdp->cd->no_ade)
		mask &= ~EESR_ADE;
	if (intr_status & mask) {
		/* Tx error */
		u32 edtrr = readl(ndev->base_addr + EDTRR);
		/* dmesg */
		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
				intr_status, mdp->cur_tx);
		dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
				mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ EDTRR_TRNS) {
			/* tx dma start */
			writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}
 924
/*
 * Top-level interrupt handler.  Reads EESR, write-clears the sources it
 * recognizes and dispatches to the Rx, Tx-completion and error paths.
 * Which Tx and error bits apply is per-CPU-model data (cd->tx_check /
 * cd->eesr_err_check).  Returns IRQ_NONE for foreign interrupts so the
 * line can be shared (IRQF_SHARED on SH7763/7764/7757).
 */
static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_cpu_data *cd = mdp->cd;
	irqreturn_t ret = IRQ_NONE;
	u32 ioaddr, intr_status = 0;

	ioaddr = ndev->base_addr;
	spin_lock(&mdp->lock);

	/* Get interrpt stat */
	intr_status = readl(ioaddr + EESR);
	/* Clear interrupt */
	/* Only claim the IRQ if at least one recognized source is set;
	 * writing the status back write-clears those bits. */
	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
			cd->tx_check | cd->eesr_err_check)) {
		writel(intr_status, ioaddr + EESR);
		ret = IRQ_HANDLED;
	} else
		goto other_irq;

	if (intr_status & (EESR_FRC | /* Frame recv*/
			EESR_RMAF | /* Multi cast address recv*/
			EESR_RRF  | /* Bit frame recv */
			EESR_RTLF | /* Long frame recv*/
			EESR_RTSF | /* short frame recv */
			EESR_PRE  | /* PHY-LSI recv error */
			EESR_CERF)){ /* recv frame CRC error */
		sh_eth_rx(ndev);
	}

	/* Tx Check */
	if (intr_status & cd->tx_check) {
		/* Reclaim completed descriptors and let the stack send
		 * more packets. */
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & cd->eesr_err_check)
		sh_eth_error(ndev, intr_status);

other_irq:
	spin_unlock(&mdp->lock);

	return ret;
}
 971
 972static void sh_eth_timer(unsigned long data)
 973{
 974        struct net_device *ndev = (struct net_device *)data;
 975        struct sh_eth_private *mdp = netdev_priv(ndev);
 976
 977        mod_timer(&mdp->timer, jiffies + (10 * HZ));
 978}
 979
/* PHY state control function */
/*
 * phylib adjust_link callback: mirror the PHY's negotiated duplex and
 * speed into mdp->duplex / mdp->speed and let the per-CPU-model hooks
 * reprogram the MAC.  Prints the link state whenever anything changed.
 */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	u32 ioaddr = ndev->base_addr;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
			if (mdp->cd->set_duplex)
				mdp->cd->set_duplex(ndev);
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
			if (mdp->cd->set_rate)
				mdp->cd->set_rate(ndev);
		}
		if (mdp->link == PHY_DOWN) {
			/* First transition to link-up: clear TXF and set the
			 * duplex bit.  NOTE(review): ECMR_DM is set
			 * unconditionally here, even for half-duplex links --
			 * confirm against the E-MAC datasheet. */
			writel((readl(ioaddr + ECMR) & ~ECMR_TXF)
					| ECMR_DM, ioaddr + ECMR);
			new_state = 1;
			mdp->link = phydev->link;
		}
	} else if (mdp->link) {
		/* Link lost: forget the cached parameters so both are
		 * reprogrammed on the next link-up. */
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
	}

	if (new_state)
		phy_print_status(phydev);
}
1018
1019/* PHY init function */
1020static int sh_eth_phy_init(struct net_device *ndev)
1021{
1022        struct sh_eth_private *mdp = netdev_priv(ndev);
1023        char phy_id[MII_BUS_ID_SIZE + 3];
1024        struct phy_device *phydev = NULL;
1025
1026        snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1027                mdp->mii_bus->id , mdp->phy_id);
1028
1029        mdp->link = PHY_DOWN;
1030        mdp->speed = 0;
1031        mdp->duplex = -1;
1032
1033        /* Try connect to PHY */
1034        phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1035                                0, PHY_INTERFACE_MODE_MII);
1036        if (IS_ERR(phydev)) {
1037                dev_err(&ndev->dev, "phy_connect failed\n");
1038                return PTR_ERR(phydev);
1039        }
1040
1041        dev_info(&ndev->dev, "attached phy %i to driver %s\n",
1042                phydev->addr, phydev->drv->name);
1043
1044        mdp->phydev = phydev;
1045
1046        return 0;
1047}
1048
1049/* PHY control start function */
1050static int sh_eth_phy_start(struct net_device *ndev)
1051{
1052        struct sh_eth_private *mdp = netdev_priv(ndev);
1053        int ret;
1054
1055        ret = sh_eth_phy_init(ndev);
1056        if (ret)
1057                return ret;
1058
1059        /* reset phy - this also wakes it from PDOWN */
1060        phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
1061        phy_start(mdp->phydev);
1062
1063        return 0;
1064}
1065
1066/* network device open function */
1067static int sh_eth_open(struct net_device *ndev)
1068{
1069        int ret = 0;
1070        struct sh_eth_private *mdp = netdev_priv(ndev);
1071
1072        pm_runtime_get_sync(&mdp->pdev->dev);
1073
1074        ret = request_irq(ndev->irq, sh_eth_interrupt,
1075#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
1076    defined(CONFIG_CPU_SUBTYPE_SH7764) || \
1077    defined(CONFIG_CPU_SUBTYPE_SH7757)
1078                                IRQF_SHARED,
1079#else
1080                                0,
1081#endif
1082                                ndev->name, ndev);
1083        if (ret) {
1084                dev_err(&ndev->dev, "Can not assign IRQ number\n");
1085                return ret;
1086        }
1087
1088        /* Descriptor set */
1089        ret = sh_eth_ring_init(ndev);
1090        if (ret)
1091                goto out_free_irq;
1092
1093        /* device init */
1094        ret = sh_eth_dev_init(ndev);
1095        if (ret)
1096                goto out_free_irq;
1097
1098        /* PHY control start*/
1099        ret = sh_eth_phy_start(ndev);
1100        if (ret)
1101                goto out_free_irq;
1102
1103        /* Set the timer to check for link beat. */
1104        init_timer(&mdp->timer);
1105        mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
1106        setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
1107
1108        return ret;
1109
1110out_free_irq:
1111        free_irq(ndev->irq, ndev);
1112        pm_runtime_put_sync(&mdp->pdev->dev);
1113        return ret;
1114}
1115
/* Timeout function */
/*
 * ndo_tx_timeout: the stack detected a stuck transmitter.  Stop the
 * queue, free every buffer on both rings and reinitialize the device.
 * NOTE(review): skbs are freed without any DMA unmapping step and the
 * Rx descriptor addresses are poisoned before sh_eth_dev_init() --
 * assumed sh_eth_dev_init() repopulates the rings; verify.
 */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	/* worning message out. */
	printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
	       " resetting...\n", ndev->name, (int)readl(ioaddr + EESR));

	/* tx_errors count up */
	mdp->stats.tx_errors++;

	/* timer off */
	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;	/* poison value to catch stray DMA */
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev);

	/* timer on */
	/* NOTE(review): "(jiffies + (24 * HZ)) / 10" also divides jiffies;
	 * "jiffies + 24 * HZ / 10" was probably intended (2.4 s). */
	mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
	add_timer(&mdp->timer);
}
1158
/* Packet transmit function */
/*
 * ndo_start_xmit: place one skb on the Tx ring and kick the DMA engine
 * if it is idle.  Returns NETDEV_TX_BUSY (and stops the queue) when
 * fewer than four descriptors remain free and none can be reclaimed.
 */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	/* Keep four slots of headroom; try to reclaim completed
	 * descriptors before giving up. */
	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
		if (!sh_eth_txfree(ndev)) {
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % TX_RING_SIZE;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	/* NOTE(review): uses virt_to_phys + explicit cache purge rather
	 * than the DMA-mapping API -- SH-specific; confirm. */
	txdesc->addr = virt_to_phys(skb->data);
	/* soft swap. */
	/* Byte-swap the buffer in software when the EDMAC cannot. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	/* write back */
	__flush_purge_region(skb->data, skb->len);
	/* Pad short frames up to the minimum frame length. */
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	/* Mark the descriptor active; the last ring entry also carries the
	 * "descriptor list end" flag so the EDMAC wraps around. */
	if (entry >= TX_RING_SIZE - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	/* Start Tx DMA if the engine is not already transmitting. */
	if (!(readl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
		writel(EDTRR_TRNS, ndev->base_addr + EDTRR);

	return NETDEV_TX_OK;
}
1204
/* device close function */
/*
 * ndo_stop: quiesce the hardware (mask all interrupts, stop both DMA
 * engines), detach the PHY, release the IRQ, free the descriptor rings
 * and drop the runtime PM reference taken in sh_eth_open().
 */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	int ringsize;

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	writel(0x0000, ioaddr + EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	writel(0, ioaddr + EDTRR);
	writel(0, ioaddr + EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	/* NOTE(review): dma_free_coherent() is called with a NULL device
	 * pointer -- assumed to match how the rings were allocated; verify
	 * against sh_eth_ring_init(). */
	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

	/* free DMA buffer */
	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}
1246
/*
 * ndo_get_stats: fold the hardware's error counters into the software
 * statistics.  Each counter register is read, accumulated, then reset
 * by writing zero (write-clear).  A runtime PM reference keeps the
 * registers accessible even when the interface is down.
 */
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	pm_runtime_get_sync(&mdp->pdev->dev);

	mdp->stats.tx_dropped += readl(ioaddr + TROCR);
	writel(0, ioaddr + TROCR);	/* (write clear) */
	mdp->stats.collisions += readl(ioaddr + CDCR);
	writel(0, ioaddr + CDCR);	/* (write clear) */
	mdp->stats.tx_carrier_errors += readl(ioaddr + LCCR);
	writel(0, ioaddr + LCCR);	/* (write clear) */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
	/* SH7763 splits the carrier-related counters across two registers. */
	mdp->stats.tx_carrier_errors += readl(ioaddr + CERCR);/* CERCR */
	writel(0, ioaddr + CERCR);	/* (write clear) */
	mdp->stats.tx_carrier_errors += readl(ioaddr + CEECR);/* CEECR */
	writel(0, ioaddr + CEECR);	/* (write clear) */
#else
	mdp->stats.tx_carrier_errors += readl(ioaddr + CNDCR);
	writel(0, ioaddr + CNDCR);	/* (write clear) */
#endif
	pm_runtime_put_sync(&mdp->pdev->dev);

	return &mdp->stats;
}
1273
1274/* ioctl to device funciotn*/
1275static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
1276                                int cmd)
1277{
1278        struct sh_eth_private *mdp = netdev_priv(ndev);
1279        struct phy_device *phydev = mdp->phydev;
1280
1281        if (!netif_running(ndev))
1282                return -EINVAL;
1283
1284        if (!phydev)
1285                return -ENODEV;
1286
1287        return phy_mii_ioctl(phydev, rq, cmd);
1288}
1289
1290#if defined(SH_ETH_HAS_TSU)
1291/* Multicast reception directions set */
1292static void sh_eth_set_multicast_list(struct net_device *ndev)
1293{
1294        u32 ioaddr = ndev->base_addr;
1295
1296        if (ndev->flags & IFF_PROMISC) {
1297                /* Set promiscuous. */
1298                writel((readl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
1299                          ioaddr + ECMR);
1300        } else {
1301                /* Normal, unicast/broadcast-only mode. */
1302                writel((readl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
1303                          ioaddr + ECMR);
1304        }
1305}
1306
/* SuperH's TSU register init function */
/*
 * Put the TSU (the controller's CAM / port-forwarding unit) into a
 * known disabled state: no port-to-port forwarding, no QTAG handling,
 * all CAM entries and TSU interrupts off.  Called once, for the first
 * device only.
 */
static void sh_eth_tsu_init(u32 ioaddr)
{
	writel(0, ioaddr + TSU_FWEN0);	/* Disable forward(0->1) */
	writel(0, ioaddr + TSU_FWEN1);	/* Disable forward(1->0) */
	writel(0, ioaddr + TSU_FCM);	/* forward fifo 3k-3k */
	writel(0xc, ioaddr + TSU_BSYSL0);
	writel(0xc, ioaddr + TSU_BSYSL1);
	writel(0, ioaddr + TSU_PRISL0);
	writel(0, ioaddr + TSU_PRISL1);
	writel(0, ioaddr + TSU_FWSL0);
	writel(0, ioaddr + TSU_FWSL1);
	writel(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
	writel(0, ioaddr + TSU_QTAG0);	/* Disable QTAG(0->1) */
	writel(0, ioaddr + TSU_QTAG1);	/* Disable QTAG(1->0) */
#else
	writel(0, ioaddr + TSU_QTAGM0);	/* Disable QTAG(0->1) */
	writel(0, ioaddr + TSU_QTAGM1);	/* Disable QTAG(1->0) */
#endif
	writel(0, ioaddr + TSU_FWSR);	/* all interrupt status clear */
	writel(0, ioaddr + TSU_FWINMK);	/* Disable all interrupt */
	writel(0, ioaddr + TSU_TEN);	/* Disable all CAM entry */
	writel(0, ioaddr + TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	writel(0, ioaddr + TSU_POST2);	/* Disable CAM entry [ 8-15] */
	writel(0, ioaddr + TSU_POST3);	/* Disable CAM entry [16-23] */
	writel(0, ioaddr + TSU_POST4);	/* Disable CAM entry [24-31] */
}
1335#endif /* SH_ETH_HAS_TSU */
1336
1337/* MDIO bus release function */
1338static int sh_mdio_release(struct net_device *ndev)
1339{
1340        struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
1341
1342        /* unregister mdio bus */
1343        mdiobus_unregister(bus);
1344
1345        /* remove mdio bus info from net_device */
1346        dev_set_drvdata(&ndev->dev, NULL);
1347
1348        /* free interrupts memory */
1349        kfree(bus->irq);
1350
1351        /* free bitbang info */
1352        free_mdio_bitbang(bus);
1353
1354        return 0;
1355}
1356
/* MDIO bus init function */
/*
 * Create and register a bitbanged MDIO bus driven through the PIR
 * register.  Resources are unwound in reverse order on failure via the
 * goto ladder.  Returns 0 or a negative errno.
 */
static int sh_mdio_init(struct net_device *ndev, int id)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	/* Bit masks pick the MDIO data-in/out, direction and clock lines
	 * within the PIR register. */
	bitbang->addr = ndev->base_addr + PIR;
	bitbang->mdi_msk = 0x08;
	bitbang->mdo_msk = 0x04;
	bitbang->mmd_msk = 0x02;/* MMD */
	bitbang->mdc_msk = 0x01;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out_free_bitbang;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);

	/* PHY IRQ */
	mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	/* No PHY interrupt lines are wired up: poll every address. */
	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* regist mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_irq;

	/* Stash the bus pointer so sh_mdio_release() can find it. */
	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_irq:
	kfree(mdp->mii_bus->irq);

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out_free_bitbang:
	kfree(bitbang);

out:
	return ret;
}
1422
/* net_device callbacks; the multicast hook exists only on TSU-equipped
 * parts. */
static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open		= sh_eth_open,
	.ndo_stop		= sh_eth_close,
	.ndo_start_xmit		= sh_eth_start_xmit,
	.ndo_get_stats		= sh_eth_get_stats,
#if defined(SH_ETH_HAS_TSU)
	.ndo_set_multicast_list	= sh_eth_set_multicast_list,
#endif
	.ndo_tx_timeout		= sh_eth_tx_timeout,
	.ndo_do_ioctl		= sh_eth_do_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
1437
1438static int sh_eth_drv_probe(struct platform_device *pdev)
1439{
1440        int ret, devno = 0;
1441        struct resource *res;
1442        struct net_device *ndev = NULL;
1443        struct sh_eth_private *mdp;
1444        struct sh_eth_plat_data *pd;
1445
1446        /* get base addr */
1447        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1448        if (unlikely(res == NULL)) {
1449                dev_err(&pdev->dev, "invalid resource\n");
1450                ret = -EINVAL;
1451                goto out;
1452        }
1453
1454        ndev = alloc_etherdev(sizeof(struct sh_eth_private));
1455        if (!ndev) {
1456                dev_err(&pdev->dev, "Could not allocate device.\n");
1457                ret = -ENOMEM;
1458                goto out;
1459        }
1460
1461        /* The sh Ether-specific entries in the device structure. */
1462        ndev->base_addr = res->start;
1463        devno = pdev->id;
1464        if (devno < 0)
1465                devno = 0;
1466
1467        ndev->dma = -1;
1468        ret = platform_get_irq(pdev, 0);
1469        if (ret < 0) {
1470                ret = -ENODEV;
1471                goto out_release;
1472        }
1473        ndev->irq = ret;
1474
1475        SET_NETDEV_DEV(ndev, &pdev->dev);
1476
1477        /* Fill in the fields of the device structure with ethernet values. */
1478        ether_setup(ndev);
1479
1480        mdp = netdev_priv(ndev);
1481        spin_lock_init(&mdp->lock);
1482        mdp->pdev = pdev;
1483        pm_runtime_enable(&pdev->dev);
1484        pm_runtime_resume(&pdev->dev);
1485
1486        pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
1487        /* get PHY ID */
1488        mdp->phy_id = pd->phy;
1489        /* EDMAC endian */
1490        mdp->edmac_endian = pd->edmac_endian;
1491        mdp->no_ether_link = pd->no_ether_link;
1492        mdp->ether_link_active_low = pd->ether_link_active_low;
1493
1494        /* set cpu data */
1495        mdp->cd = &sh_eth_my_cpu_data;
1496        sh_eth_set_default_cpu_data(mdp->cd);
1497
1498        /* set function */
1499        ndev->netdev_ops = &sh_eth_netdev_ops;
1500        ndev->watchdog_timeo = TX_TIMEOUT;
1501
1502        mdp->post_rx = POST_RX >> (devno << 1);
1503        mdp->post_fw = POST_FW >> (devno << 1);
1504
1505        /* read and set MAC address */
1506        read_mac_address(ndev, pd->mac_addr);
1507
1508        /* First device only init */
1509        if (!devno) {
1510                if (mdp->cd->chip_reset)
1511                        mdp->cd->chip_reset(ndev);
1512
1513#if defined(SH_ETH_HAS_TSU)
1514                /* TSU init (Init only)*/
1515                sh_eth_tsu_init(SH_TSU_ADDR);
1516#endif
1517        }
1518
1519        /* network device register */
1520        ret = register_netdev(ndev);
1521        if (ret)
1522                goto out_release;
1523
1524        /* mdio bus init */
1525        ret = sh_mdio_init(ndev, pdev->id);
1526        if (ret)
1527                goto out_unregister;
1528
1529        /* print device infomation */
1530        pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
1531               (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
1532
1533        platform_set_drvdata(pdev, ndev);
1534
1535        return ret;
1536
1537out_unregister:
1538        unregister_netdev(ndev);
1539
1540out_release:
1541        /* net_dev free */
1542        if (ndev)
1543                free_netdev(ndev);
1544
1545out:
1546        return ret;
1547}
1548
/*
 * Remove: undo probe.  NOTE(review): the MDIO bus is released before
 * unregister_netdev(), i.e. while the interface may still be up --
 * confirm this ordering is safe for this kernel version.
 */
static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
1561
static int sh_eth_runtime_nop(struct device *dev)
{
	/*
	 * Shared no-op ->runtime_suspend()/->runtime_resume() callback.
	 * The driver reprograms every register after each
	 * pm_runtime_get_sync(), so there is nothing to save or restore
	 * here; just report success.
	 */
	return 0;
}
1574
1575static struct dev_pm_ops sh_eth_dev_pm_ops = {
1576        .runtime_suspend = sh_eth_runtime_nop,
1577        .runtime_resume = sh_eth_runtime_nop,
1578};
1579
/* Platform driver glue: matched by name against board platform devices. */
static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		   .name = CARDNAME,
		   .pm = &sh_eth_dev_pm_ops,
	},
};
1588
/* Module load: register the platform driver. */
static int __init sh_eth_init(void)
{
	return platform_driver_register(&sh_eth_driver);
}
1593
/* Module unload: unregister the platform driver. */
static void __exit sh_eth_cleanup(void)
{
	platform_driver_unregister(&sh_eth_driver);
}
1598
/* Module entry/exit points and metadata. */
module_init(sh_eth_init);
module_exit(sh_eth_cleanup);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");
1605