linux/drivers/net/ethernet/renesas/sh_eth.c
/*
 *  SuperH Ethernet device driver
 *
 *  Copyright (C) 2006-2008 Nobuhiro Iwamatsu
 *  Copyright (C) 2008-2009 Renesas Solutions Corp.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms and conditions of the GNU General Public License,
 *  version 2, as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 *  more details.
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *  The full GNU General Public License is included in this distribution in
 *  the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/sh_eth.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
                (NETIF_MSG_LINK | \
                NETIF_MSG_TIMER | \
                NETIF_MSG_RX_ERR | \
                NETIF_MSG_TX_ERR)

/* There is CPU dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT    1
static void sh_eth_set_duplex(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        if (mdp->duplex) /* Full */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
        else            /* Half */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
                break;
        case 100: /* 100BASE */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
                break;
        default:
                break;
        }
}

/* SH7724 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate,

        .ecsr_value     = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
        .ecsipr_value   = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
                          EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
        .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
        .rpadir_value   = 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES 1
#define SH_ETH_HAS_TSU  1
static void sh_eth_set_duplex(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        if (mdp->duplex) /* Full */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
        else            /* Half */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, 0, RTRATE);
                break;
        case 100: /* 100BASE */
                sh_eth_write(ndev, 1, RTRATE);
                break;
        default:
                break;
        }
}

/* SH7757 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .set_duplex             = sh_eth_set_duplex,
        .set_rate               = sh_eth_set_rate,

        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
        .rmcr_value     = 0x00000001,

        .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
        .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
                          EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
        .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
        .no_ade         = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16, /* NET_IP_ALIGN assumed to be 2 */
};

#define SH_GIGA_ETH_BASE        0xfee00000
#define GIGA_MALR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port)         (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
        int i;
        unsigned long mahr[2], malr[2];

        /* save MAHR and MALR */
        for (i = 0; i < 2; i++) {
                malr[i] = ioread32((void *)GIGA_MALR(i));
                mahr[i] = ioread32((void *)GIGA_MAHR(i));
        }

        /* reset device */
        iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
        mdelay(1);

        /* restore MAHR and MALR */
        for (i = 0; i < 2; i++) {
                iowrite32(malr[i], (void *)GIGA_MALR(i));
                iowrite32(mahr[i], (void *)GIGA_MAHR(i));
        }
}

static int sh_eth_is_gether(struct sh_eth_private *mdp);
static void sh_eth_reset(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int cnt = 100;

        if (sh_eth_is_gether(mdp)) {
                sh_eth_write(ndev, 0x03, EDSR);
                sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
                                EDMR);
                while (cnt > 0) {
                        if (!(sh_eth_read(ndev, EDMR) & 0x3))
                                break;
                        mdelay(1);
                        cnt--;
                }
                if (cnt == 0)
                        printk(KERN_ERR "Device reset failed\n");

                /* Table Init */
                sh_eth_write(ndev, 0x0, TDLAR);
                sh_eth_write(ndev, 0x0, TDFAR);
                sh_eth_write(ndev, 0x0, TDFXR);
                sh_eth_write(ndev, 0x0, TDFFR);
                sh_eth_write(ndev, 0x0, RDLAR);
                sh_eth_write(ndev, 0x0, RDFAR);
                sh_eth_write(ndev, 0x0, RDFXR);
                sh_eth_write(ndev, 0x0, RDFFR);
        } else {
                sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
                                EDMR);
                mdelay(3);
                sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
                                EDMR);
        }
}

static void sh_eth_set_duplex_giga(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        if (mdp->duplex) /* Full */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
        else            /* Half */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, 0x00000000, GECMR);
                break;
        case 100: /* 100BASE */
                sh_eth_write(ndev, 0x00000010, GECMR);
                break;
        case 1000: /* 1000BASE */
                sh_eth_write(ndev, 0x00000020, GECMR);
                break;
        default:
                break;
        }
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh_eth_my_cpu_data_giga = {
        .chip_reset     = sh_eth_chip_reset_giga,
        .set_duplex     = sh_eth_set_duplex_giga,
        .set_rate       = sh_eth_set_rate_giga,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
        .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE |
                          EESR_TFE,
        .fdr_value      = 0x0000072f,
        .rmcr_value     = 0x00000001,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .rpadir         = 1,
        .rpadir_value   = 2 << 16, /* NET_IP_ALIGN assumed to be 2 */
        .no_trimd       = 1,
        .no_ade         = 1,
};

static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
{
        if (sh_eth_is_gether(mdp))
                return &sh_eth_my_cpu_data_giga;
        else
                return &sh_eth_my_cpu_data;
}

#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU  1
static void sh_eth_chip_reset(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        /* reset device */
        sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
        mdelay(1);
}

static void sh_eth_reset(struct net_device *ndev)
{
        int cnt = 100;

        sh_eth_write(ndev, EDSR_ENALL, EDSR);
        sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
        while (cnt > 0) {
                if (!(sh_eth_read(ndev, EDMR) & 0x3))
                        break;
                mdelay(1);
                cnt--;
        }
        if (cnt == 0)
                printk(KERN_ERR "Device reset failed\n");

        /* Table Init */
        sh_eth_write(ndev, 0x0, TDLAR);
        sh_eth_write(ndev, 0x0, TDFAR);
        sh_eth_write(ndev, 0x0, TDFXR);
        sh_eth_write(ndev, 0x0, TDFFR);
        sh_eth_write(ndev, 0x0, RDLAR);
        sh_eth_write(ndev, 0x0, RDFAR);
        sh_eth_write(ndev, 0x0, RDFXR);
        sh_eth_write(ndev, 0x0, RDFFR);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        if (mdp->duplex) /* Full */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
        else            /* Half */
                sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

static void sh_eth_set_rate(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);

        switch (mdp->speed) {
        case 10: /* 10BASE */
                sh_eth_write(ndev, GECMR_10, GECMR);
                break;
        case 100: /* 100BASE */
                sh_eth_write(ndev, GECMR_100, GECMR);
                break;
        case 1000: /* 1000BASE */
                sh_eth_write(ndev, GECMR_1000, GECMR);
                break;
        default:
                break;
        }
}

/* sh7763 */
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .chip_reset     = sh_eth_chip_reset,
        .set_duplex     = sh_eth_set_duplex,
        .set_rate       = sh_eth_set_rate,

        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
                          EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
                          EESR_ECI,
        .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE |
                          EESR_TFE,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .bculr          = 1,
        .hw_swap        = 1,
        .no_trimd       = 1,
        .no_ade         = 1,
        .tsu            = 1,
};

#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
#define SH_ETH_RESET_DEFAULT    1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

        .apr            = 1,
        .mpr            = 1,
        .tpauser        = 1,
        .hw_swap        = 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
#define SH_ETH_RESET_DEFAULT    1
#define SH_ETH_HAS_TSU  1
static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
        .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
        .tsu            = 1,
};
#endif

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
        if (!cd->ecsr_value)
                cd->ecsr_value = DEFAULT_ECSR_INIT;

        if (!cd->ecsipr_value)
                cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

        if (!cd->fcftr_value)
                cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
                                  DEFAULT_FIFO_F_D_RFD;

        if (!cd->fdr_value)
                cd->fdr_value = DEFAULT_FDR_INIT;

        if (!cd->rmcr_value)
                cd->rmcr_value = DEFAULT_RMCR_VALUE;

        if (!cd->tx_check)
                cd->tx_check = DEFAULT_TX_CHECK;

        if (!cd->eesr_err_check)
                cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;

        if (!cd->tx_error_check)
                cd->tx_error_check = DEFAULT_TX_ERROR_CHECK;
}
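
/*
 * Note on the pattern above: each per-SoC sh_eth_cpu_data initializer
 * only fills in the fields that differ from the common case; any field
 * left zero is topped up with a DEFAULT_* value here before the device
 * is brought up.
 */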

#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
static void sh_eth_reset(struct net_device *ndev)
{
        sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
        mdelay(3);
        sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
}
#endif

#if defined(CONFIG_CPU_SH4)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
        int reserve;

        reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
        if (reserve)
                skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
        skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif

/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
        switch (mdp->edmac_endian) {
        case EDMAC_LITTLE_ENDIAN:
                return cpu_to_le32(x);
        case EDMAC_BIG_ENDIAN:
                return cpu_to_be32(x);
        }
        return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
        switch (mdp->edmac_endian) {
        case EDMAC_LITTLE_ENDIAN:
                return le32_to_cpu(x);
        case EDMAC_BIG_ENDIAN:
                return be32_to_cpu(x);
        }
        return x;
}
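
/*
 * Editor's note, inferred from the surrounding code: the Rx/Tx
 * descriptors are shared with the EDMAC, whose descriptor byte order
 * is fixed per SoC and apparently supplied in mdp->edmac_endian via
 * platform data, so every descriptor word passes through these
 * helpers, e.g.:
 *
 *      txdesc->status = cpu_to_edmac(mdp, TD_TFP);
 *      desc_status = edmac_to_cpu(mdp, rxdesc->status);
 */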

/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static void update_mac_address(struct net_device *ndev)
{
        sh_eth_write(ndev,
                (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
                (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
        sh_eth_write(ndev,
                (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}
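
/*
 * Worked example of the packing above: for the MAC address
 * 00:11:22:33:44:55, MAHR is written with 0x00112233 (bytes 0-3) and
 * MALR with 0x00004455 (bytes 4-5).
 */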

/*
 * Get MAC address from SuperH MAC address register
 *
 * The SuperH Ethernet controller has no ROM for the MAC address.
 * This driver picks up the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g); to use this device, a MAC address must be set
 * in the bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
        if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
                memcpy(ndev->dev_addr, mac, 6);
        } else {
                ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
                ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
                ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
                ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
                ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
                ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
        }
}

static int sh_eth_is_gether(struct sh_eth_private *mdp)
{
        if (mdp->reg_offset == sh_eth_offset_gigabit)
                return 1;
        else
                return 0;
}

static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
        if (sh_eth_is_gether(mdp))
                return EDTRR_TRNS_GETHER;
        else
                return EDTRR_TRNS_ETHER;
}

struct bb_info {
        void (*set_gate)(void *addr);
        struct mdiobb_ctrl ctrl;
        void *addr;
        u32 mmd_msk;    /* MMD: MDIO data direction */
        u32 mdo_msk;    /* MDO: MDIO data output */
        u32 mdi_msk;    /* MDI: MDIO data input */
        u32 mdc_msk;    /* MDC: MDIO clock */
};

/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
        iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
        iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
        return (ioread32(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        if (bit)
                bb_set(bitbang->addr, bitbang->mmd_msk);
        else
                bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        if (bit)
                bb_set(bitbang->addr, bitbang->mdo_msk);
        else
                bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
        struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

        if (bitbang->set_gate)
                bitbang->set_gate(bitbang->addr);

        if (bit)
                bb_set(bitbang->addr, bitbang->mdc_msk);
        else
                bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
        .owner = THIS_MODULE,
        .set_mdc = sh_mdc_ctrl,
        .set_mdio_dir = sh_mmd_ctrl,
        .set_mdio_data = sh_set_mdio,
        .get_mdio_data = sh_get_mdio,
};
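
/*
 * How these callbacks are used (the registration itself is outside
 * this excerpt): the generic mdio-bitbang layer implements the MDIO
 * frame protocol by calling set_mdio_dir/set_mdio_data/get_mdio_data/
 * set_mdc one bit at a time, so this driver only has to toggle the
 * MMD, MDO, MDI and MDC bits of a single pin-control register. The
 * ctrl member is what would typically be handed to
 * alloc_mdio_bitbang() to obtain a struct mii_bus.
 */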

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;

        /* Free Rx skb ringbuffer */
        if (mdp->rx_skbuff) {
                for (i = 0; i < RX_RING_SIZE; i++) {
                        if (mdp->rx_skbuff[i])
                                dev_kfree_skb(mdp->rx_skbuff[i]);
                }
        }
        kfree(mdp->rx_skbuff);

        /* Free Tx skb ringbuffer */
        if (mdp->tx_skbuff) {
                for (i = 0; i < TX_RING_SIZE; i++) {
                        if (mdp->tx_skbuff[i])
                                dev_kfree_skb(mdp->tx_skbuff[i]);
                }
        }
        kfree(mdp->tx_skbuff);
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i;
        struct sk_buff *skb;
        struct sh_eth_rxdesc *rxdesc = NULL;
        struct sh_eth_txdesc *txdesc = NULL;
        int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
        int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;

        mdp->cur_rx = mdp->cur_tx = 0;
        mdp->dirty_rx = mdp->dirty_tx = 0;

        memset(mdp->rx_ring, 0, rx_ringsize);

        /* build Rx ring buffer */
        for (i = 0; i < RX_RING_SIZE; i++) {
                /* skb */
                mdp->rx_skbuff[i] = NULL;
                skb = dev_alloc_skb(mdp->rx_buf_sz);
                mdp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
                                DMA_FROM_DEVICE);
                skb->dev = ndev; /* Mark as being used by this device. */
                sh_eth_set_receive_align(skb);

                /* RX descriptor */
                rxdesc = &mdp->rx_ring[i];
                rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
                rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

                /* Round the buffer size up to a 16-byte boundary. */
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
                /* Rx descriptor address set */
                if (i == 0) {
                        sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
                        if (sh_eth_is_gether(mdp))
                                sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
                }
        }

        mdp->dirty_rx = (u32) (i - RX_RING_SIZE);

        /* Mark the last entry as wrapping the ring. */
        rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

        memset(mdp->tx_ring, 0, tx_ringsize);

        /* build Tx ring buffer */
        for (i = 0; i < TX_RING_SIZE; i++) {
                mdp->tx_skbuff[i] = NULL;
                txdesc = &mdp->tx_ring[i];
                txdesc->status = cpu_to_edmac(mdp, TD_TFP);
                txdesc->buffer_length = 0;
                if (i == 0) {
                        /* Tx descriptor address set */
                        sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
                        if (sh_eth_is_gether(mdp))
                                sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
                }
        }

        txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
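
/*
 * Descriptor ownership, as implied by the flags used above: setting
 * RD_RACT (Rx) or TD_TACT (Tx) hands a descriptor to the EDMAC, which
 * clears the bit once it has consumed the entry; RD_RDEL and TD_TDLE
 * on the last entry make the controller wrap back to the ring base in
 * RDLAR/TDLAR, so no explicit link pointers are needed.
 */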

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int rx_ringsize, tx_ringsize, ret = 0;

        /*
         * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
         * card needs room to do 8 byte alignment, +2 so we can reserve
         * the first 2 bytes, and +16 gets room for the status word from the
         * card.
         */
        mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
                          (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
        if (mdp->cd->rpadir)
                mdp->rx_buf_sz += NET_IP_ALIGN;

        /* Allocate RX and TX skb rings */
        mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
                                GFP_KERNEL);
        if (!mdp->rx_skbuff) {
                dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
                ret = -ENOMEM;
                return ret;
        }

        mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
                                GFP_KERNEL);
        if (!mdp->tx_skbuff) {
                dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
                ret = -ENOMEM;
                goto skb_ring_free;
        }

        /* Allocate all Rx descriptors. */
        rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
        mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
                        GFP_KERNEL);

        if (!mdp->rx_ring) {
                dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
                        rx_ringsize);
                ret = -ENOMEM;
                /* rx_ring was never allocated, so skip the DMA free */
                goto skb_ring_free;
        }

        mdp->dirty_rx = 0;

        /* Allocate all Tx descriptors. */
        tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
        mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
                        GFP_KERNEL);
        if (!mdp->tx_ring) {
                dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
                        tx_ringsize);
                ret = -ENOMEM;
                goto desc_ring_free;
        }
        return ret;

desc_ring_free:
        /* free DMA buffer */
        dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
        /* Free Rx and Tx skb ring buffer */
        sh_eth_ring_free(ndev);

        return ret;
}

static int sh_eth_dev_init(struct net_device *ndev)
{
        int ret = 0;
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 rx_int_var, tx_int_var;
        u32 val;

        /* Soft Reset */
        sh_eth_reset(ndev);

        /* Descriptor format */
        sh_eth_ring_format(ndev);
        if (mdp->cd->rpadir)
                sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

        /* all sh_eth int mask */
        sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN__)
        if (mdp->cd->hw_swap)
                sh_eth_write(ndev, EDMR_EL, EDMR);
        else
#endif
                sh_eth_write(ndev, 0, EDMR);

        /* FIFO size set */
        sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
        sh_eth_write(ndev, 0, TFTR);

        /* Frame recv control */
        sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

        rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
        tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
        sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);

        if (mdp->cd->bculr)
                sh_eth_write(ndev, 0x800, BCULR);       /* Burst cycle set */

        sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

        if (!mdp->cd->no_trimd)
                sh_eth_write(ndev, 0, TRIMD);

        /* Recv frame limit set register */
        sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
                     RFLR);

        sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
        sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

        /* PAUSE Prohibition */
        val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
                ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

        sh_eth_write(ndev, val, ECMR);

        if (mdp->cd->set_rate)
                mdp->cd->set_rate(ndev);

        /* E-MAC Status Register clear */
        sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

        /* E-MAC Interrupt Enable register */
        sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

        /* Set MAC address */
        update_mac_address(ndev);

        /* mask reset */
        if (mdp->cd->apr)
                sh_eth_write(ndev, APR_AP, APR);
        if (mdp->cd->mpr)
                sh_eth_write(ndev, MPR_MP, MPR);
        if (mdp->cd->tpauser)
                sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

        /* Setting the Rx mode will start the Rx process. */
        sh_eth_write(ndev, EDRRR_R, EDRRR);

        netif_start_queue(ndev);

        return ret;
}

/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_txdesc *txdesc;
        int free_num = 0;
        int entry = 0;

        for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
                entry = mdp->dirty_tx % TX_RING_SIZE;
                txdesc = &mdp->tx_ring[entry];
                if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
                        break;
                /* Free the original skb. */
                if (mdp->tx_skbuff[entry]) {
                        dma_unmap_single(&ndev->dev, txdesc->addr,
                                         txdesc->buffer_length, DMA_TO_DEVICE);
                        dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
                        mdp->tx_skbuff[entry] = NULL;
                        free_num++;
                }
                txdesc->status = cpu_to_edmac(mdp, TD_TFP);
                if (entry >= TX_RING_SIZE - 1)
                        txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

                mdp->stats.tx_packets++;
                mdp->stats.tx_bytes += txdesc->buffer_length;
        }
        return free_num;
}
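
/*
 * Note on the accounting above: cur_tx and dirty_tx are free-running
 * counters, so cur_tx - dirty_tx is the number of descriptors still
 * in flight and "% TX_RING_SIZE" maps a counter onto a ring slot.
 * With a hypothetical ring of 8 entries, cur_tx == 10 and
 * dirty_tx == 7 would mean three pending descriptors at slots 7, 0
 * and 1.
 */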

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_rxdesc *rxdesc;

        int entry = mdp->cur_rx % RX_RING_SIZE;
        int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
        struct sk_buff *skb;
        u16 pkt_len = 0;
        u32 desc_status;

        rxdesc = &mdp->rx_ring[entry];
        while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
                desc_status = edmac_to_cpu(mdp, rxdesc->status);
                pkt_len = rxdesc->frame_length;

                if (--boguscnt < 0)
                        break;

                if (!(desc_status & RDFEND))
                        mdp->stats.rx_length_errors++;

                if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
                                   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
                        mdp->stats.rx_errors++;
                        if (desc_status & RD_RFS1)
                                mdp->stats.rx_crc_errors++;
                        if (desc_status & RD_RFS2)
                                mdp->stats.rx_frame_errors++;
                        if (desc_status & RD_RFS3)
                                mdp->stats.rx_length_errors++;
                        if (desc_status & RD_RFS4)
                                mdp->stats.rx_length_errors++;
                        if (desc_status & RD_RFS6)
                                mdp->stats.rx_missed_errors++;
                        if (desc_status & RD_RFS10)
                                mdp->stats.rx_over_errors++;
                } else {
                        if (!mdp->cd->hw_swap)
                                sh_eth_soft_swap(
                                        phys_to_virt(ALIGN(rxdesc->addr, 4)),
                                        pkt_len + 2);
                        skb = mdp->rx_skbuff[entry];
                        mdp->rx_skbuff[entry] = NULL;
                        if (mdp->cd->rpadir)
                                skb_reserve(skb, NET_IP_ALIGN);
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, ndev);
                        netif_rx(skb);
                        mdp->stats.rx_packets++;
                        mdp->stats.rx_bytes += pkt_len;
                }
                rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
                entry = (++mdp->cur_rx) % RX_RING_SIZE;
                rxdesc = &mdp->rx_ring[entry];
        }

        /* Refill the Rx ring buffers. */
        for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
                entry = mdp->dirty_rx % RX_RING_SIZE;
                rxdesc = &mdp->rx_ring[entry];
                /* Round the buffer size up to a 16-byte boundary. */
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

                if (mdp->rx_skbuff[entry] == NULL) {
                        skb = dev_alloc_skb(mdp->rx_buf_sz);
                        mdp->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
                                        DMA_FROM_DEVICE);
                        skb->dev = ndev;
                        sh_eth_set_receive_align(skb);

                        skb_checksum_none_assert(skb);
                        rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
                }
                if (entry >= RX_RING_SIZE - 1)
                        rxdesc->status |=
                                cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
                else
                        rxdesc->status |=
                                cpu_to_edmac(mdp, RD_RACT | RD_RFP);
        }

        /* Restart Rx engine if stopped. */
        /* If we don't need to check status, don't. -KDU */
        if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
                sh_eth_write(ndev, EDRRR_R, EDRRR);

        return 0;
}
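
/*
 * Refill strategy, restated: entries between dirty_rx and cur_rx were
 * consumed by the loop above, and the refill loop re-arms them with
 * fresh skbs and RD_RACT. If the receiver had stalled on descriptor
 * exhaustion, EDRRR reads back 0 and writing EDRRR_R restarts it.
 */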

static void sh_eth_rcv_snd_disable(struct net_device *ndev)
{
        /* disable tx and rx */
        sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
                ~(ECMR_RE | ECMR_TE), ECMR);
}

static void sh_eth_rcv_snd_enable(struct net_device *ndev)
{
        /* enable tx and rx */
        sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
                (ECMR_RE | ECMR_TE), ECMR);
}

/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        u32 felic_stat;
        u32 link_stat;
        u32 mask;

        if (intr_status & EESR_ECI) {
                felic_stat = sh_eth_read(ndev, ECSR);
                sh_eth_write(ndev, felic_stat, ECSR);   /* clear int */
                if (felic_stat & ECSR_ICD)
                        mdp->stats.tx_carrier_errors++;
                if (felic_stat & ECSR_LCHNG) {
                        /* Link Changed */
                        if (mdp->cd->no_psr || mdp->no_ether_link) {
                                if (mdp->link == PHY_DOWN)
                                        link_stat = 0;
                                else
                                        link_stat = PHY_ST_LINK;
                        } else {
                                link_stat = (sh_eth_read(ndev, PSR));
                                if (mdp->ether_link_active_low)
                                        link_stat = ~link_stat;
                        }
                        if (!(link_stat & PHY_ST_LINK))
                                sh_eth_rcv_snd_disable(ndev);
                        else {
                                /* Link Up */
                                sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
                                          ~DMAC_M_ECI, EESIPR);
                                /* clear int */
                                sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
                                          ECSR);
                                sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
                                          DMAC_M_ECI, EESIPR);
                                /* enable tx and rx */
                                sh_eth_rcv_snd_enable(ndev);
                        }
                }
        }

        if (intr_status & EESR_TWB) {
                /* Write-back end, unused write-back interrupt */
                if (intr_status & EESR_TABT) {  /* Transmit Abort int */
                        mdp->stats.tx_aborted_errors++;
                        if (netif_msg_tx_err(mdp))
                                dev_err(&ndev->dev, "Transmit Abort\n");
                }
        }

        if (intr_status & EESR_RABT) {
                /* Receive Abort int */
                if (intr_status & EESR_RFRMER) {
                        /* Receive Frame Overflow int */
                        mdp->stats.rx_frame_errors++;
                        if (netif_msg_rx_err(mdp))
                                dev_err(&ndev->dev, "Receive Abort\n");
                }
        }

        if (intr_status & EESR_TDE) {
                /* Transmit Descriptor Empty int */
                mdp->stats.tx_fifo_errors++;
                if (netif_msg_tx_err(mdp))
                        dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
        }

        if (intr_status & EESR_TFE) {
                /* FIFO under flow */
                mdp->stats.tx_fifo_errors++;
                if (netif_msg_tx_err(mdp))
                        dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
        }

        if (intr_status & EESR_RDE) {
                /* Receive Descriptor Empty int */
                mdp->stats.rx_over_errors++;

                if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
                        sh_eth_write(ndev, EDRRR_R, EDRRR);
                if (netif_msg_rx_err(mdp))
                        dev_err(&ndev->dev, "Receive Descriptor Empty\n");
        }

        if (intr_status & EESR_RFE) {
                /* Receive FIFO Overflow int */
                mdp->stats.rx_fifo_errors++;
                if (netif_msg_rx_err(mdp))
                        dev_err(&ndev->dev, "Receive FIFO Overflow\n");
        }

        if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
                /* Address Error */
                mdp->stats.tx_fifo_errors++;
                if (netif_msg_tx_err(mdp))
                        dev_err(&ndev->dev, "Address Error\n");
        }

        mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
        if (mdp->cd->no_ade)
                mask &= ~EESR_ADE;
        if (intr_status & mask) {
                /* Tx error */
                u32 edtrr = sh_eth_read(ndev, EDTRR);
                /* dmesg */
                dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
                                intr_status, mdp->cur_tx);
                dev_err(&ndev->dev, "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
                                mdp->dirty_tx, (u32) ndev->state, edtrr);
                /* dirty buffer free */
                sh_eth_txfree(ndev);

                /* SH7712 BUG: restart Tx DMA if the TRNS bits were cleared */
                if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
                        /* tx dma start */
                        sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
                }
                /* wakeup */
                netif_wake_queue(ndev);
        }
}

static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
        struct net_device *ndev = netdev;
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_cpu_data *cd = mdp->cd;
        irqreturn_t ret = IRQ_NONE;
        u32 intr_status = 0;

        spin_lock(&mdp->lock);

        /* Get interrupt status */
        intr_status = sh_eth_read(ndev, EESR);
        /* Clear interrupt */
        if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
                        EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
                        cd->tx_check | cd->eesr_err_check)) {
                sh_eth_write(ndev, intr_status, EESR);
                ret = IRQ_HANDLED;
        } else
                goto other_irq;

        if (intr_status & (EESR_FRC | /* Frame recv */
                        EESR_RMAF | /* Multicast address recv */
                        EESR_RRF  | /* Residual bit frame recv */
                        EESR_RTLF | /* Long frame recv */
                        EESR_RTSF | /* Short frame recv */
                        EESR_PRE  | /* PHY-LSI recv error */
                        EESR_CERF)) { /* Recv frame CRC error */
                sh_eth_rx(ndev);
        }

        /* Tx Check */
        if (intr_status & cd->tx_check) {
                sh_eth_txfree(ndev);
                netif_wake_queue(ndev);
        }

        if (intr_status & cd->eesr_err_check)
                sh_eth_error(ndev, intr_status);

other_irq:
        spin_unlock(&mdp->lock);

        return ret;
}
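
/*
 * Acknowledge model, as the handler above treats it: EESR appears to
 * be write-one-to-clear, so writing the just-read intr_status back
 * acknowledges exactly the events this invocation will handle while
 * leaving anything that arrives in between pending.
 */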

static void sh_eth_timer(unsigned long data)
{
        struct net_device *ndev = (struct net_device *)data;
        struct sh_eth_private *mdp = netdev_priv(ndev);

        mod_timer(&mdp->timer, jiffies + (10 * HZ));
}

/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct phy_device *phydev = mdp->phydev;
        int new_state = 0;

        if (phydev->link != PHY_DOWN) {
                if (phydev->duplex != mdp->duplex) {
                        new_state = 1;
                        mdp->duplex = phydev->duplex;
                        if (mdp->cd->set_duplex)
                                mdp->cd->set_duplex(ndev);
                }

                if (phydev->speed != mdp->speed) {
                        new_state = 1;
                        mdp->speed = phydev->speed;
                        if (mdp->cd->set_rate)
                                mdp->cd->set_rate(ndev);
                }
                if (mdp->link == PHY_DOWN) {
                        sh_eth_write(ndev,
                                (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
                        new_state = 1;
                        mdp->link = phydev->link;
                }
        } else if (mdp->link) {
                new_state = 1;
                mdp->link = PHY_DOWN;
                mdp->speed = 0;
                mdp->duplex = -1;
        }

        if (new_state && netif_msg_link(mdp))
                phy_print_status(phydev);
}

/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        char phy_id[MII_BUS_ID_SIZE + 3];
        struct phy_device *phydev = NULL;

        snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
                mdp->mii_bus->id, mdp->phy_id);

        mdp->link = PHY_DOWN;
        mdp->speed = 0;
        mdp->duplex = -1;

        /* Try connect to PHY */
        phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
                                0, mdp->phy_interface);
        if (IS_ERR(phydev)) {
                dev_err(&ndev->dev, "phy_connect failed\n");
                return PTR_ERR(phydev);
        }

        dev_info(&ndev->dev, "attached phy %i to driver %s\n",
                phydev->addr, phydev->drv->name);

        mdp->phydev = phydev;

        return 0;
}

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int ret;

        ret = sh_eth_phy_init(ndev);
        if (ret)
                return ret;

        /* reset phy - this also wakes it from PDOWN */
        phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
        phy_start(mdp->phydev);

        return 0;
}

static int sh_eth_get_settings(struct net_device *ndev,
                        struct ethtool_cmd *ecmd)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&mdp->lock, flags);
        ret = phy_ethtool_gset(mdp->phydev, ecmd);
        spin_unlock_irqrestore(&mdp->lock, flags);

        return ret;
}

static int sh_eth_set_settings(struct net_device *ndev,
                struct ethtool_cmd *ecmd)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&mdp->lock, flags);

        /* disable tx and rx */
        sh_eth_rcv_snd_disable(ndev);

        ret = phy_ethtool_sset(mdp->phydev, ecmd);
        if (ret)
                goto error_exit;

        if (ecmd->duplex == DUPLEX_FULL)
                mdp->duplex = 1;
        else
                mdp->duplex = 0;

        if (mdp->cd->set_duplex)
                mdp->cd->set_duplex(ndev);

error_exit:
        mdelay(1);

        /* enable tx and rx */
        sh_eth_rcv_snd_enable(ndev);

        spin_unlock_irqrestore(&mdp->lock, flags);

        return ret;
}

static int sh_eth_nway_reset(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&mdp->lock, flags);
        ret = phy_start_aneg(mdp->phydev);
        spin_unlock_irqrestore(&mdp->lock, flags);

        return ret;
}

static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        return mdp->msg_enable;
}

static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        mdp->msg_enable = value;
}

static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
        "rx_current", "tx_current",
        "rx_dirty", "tx_dirty",
};
#define SH_ETH_STATS_LEN  ARRAY_SIZE(sh_eth_gstrings_stats)

static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return SH_ETH_STATS_LEN;
        default:
                return -EOPNOTSUPP;
        }
}

static void sh_eth_get_ethtool_stats(struct net_device *ndev,
                        struct ethtool_stats *stats, u64 *data)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        int i = 0;

        /* device-specific stats */
        data[i++] = mdp->cur_rx;
        data[i++] = mdp->cur_tx;
        data[i++] = mdp->dirty_rx;
        data[i++] = mdp->dirty_tx;
}

static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(data, *sh_eth_gstrings_stats,
                                        sizeof(sh_eth_gstrings_stats));
                break;
        }
}

static const struct ethtool_ops sh_eth_ethtool_ops = {
        .get_settings   = sh_eth_get_settings,
        .set_settings   = sh_eth_set_settings,
        .nway_reset     = sh_eth_nway_reset,
        .get_msglevel   = sh_eth_get_msglevel,
        .set_msglevel   = sh_eth_set_msglevel,
        .get_link       = ethtool_op_get_link,
        .get_strings    = sh_eth_get_strings,
        .get_ethtool_stats  = sh_eth_get_ethtool_stats,
        .get_sset_count     = sh_eth_get_sset_count,
};

/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
        int ret = 0;
        struct sh_eth_private *mdp = netdev_priv(ndev);

        pm_runtime_get_sync(&mdp->pdev->dev);

        ret = request_irq(ndev->irq, sh_eth_interrupt,
#if defined(CONFIG_CPU_SUBTYPE_SH7763) || \
        defined(CONFIG_CPU_SUBTYPE_SH7764) || \
        defined(CONFIG_CPU_SUBTYPE_SH7757)
                                IRQF_SHARED,
#else
                                0,
#endif
                                ndev->name, ndev);
        if (ret) {
                dev_err(&ndev->dev, "Cannot assign IRQ number\n");
                goto out_pm_put;
        }

        /* Descriptor set */
        ret = sh_eth_ring_init(ndev);
        if (ret)
                goto out_free_irq;

        /* device init */
        ret = sh_eth_dev_init(ndev);
        if (ret)
                goto out_free_irq;

        /* PHY control start */
        ret = sh_eth_phy_start(ndev);
        if (ret)
                goto out_free_irq;

        /* Set the timer to check for link beat. */
        setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
        mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */

        return ret;

out_free_irq:
        free_irq(ndev->irq, ndev);
out_pm_put:
        pm_runtime_put_sync(&mdp->pdev->dev);
        return ret;
}

/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_rxdesc *rxdesc;
        int i;

        netif_stop_queue(ndev);

        if (netif_msg_timer(mdp))
                dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x,"
                        " resetting...\n", ndev->name,
                        (int)sh_eth_read(ndev, EESR));

        /* tx_errors count up */
        mdp->stats.tx_errors++;

        /* timer off */
        del_timer_sync(&mdp->timer);

        /* Free all the skbuffs in the Rx queue. */
        for (i = 0; i < RX_RING_SIZE; i++) {
                rxdesc = &mdp->rx_ring[i];
                rxdesc->status = 0;
                rxdesc->addr = 0xBADF00D0;
                if (mdp->rx_skbuff[i])
                        dev_kfree_skb(mdp->rx_skbuff[i]);
                mdp->rx_skbuff[i] = NULL;
        }
        for (i = 0; i < TX_RING_SIZE; i++) {
                if (mdp->tx_skbuff[i])
                        dev_kfree_skb(mdp->tx_skbuff[i]);
                mdp->tx_skbuff[i] = NULL;
        }

        /* device init */
        sh_eth_dev_init(ndev);

        /* timer on */
        mdp->timer.expires = jiffies + (24 * HZ) / 10; /* 2.4 sec. */
        add_timer(&mdp->timer);
}

/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct sh_eth_txdesc *txdesc;
        u32 entry;
        unsigned long flags;

        spin_lock_irqsave(&mdp->lock, flags);
        if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
                if (!sh_eth_txfree(ndev)) {
                        if (netif_msg_tx_queued(mdp))
                                dev_warn(&ndev->dev, "TxFD exhausted.\n");
                        netif_stop_queue(ndev);
                        spin_unlock_irqrestore(&mdp->lock, flags);
                        return NETDEV_TX_BUSY;
                }
        }
        spin_unlock_irqrestore(&mdp->lock, flags);

        entry = mdp->cur_tx % TX_RING_SIZE;
        mdp->tx_skbuff[entry] = skb;
        txdesc = &mdp->tx_ring[entry];
        /* soft swap: byte-swap the payload in place before it is
         * DMA-mapped, on controllers that cannot swap in hardware */
        if (!mdp->cd->hw_swap)
                sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
        txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
                                      DMA_TO_DEVICE);
        if (skb->len < ETHERSMALL)
                txdesc->buffer_length = ETHERSMALL;
        else
                txdesc->buffer_length = skb->len;

        if (entry >= TX_RING_SIZE - 1)
                txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
        else
                txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

        mdp->cur_tx++;

        if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
                sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

        return NETDEV_TX_OK;
}
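
/*
 * Transmit kick, restated: writing the TRNS bits to EDTRR tells the
 * EDMAC to start (or resume) fetching Tx descriptors; the read-back
 * test above only avoids a redundant write while transmission is
 * already running.
 */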
1524
1525/* device close function */
1526static int sh_eth_close(struct net_device *ndev)
1527{
1528        struct sh_eth_private *mdp = netdev_priv(ndev);
1529        int ringsize;
1530
1531        netif_stop_queue(ndev);
1532
1533        /* Disable interrupts by clearing the interrupt mask. */
1534        sh_eth_write(ndev, 0x0000, EESIPR);
1535
1536        /* Stop the chip's Tx and Rx processes. */
1537        sh_eth_write(ndev, 0, EDTRR);
1538        sh_eth_write(ndev, 0, EDRRR);
1539
1540        /* PHY Disconnect */
1541        if (mdp->phydev) {
1542                phy_stop(mdp->phydev);
1543                phy_disconnect(mdp->phydev);
1544        }
1545
1546        free_irq(ndev->irq, ndev);
1547
1548        del_timer_sync(&mdp->timer);
1549
1550        /* Free all the skbuffs in the Rx queue. */
1551        sh_eth_ring_free(ndev);
1552
1553        /* free Rx descriptor ring */
1554        ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
1555        dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
1556
1557        /* free Tx descriptor ring */
1558        ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
1559        dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
1560
1561        pm_runtime_put_sync(&mdp->pdev->dev);
1562
1563        return 0;
1564}
1565
1566static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1567{
1568        struct sh_eth_private *mdp = netdev_priv(ndev);
1569
1570        pm_runtime_get_sync(&mdp->pdev->dev);
1571
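            /* The MAC's counters clear on write, so fold each hardware
             * count into the software statistics and reset the register.
             */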
1572        mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR);
1573        sh_eth_write(ndev, 0, TROCR);   /* (write clear) */
1574        mdp->stats.collisions += sh_eth_read(ndev, CDCR);
1575        sh_eth_write(ndev, 0, CDCR);    /* (write clear) */
1576        mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
1577        sh_eth_write(ndev, 0, LCCR);    /* (write clear) */
1578        if (sh_eth_is_gether(mdp)) {
1579                mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
1580                sh_eth_write(ndev, 0, CERCR);   /* (write clear) */
1581                mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
1582                sh_eth_write(ndev, 0, CEECR);   /* (write clear) */
1583        } else {
1584                mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
1585                sh_eth_write(ndev, 0, CNDCR);   /* (write clear) */
1586        }
1587        pm_runtime_put_sync(&mdp->pdev->dev);
1588
1589        return &mdp->stats;
1590}
1591
1592/* ioctl to device function */
1593static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
1594                                int cmd)
1595{
1596        struct sh_eth_private *mdp = netdev_priv(ndev);
1597        struct phy_device *phydev = mdp->phydev;
1598
1599        if (!netif_running(ndev))
1600                return -EINVAL;
1601
1602        if (!phydev)
1603                return -ENODEV;
1604
1605        return phy_mii_ioctl(phydev, rq, cmd);
1606}
1607
1608#if defined(SH_ETH_HAS_TSU)
1609/* Set the multicast/promiscuous reception mode */
1610static void sh_eth_set_multicast_list(struct net_device *ndev)
1611{
1612        if (ndev->flags & IFF_PROMISC) {
1613                /* Set promiscuous. */
1614                sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) |
1615                                ECMR_PRM, ECMR);
1616        } else {
1617                /* Normal, unicast/broadcast-only mode. */
1618                sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) |
1619                                ECMR_MCT, ECMR);
1620        }
1621}
1622#endif /* SH_ETH_HAS_TSU */
1623
1624/* SuperH's TSU register init function */
1625static void sh_eth_tsu_init(struct sh_eth_private *mdp)
1626{
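            /* Bring the TSU up in a pass-through state: port-to-port
             * forwarding off, no priority/QTAG handling, and every CAM
             * entry and TSU interrupt source disabled.
             */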
1627        sh_eth_tsu_write(mdp, 0, TSU_FWEN0);    /* Disable forward(0->1) */
1628        sh_eth_tsu_write(mdp, 0, TSU_FWEN1);    /* Disable forward(1->0) */
1629        sh_eth_tsu_write(mdp, 0, TSU_FCM);      /* forward fifo 3k-3k */
1630        sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
1631        sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
1632        sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
1633        sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
1634        sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
1635        sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
1636        sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
1637        if (sh_eth_is_gether(mdp)) {
1638                sh_eth_tsu_write(mdp, 0, TSU_QTAG0);    /* Disable QTAG(0->1) */
1639                sh_eth_tsu_write(mdp, 0, TSU_QTAG1);    /* Disable QTAG(1->0) */
1640        } else {
1641                sh_eth_tsu_write(mdp, 0, TSU_QTAGM0);   /* Disable QTAG(0->1) */
1642                sh_eth_tsu_write(mdp, 0, TSU_QTAGM1);   /* Disable QTAG(1->0) */
1643        }
1644        sh_eth_tsu_write(mdp, 0, TSU_FWSR);     /* all interrupt status clear */
1645        sh_eth_tsu_write(mdp, 0, TSU_FWINMK);   /* Disable all interrupt */
1646        sh_eth_tsu_write(mdp, 0, TSU_TEN);      /* Disable all CAM entry */
1647        sh_eth_tsu_write(mdp, 0, TSU_POST1);    /* Disable CAM entry [ 0- 7] */
1648        sh_eth_tsu_write(mdp, 0, TSU_POST2);    /* Disable CAM entry [ 8-15] */
1649        sh_eth_tsu_write(mdp, 0, TSU_POST3);    /* Disable CAM entry [16-23] */
1650        sh_eth_tsu_write(mdp, 0, TSU_POST4);    /* Disable CAM entry [24-31] */
1651}
1652
1653/* MDIO bus release function */
1654static int sh_mdio_release(struct net_device *ndev)
1655{
1656        struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
1657
1658        /* unregister mdio bus */
1659        mdiobus_unregister(bus);
1660
1661        /* remove mdio bus info from net_device */
1662        dev_set_drvdata(&ndev->dev, NULL);
1663
1664        /* free the PHY IRQ table */
1665        kfree(bus->irq);
1666
1667        /* free bitbang info */
1668        free_mdio_bitbang(bus);
1669
1670        return 0;
1671}
1672
1673/* MDIO bus init function */
1674static int sh_mdio_init(struct net_device *ndev, int id,
1675                        struct sh_eth_plat_data *pd)
1676{
1677        int ret, i;
1678        struct bb_info *bitbang;
1679        struct sh_eth_private *mdp = netdev_priv(ndev);
1680
1681        /* create bit control struct for PHY */
1682        bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
1683        if (!bitbang) {
1684                ret = -ENOMEM;
1685                goto out;
1686        }
1687
1688        /* bitbang init */
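            /* The PIR register drives the MDIO lines directly; the masks
             * below pick out its bits: MDC (bit 0), MMD direction (bit 1),
             * MDO output (bit 2) and MDI input (bit 3).
             */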
1689        bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
1690        bitbang->set_gate = pd->set_mdio_gate;
1691        bitbang->mdi_msk = 0x08;
1692        bitbang->mdo_msk = 0x04;
1693        bitbang->mmd_msk = 0x02;/* MMD */
1694        bitbang->mdc_msk = 0x01;
1695        bitbang->ctrl.ops = &bb_ops;
1696
1697        /* MII controller setting */
1698        mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
1699        if (!mdp->mii_bus) {
1700                ret = -ENOMEM;
1701                goto out_free_bitbang;
1702        }
1703
1704        /* Hook up MII support for ethtool */
1705        mdp->mii_bus->name = "sh_mii";
1706        mdp->mii_bus->parent = &ndev->dev;
1707        snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
1708                mdp->pdev->name, id);
1709
1710        /* PHY IRQ */
1711        mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1712        if (!mdp->mii_bus->irq) {
1713                ret = -ENOMEM;
1714                goto out_free_bus;
1715        }
1716
1717        for (i = 0; i < PHY_MAX_ADDR; i++)
1718                mdp->mii_bus->irq[i] = PHY_POLL;
1719
1720        /* register the mdio bus */
1721        ret = mdiobus_register(mdp->mii_bus);
1722        if (ret)
1723                goto out_free_irq;
1724
1725        dev_set_drvdata(&ndev->dev, mdp->mii_bus);
1726
1727        return 0;
1728
1729out_free_irq:
1730        kfree(mdp->mii_bus->irq);
1731
1732out_free_bus:
1733        free_mdio_bitbang(mdp->mii_bus);
1734
1735out_free_bitbang:
1736        kfree(bitbang);
1737
1738out:
1739        return ret;
1740}
1741
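    /*
     * The Ether block's register layout differs between SoC generations,
     * so all register accesses go through a per-chip offset table chosen
     * from the platform data's register_type.
     */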
1742static const u16 *sh_eth_get_register_offset(int register_type)
1743{
1744        const u16 *reg_offset = NULL;
1745
1746        switch (register_type) {
1747        case SH_ETH_REG_GIGABIT:
1748                reg_offset = sh_eth_offset_gigabit;
1749                break;
1750        case SH_ETH_REG_FAST_SH4:
1751                reg_offset = sh_eth_offset_fast_sh4;
1752                break;
1753        case SH_ETH_REG_FAST_SH3_SH2:
1754                reg_offset = sh_eth_offset_fast_sh3_sh2;
1755                break;
1756        default:
1757                pr_err("Unknown register type (%d)\n", register_type);
1758                break;
1759        }
1760
1761        return reg_offset;
1762}
1763
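    /* Standard net_device callbacks; multicast filtering requires the
     * TSU, so ndo_set_rx_mode is only wired up on parts that have one.
     */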
1764static const struct net_device_ops sh_eth_netdev_ops = {
1765        .ndo_open               = sh_eth_open,
1766        .ndo_stop               = sh_eth_close,
1767        .ndo_start_xmit         = sh_eth_start_xmit,
1768        .ndo_get_stats          = sh_eth_get_stats,
1769#if defined(SH_ETH_HAS_TSU)
1770        .ndo_set_rx_mode        = sh_eth_set_multicast_list,
1771#endif
1772        .ndo_tx_timeout         = sh_eth_tx_timeout,
1773        .ndo_do_ioctl           = sh_eth_do_ioctl,
1774        .ndo_validate_addr      = eth_validate_addr,
1775        .ndo_set_mac_address    = eth_mac_addr,
1776        .ndo_change_mtu         = eth_change_mtu,
1777};
1778
1779static int sh_eth_drv_probe(struct platform_device *pdev)
1780{
1781        int ret, devno = 0;
1782        struct resource *res;
1783        struct net_device *ndev = NULL;
1784        struct sh_eth_private *mdp = NULL;
1785        struct sh_eth_plat_data *pd;
1786
1787        /* get base addr */
1788        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1789        if (unlikely(res == NULL)) {
1790                dev_err(&pdev->dev, "invalid resource\n");
1791                ret = -EINVAL;
1792                goto out;
1793        }
1794
1795        ndev = alloc_etherdev(sizeof(struct sh_eth_private));
1796        if (!ndev) {
1797                dev_err(&pdev->dev, "Could not allocate device.\n");
1798                ret = -ENOMEM;
1799                goto out;
1800        }
1801
1802        /* The sh Ether-specific entries in the device structure. */
1803        ndev->base_addr = res->start;
1804        devno = pdev->id;
1805        if (devno < 0)
1806                devno = 0;
1807
1808        ndev->dma = -1;
1809        ret = platform_get_irq(pdev, 0);
1810        if (ret < 0) {
1811                ret = -ENODEV;
1812                goto out_release;
1813        }
1814        ndev->irq = ret;
1815
1816        SET_NETDEV_DEV(ndev, &pdev->dev);
1817
1818        /* Fill in the fields of the device structure with ethernet values. */
1819        ether_setup(ndev);
1820
1821        mdp = netdev_priv(ndev);
1822        mdp->addr = ioremap(res->start, resource_size(res));
1823        if (mdp->addr == NULL) {
1824                ret = -ENOMEM;
1825                dev_err(&pdev->dev, "ioremap failed.\n");
1826                goto out_release;
1827        }
1828
1829        spin_lock_init(&mdp->lock);
1830        mdp->pdev = pdev;
1831        pm_runtime_enable(&pdev->dev);
1832        pm_runtime_resume(&pdev->dev);
1833
1834        pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
1835        /* get PHY ID */
1836        mdp->phy_id = pd->phy;
1837        mdp->phy_interface = pd->phy_interface;
1838        /* EDMAC endian */
1839        mdp->edmac_endian = pd->edmac_endian;
1840        mdp->no_ether_link = pd->no_ether_link;
1841        mdp->ether_link_active_low = pd->ether_link_active_low;
1842        mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
1843
1844        /* set cpu data */
1845#if defined(SH_ETH_HAS_BOTH_MODULES)
1846        mdp->cd = sh_eth_get_cpu_data(mdp);
1847#else
1848        mdp->cd = &sh_eth_my_cpu_data;
1849#endif
1850        sh_eth_set_default_cpu_data(mdp->cd);
1851
1852        /* set function */
1853        ndev->netdev_ops = &sh_eth_netdev_ops;
1854        SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
1855        ndev->watchdog_timeo = TX_TIMEOUT;
1856
1857        /* debug message level */
1858        mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
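            /* The POST bit-fields are per device (two bits each), so
             * shift by twice the device number.
             */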
1859        mdp->post_rx = POST_RX >> (devno << 1);
1860        mdp->post_fw = POST_FW >> (devno << 1);
1861
1862        /* read and set MAC address */
1863        read_mac_address(ndev, pd->mac_addr);
1864
1865        /* First device only init */
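            /* The TSU block is shared by both Ether ports, so map and
             * initialize it only while probing device 0.
             */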
1866        if (!devno) {
1867                if (mdp->cd->tsu) {
1868                        struct resource *rtsu;
1869                        rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1870                        if (!rtsu) {
1871                                dev_err(&pdev->dev, "TSU resource not found\n");
                                    ret = -ENODEV;
1872                                goto out_release;
1873                        }
1874                        mdp->tsu_addr = ioremap(rtsu->start,
1875                                                resource_size(rtsu));
                            if (mdp->tsu_addr == NULL) {
                                    ret = -ENOMEM;
                                    dev_err(&pdev->dev, "TSU ioremap failed.\n");
                                    goto out_release;
                            }
1876                }
1877                if (mdp->cd->chip_reset)
1878                        mdp->cd->chip_reset(ndev);
1879
1880                if (mdp->cd->tsu) {
1881                        /* TSU init (first device only) */
1882                        sh_eth_tsu_init(mdp);
1883                }
1884        }
1885
1886        /* network device register */
1887        ret = register_netdev(ndev);
1888        if (ret)
1889                goto out_release;
1890
1891        /* mdio bus init */
1892        ret = sh_mdio_init(ndev, pdev->id, pd);
1893        if (ret)
1894                goto out_unregister;
1895
1896        /* print device information */
1897        pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
1898               (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
1899
1900        platform_set_drvdata(pdev, ndev);
1901
1902        return ret;
1903
1904out_unregister:
1905        unregister_netdev(ndev);
1906
1907out_release:
1908        /* net_dev free */
1909        if (mdp && mdp->addr)
1910                iounmap(mdp->addr);
1911        if (mdp && mdp->tsu_addr)
1912                iounmap(mdp->tsu_addr);
1913        if (ndev)
1914                free_netdev(ndev);
1915
1916out:
1917        return ret;
1918}
1919
1920static int sh_eth_drv_remove(struct platform_device *pdev)
1921{
1922        struct net_device *ndev = platform_get_drvdata(pdev);
1923        struct sh_eth_private *mdp = netdev_priv(ndev);
1924
1925        if (mdp->tsu_addr)
                    iounmap(mdp->tsu_addr);
1926        sh_mdio_release(ndev);
1927        unregister_netdev(ndev);
1928        pm_runtime_disable(&pdev->dev);
1929        iounmap(mdp->addr);
1930        free_netdev(ndev);
1931        platform_set_drvdata(pdev, NULL);
1932
1933        return 0;
1934}
1935
1936static int sh_eth_runtime_nop(struct device *dev)
1937{
1938        /*
1939         * Runtime PM callback shared between ->runtime_suspend()
1940         * and ->runtime_resume(). Simply returns success.
1941         *
1942         * This driver re-initializes all registers after
1943         * pm_runtime_get_sync() anyway so there is no need
1944         * to save and restore registers here.
1945         */
1946        return 0;
1947}
1948
1949static const struct dev_pm_ops sh_eth_dev_pm_ops = {
1950        .runtime_suspend = sh_eth_runtime_nop,
1951        .runtime_resume = sh_eth_runtime_nop,
1952};
1953
1954static struct platform_driver sh_eth_driver = {
1955        .probe = sh_eth_drv_probe,
1956        .remove = sh_eth_drv_remove,
1957        .driver = {
1958                   .name = CARDNAME,
1959                   .pm = &sh_eth_dev_pm_ops,
1960        },
1961};
1962
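    /* module_platform_driver() expands to the module init/exit routines
     * that register and unregister sh_eth_driver.
     */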
1963module_platform_driver(sh_eth_driver);
1964
1965MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
1966MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
1967MODULE_LICENSE("GPL v2");
1968