linux/drivers/net/ethernet/ezchip/nps_enet.c
/*
 * Copyright(c) 2015 EZchip Technologies.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include "nps_enet.h"

#define DRV_NAME                        "nps_mgt_enet"

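/* A Tx completion is pending when the hardware has cleared the Tx control
 * CT bit (the transmit we triggered has finished) while the driver still
 * holds the transmitted skb, i.e. the completion has not been handled yet.
 */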
static inline bool nps_enet_is_tx_pending(struct nps_enet_priv *priv)
{
        u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
        u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;

        return (!tx_ctrl_ct && priv->tx_skb);
}

static void nps_enet_clean_rx_fifo(struct net_device *ndev, u32 frame_len)
{
        struct nps_enet_priv *priv = netdev_priv(ndev);
        u32 i, len = DIV_ROUND_UP(frame_len, sizeof(u32));

        /* Empty Rx FIFO buffer by reading all words */
        for (i = 0; i < len; i++)
                nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
}

static void nps_enet_read_rx_fifo(struct net_device *ndev,
                                  unsigned char *dst, u32 length)
{
        struct nps_enet_priv *priv = netdev_priv(ndev);
        s32 i, last = length & (sizeof(u32) - 1);
        u32 *reg = (u32 *)dst, len = length / sizeof(u32);
        bool dst_is_aligned = IS_ALIGNED((unsigned long)dst, sizeof(u32));

        /* In case dst is not aligned we need an intermediate buffer */
        if (dst_is_aligned) {
                ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, reg, len);
                reg += len;
        } else { /* !dst_is_aligned */
                for (i = 0; i < len; i++, reg++) {
                        u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);

                        put_unaligned_be32(buf, reg);
                }
        }
        /* copy last bytes (if any) */
        if (last) {
                u32 buf;

                ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, &buf, 1);
                memcpy((u8 *)reg, &buf, last);
        }
}

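/**
 * nps_enet_rx_handler - Handle one received frame, if any.
 * @ndev:       Pointer to the network device.
 *
 * Reads the Rx control register; if a frame is ready it is either dropped
 * (on error, bad length or allocation failure, draining the Rx FIFO) or
 * copied from the Rx FIFO into a fresh skb and passed up the stack. The Rx
 * control register is acknowledged in both cases.
 *
 * returns:     Number of frames processed (0 or 1).
 */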
static u32 nps_enet_rx_handler(struct net_device *ndev)
{
        u32 frame_len, err = 0;
        u32 work_done = 0;
        struct nps_enet_priv *priv = netdev_priv(ndev);
        struct sk_buff *skb;
        u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
        u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
        u32 rx_ctrl_er = (rx_ctrl_value & RX_CTL_ER_MASK) >> RX_CTL_ER_SHIFT;
        u32 rx_ctrl_crc = (rx_ctrl_value & RX_CTL_CRC_MASK) >> RX_CTL_CRC_SHIFT;

        frame_len = (rx_ctrl_value & RX_CTL_NR_MASK) >> RX_CTL_NR_SHIFT;

        /* Check if we got RX */
        if (!rx_ctrl_cr)
                return work_done;

        /* If we got here there is work for us */
        work_done++;

        /* Check Rx error */
        if (rx_ctrl_er) {
                ndev->stats.rx_errors++;
                err = 1;
        }

        /* Check Rx CRC error */
        if (rx_ctrl_crc) {
                ndev->stats.rx_crc_errors++;
                ndev->stats.rx_dropped++;
                err = 1;
        }

        /* Check frame length: minimum 64 bytes on the wire
         * (ETH_ZLEN excludes the 4-byte FCS)
         */
        if (unlikely(frame_len < ETH_ZLEN)) {
                ndev->stats.rx_length_errors++;
                ndev->stats.rx_dropped++;
                err = 1;
        }

        if (err)
                goto rx_irq_clean;

        /* Skb allocation */
        skb = netdev_alloc_skb_ip_align(ndev, frame_len);
        if (unlikely(!skb)) {
                ndev->stats.rx_errors++;
                ndev->stats.rx_dropped++;
                goto rx_irq_clean;
        }

        /* Copy frame from Rx fifo into the skb */
        nps_enet_read_rx_fifo(ndev, skb->data, frame_len);

        skb_put(skb, frame_len);
        skb->protocol = eth_type_trans(skb, ndev);
        skb->ip_summed = CHECKSUM_UNNECESSARY;

        ndev->stats.rx_packets++;
        ndev->stats.rx_bytes += frame_len;
        netif_receive_skb(skb);

        goto rx_irq_frame_done;

rx_irq_clean:
        /* Clean Rx fifo */
        nps_enet_clean_rx_fifo(ndev, frame_len);

rx_irq_frame_done:
        /* Ack Rx ctrl register */
        nps_enet_reg_set(priv, NPS_ENET_REG_RX_CTL, 0);

        return work_done;
}

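/**
 * nps_enet_tx_handler - Handle a Tx completion, if one is pending.
 * @ndev:       Pointer to the network device.
 *
 * Acknowledges the Tx control register, updates the Tx statistics, frees
 * the transmitted skb and wakes the Tx queue so the next frame can be sent.
 */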
static void nps_enet_tx_handler(struct net_device *ndev)
{
        struct nps_enet_priv *priv = netdev_priv(ndev);
        u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
        u32 tx_ctrl_et = (tx_ctrl_value & TX_CTL_ET_MASK) >> TX_CTL_ET_SHIFT;
        u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT;

        /* Check if we have a Tx completion to handle */
        if (!nps_enet_is_tx_pending(priv))
                return;

        /* Ack Tx ctrl register */
        nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0);

        /* Check Tx transmit error */
        if (unlikely(tx_ctrl_et)) {
                ndev->stats.tx_errors++;
        } else {
                ndev->stats.tx_packets++;
                ndev->stats.tx_bytes += tx_ctrl_nt;
        }

        dev_kfree_skb(priv->tx_skb);
        priv->tx_skb = NULL;

        if (netif_queue_stopped(ndev))
                netif_wake_queue(ndev);
}

/**
 * nps_enet_poll - NAPI poll handler.
 * @napi:       Pointer to napi_struct structure.
 * @budget:     How many frames to process on one call.
 *
 * returns:     Number of processed frames
 */
static int nps_enet_poll(struct napi_struct *napi, int budget)
{
        struct net_device *ndev = napi->dev;
        struct nps_enet_priv *priv = netdev_priv(ndev);
        u32 work_done;

        nps_enet_tx_handler(ndev);
        work_done = nps_enet_rx_handler(ndev);
        if ((work_done < budget) && napi_complete_done(napi, work_done)) {
                u32 buf_int_enable_value = 0;

                /* set tx_done and rx_rdy bits */
                buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
                buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;

                nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
                                 buf_int_enable_value);

                /* A Tx interrupt that fires while interrupts are masked is
                 * lost, since Tx is an edge interrupt. Specifically, a Tx
                 * completion that arrives between nps_enet_tx_handler() above
                 * and the interrupt enable would leave transmission stuck
                 * until the next Rx interrupt. Handle this by masking the
                 * interrupts again and re-adding ourselves to the poll list.
                 */
                if (nps_enet_is_tx_pending(priv)) {
                        nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
                        napi_reschedule(napi);
                }
        }

        return work_done;
}

/**
 * nps_enet_irq_handler - Global interrupt handler for ENET.
 * @irq:                irq number.
 * @dev_instance:       device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * EZchip ENET has two interrupt causes, and the bits raised in the CTRL
 * registers tell us which cause made the interrupt fire: one for Rx and
 * the other for Tx (completion).
 */
static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
{
        struct net_device *ndev = dev_instance;
        struct nps_enet_priv *priv = netdev_priv(ndev);
        u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
        u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;

        if (nps_enet_is_tx_pending(priv) || rx_ctrl_cr)
                if (likely(napi_schedule_prep(&priv->napi))) {
                        nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
                        __napi_schedule(&priv->napi);
                }

        return IRQ_HANDLED;
}

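/**
 * nps_enet_set_hw_mac_address - Program the device MAC address into HW.
 * @ndev:       Pointer to the network device.
 *
 * Octets 0-3 go into GE MAC CFG 1, octets 4-5 into the cached GE MAC CFG 2
 * value, which is then written back to the device.
 */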
static void nps_enet_set_hw_mac_address(struct net_device *ndev)
{
        struct nps_enet_priv *priv = netdev_priv(ndev);
        u32 ge_mac_cfg_1_value = 0;
        u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value;

        /* set MAC address in HW */
        ge_mac_cfg_1_value |= ndev->dev_addr[0] << CFG_1_OCTET_0_SHIFT;
        ge_mac_cfg_1_value |= ndev->dev_addr[1] << CFG_1_OCTET_1_SHIFT;
        ge_mac_cfg_1_value |= ndev->dev_addr[2] << CFG_1_OCTET_2_SHIFT;
        ge_mac_cfg_1_value |= ndev->dev_addr[3] << CFG_1_OCTET_3_SHIFT;
        *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_4_MASK)
                 | ndev->dev_addr[4] << CFG_2_OCTET_4_SHIFT;
        *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_5_MASK)
                 | ndev->dev_addr[5] << CFG_2_OCTET_5_SHIFT;

        nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_1,
                         ge_mac_cfg_1_value);

        nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
                         *ge_mac_cfg_2_value);
}

/**
 * nps_enet_hw_reset - Reset the network device.
 * @ndev:       Pointer to the network device.
 *
 * This function resets the PCS and Tx FIFO.
 * The programming model is to set the relevant reset bits,
 * wait for some time for this to propagate and then clear
 * the reset bits. This way we ensure that the reset procedure
 * is done successfully by the device.
 */
static void nps_enet_hw_reset(struct net_device *ndev)
{
        struct nps_enet_priv *priv = netdev_priv(ndev);
        u32 ge_rst_value = 0, phase_fifo_ctl_value = 0;

        /* PCS reset sequence */
        ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
        nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
        usleep_range(10, 20);
        ge_rst_value = 0;
        nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);

        /* Tx fifo reset sequence */
        phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_RST_SHIFT;
        phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_INIT_SHIFT;
        nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
                         phase_fifo_ctl_value);
        usleep_range(10, 20);
        phase_fifo_ctl_value = 0;
        nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
                         phase_fifo_ctl_value);
}

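/**
 * nps_enet_hw_enable_control - Configure and enable the MAC.
 * @ndev:       Pointer to the network device.
 *
 * Enables statistics and destination-address filtering, limits the maximum
 * frame length, unmasks the Rx/Tx interrupts, programs the MAC address,
 * configures padding, CRC, IFG, preamble and flow control, and finally
 * enables the Rx and Tx paths.
 */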
static void nps_enet_hw_enable_control(struct net_device *ndev)
{
        struct nps_enet_priv *priv = netdev_priv(ndev);
        u32 ge_mac_cfg_0_value = 0, buf_int_enable_value = 0;
        u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value;
        u32 *ge_mac_cfg_3_value = &priv->ge_mac_cfg_3_value;
        s32 max_frame_length;

        /* Enable Rx and Tx statistics */
        *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_STAT_EN_MASK)
                 | NPS_ENET_GE_MAC_CFG_2_STAT_EN << CFG_2_STAT_EN_SHIFT;

        /* Discard packets with a different destination MAC address */
        *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
                 | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT;

        /* Discard multicast packets */
        *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
                 | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT;

        nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
                         *ge_mac_cfg_2_value);

        /* Discard packets bigger than the max frame length */
        max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
        if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
                *ge_mac_cfg_3_value =
                         (*ge_mac_cfg_3_value & ~CFG_3_MAX_LEN_MASK)
                         | max_frame_length << CFG_3_MAX_LEN_SHIFT;
        }

        /* Enable interrupts */
        buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
        buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
        nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
                         buf_int_enable_value);

        /* Write device MAC address to HW */
        nps_enet_set_hw_mac_address(ndev);

        /* Rx and Tx HW features */
        ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_PAD_EN_SHIFT;
        ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_CRC_EN_SHIFT;
        ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_CRC_STRIP_SHIFT;

        /* IFG configuration */
        ge_mac_cfg_0_value |=
                 NPS_ENET_GE_MAC_CFG_0_RX_IFG << CFG_0_RX_IFG_SHIFT;
        ge_mac_cfg_0_value |=
                 NPS_ENET_GE_MAC_CFG_0_TX_IFG << CFG_0_TX_IFG_SHIFT;

        /* preamble configuration */
        ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_PR_CHECK_EN_SHIFT;
        ge_mac_cfg_0_value |=
                 NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN << CFG_0_TX_PR_LEN_SHIFT;

        /* enable flow control frames */
        ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_FC_EN_SHIFT;
        ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_FC_EN_SHIFT;
        ge_mac_cfg_0_value |=
                 NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR << CFG_0_TX_FC_RETR_SHIFT;
        *ge_mac_cfg_3_value = (*ge_mac_cfg_3_value & ~CFG_3_CF_DROP_MASK)
                 | NPS_ENET_ENABLE << CFG_3_CF_DROP_SHIFT;

        /* Enable Rx and Tx */
        ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_EN_SHIFT;
        ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_EN_SHIFT;

        nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
                         *ge_mac_cfg_3_value);
        nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
                         ge_mac_cfg_0_value);
}

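/**
 * nps_enet_hw_disable_control - Mask interrupts and disable Rx/Tx.
 * @ndev:       Pointer to the network device.
 */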
static void nps_enet_hw_disable_control(struct net_device *ndev)
{
        struct nps_enet_priv *priv = netdev_priv(ndev);

        /* Disable interrupts */
        nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);

        /* Disable Rx and Tx */
        nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0, 0);
}

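/**
 * nps_enet_send_frame - Push a frame into the Tx FIFO and start transmission.
 * @ndev:       Pointer to the network device.
 * @skb:        The frame to transmit.
 *
 * Copies the frame word by word into the Tx buffer register, then writes the
 * frame length and the CT (start transmit) bit to the Tx control register.
 */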
static void nps_enet_send_frame(struct net_device *ndev,
                                struct sk_buff *skb)
{
        struct nps_enet_priv *priv = netdev_priv(ndev);
        u32 tx_ctrl_value = 0;
        short length = skb->len;
        u32 i, len = DIV_ROUND_UP(length, sizeof(u32));
        u32 *src = (void *)skb->data;
        bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32));

        /* In case src is not aligned we need an intermediate buffer */
        if (src_is_aligned)
                iowrite32_rep(priv->regs_base + NPS_ENET_REG_TX_BUF, src, len);
        else /* !src_is_aligned */
                for (i = 0; i < len; i++, src++)
                        nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF,
                                         get_unaligned_be32(src));

        /* Write the length of the Frame */
        tx_ctrl_value |= length << TX_CTL_NT_SHIFT;

        tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT;
        /* Send Frame */
        nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value);
}

/**
 * nps_enet_set_mac_address - Set the MAC address for this device.
 * @ndev:       Pointer to net_device structure.
 * @p:          6 byte Address to be written as MAC address.
 *
 * This function copies the HW address from the sockaddr structure to the
 * net_device structure and updates the address in HW.
 *
 * returns:     -EBUSY if the net device is busy or 0 if the address is set
 *              successfully.
 */
static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p)
{
        struct sockaddr *addr = p;
        s32 res;

        if (netif_running(ndev))
                return -EBUSY;

        res = eth_mac_addr(ndev, p);
        if (!res) {
                ether_addr_copy(ndev->dev_addr, addr->sa_data);
                nps_enet_set_hw_mac_address(ndev);
        }

        return res;
}

/**
 * nps_enet_set_rx_mode - Change the receive filtering mode.
 * @ndev:       Pointer to the network device.
 *
 * This function enables/disables promiscuous mode.
 */
static void nps_enet_set_rx_mode(struct net_device *ndev)
{
        struct nps_enet_priv *priv = netdev_priv(ndev);
        u32 ge_mac_cfg_2_value = priv->ge_mac_cfg_2_value;

        if (ndev->flags & IFF_PROMISC) {
                ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
                         | NPS_ENET_DISABLE << CFG_2_DISK_DA_SHIFT;
                ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
                         | NPS_ENET_DISABLE << CFG_2_DISK_MC_SHIFT;
        } else {
                ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
                         | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT;
                ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
                         | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT;
        }

        nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2_value);
}

/**
 * nps_enet_open - Open the network device.
 * @ndev:       Pointer to the network device.
 *
 * returns: 0 on success or negative error value on failure.
 *
 * This function sets the MAC address, requests and enables an IRQ
 * for the ENET device and starts the Tx queue.
 */
static s32 nps_enet_open(struct net_device *ndev)
{
        struct nps_enet_priv *priv = netdev_priv(ndev);
        s32 err;

        /* Reset private variables */
        priv->tx_skb = NULL;
        priv->ge_mac_cfg_2_value = 0;
        priv->ge_mac_cfg_3_value = 0;

        /* ge_mac_cfg_3 default values */
        priv->ge_mac_cfg_3_value |=
                 NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH << CFG_3_RX_IFG_TH_SHIFT;

        priv->ge_mac_cfg_3_value |=
                 NPS_ENET_GE_MAC_CFG_3_MAX_LEN << CFG_3_MAX_LEN_SHIFT;

        /* Disable HW device */
        nps_enet_hw_disable_control(ndev);

        /* irq Rx/Tx allocation */
        err = request_irq(priv->irq, nps_enet_irq_handler,
                          0, "enet-rx-tx", ndev);
        if (err)
                return err;

        napi_enable(&priv->napi);

        /* Enable HW device */
        nps_enet_hw_reset(ndev);
        nps_enet_hw_enable_control(ndev);

        netif_start_queue(ndev);

        return 0;
}

/**
 * nps_enet_stop - Close the network device.
 * @ndev:       Pointer to the network device.
 *
 * This function stops the Tx queue and disables interrupts for the ENET
 * device.
 */
static s32 nps_enet_stop(struct net_device *ndev)
{
        struct nps_enet_priv *priv = netdev_priv(ndev);

        napi_disable(&priv->napi);
        netif_stop_queue(ndev);
        nps_enet_hw_disable_control(ndev);
        free_irq(priv->irq, ndev);

        return 0;
}

/**
 * nps_enet_start_xmit - Starts the data transmission.
 * @skb:        sk_buff pointer that contains data to be transmitted.
 * @ndev:       Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK. The Tx queue is stopped until the frame
 *              completes, so NETDEV_TX_BUSY is never returned.
 *
 * This function is invoked from upper layers to initiate transmission.
 */
static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
                                       struct net_device *ndev)
{
        struct nps_enet_priv *priv = netdev_priv(ndev);

        /* This driver handles one frame at a time */
        netif_stop_queue(ndev);

        priv->tx_skb = skb;

        /* make sure tx_skb is actually written to the memory
         * before the HW is informed and the IRQ is fired.
         */
        wmb();

        nps_enet_send_frame(ndev, skb);

        return NETDEV_TX_OK;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
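/* Netpoll path: with the device IRQ masked, run the interrupt handler
 * directly so pending Rx/Tx work is still scheduled onto NAPI.
 */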
static void nps_enet_poll_controller(struct net_device *ndev)
{
        struct nps_enet_priv *priv = netdev_priv(ndev);

        disable_irq(priv->irq);
        nps_enet_irq_handler(priv->irq, ndev);
        enable_irq(priv->irq);
}
#endif

static const struct net_device_ops nps_netdev_ops = {
        .ndo_open               = nps_enet_open,
        .ndo_stop               = nps_enet_stop,
        .ndo_start_xmit         = nps_enet_start_xmit,
        .ndo_set_mac_address    = nps_enet_set_mac_address,
        .ndo_set_rx_mode        = nps_enet_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = nps_enet_poll_controller,
#endif
};

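/**
 * nps_enet_probe - Probe and initialize one ENET platform device.
 * @pdev:       Pointer to the platform device.
 *
 * Allocates the net_device, maps the register resource, reads the MAC
 * address and IRQ from the device tree, sets up NAPI and registers the
 * net device.
 *
 * returns: 0 on success, negative error value on failure.
 */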
static s32 nps_enet_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct net_device *ndev;
        struct nps_enet_priv *priv;
        s32 err = 0;
        const char *mac_addr;
        struct resource *res_regs;

        if (!dev->of_node)
                return -ENODEV;

        ndev = alloc_etherdev(sizeof(struct nps_enet_priv));
        if (!ndev)
                return -ENOMEM;

        platform_set_drvdata(pdev, ndev);
        SET_NETDEV_DEV(ndev, dev);
        priv = netdev_priv(ndev);

        /* The EZchip ENET-specific entries in the device structure. */
        ndev->netdev_ops = &nps_netdev_ops;
        ndev->watchdog_timeo = (400 * HZ / 1000);
        /* FIXME :: no multicast support yet */
        ndev->flags &= ~IFF_MULTICAST;

        res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->regs_base = devm_ioremap_resource(dev, res_regs);
        if (IS_ERR(priv->regs_base)) {
                err = PTR_ERR(priv->regs_base);
                goto out_netdev;
        }
        dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base);

        /* Set the MAC address from the device tree, or use a random one */
        mac_addr = of_get_mac_address(dev->of_node);
        if (mac_addr)
                ether_addr_copy(ndev->dev_addr, mac_addr);
        else
                eth_hw_addr_random(ndev);

        /* Get IRQ number */
        priv->irq = platform_get_irq(pdev, 0);
        if (priv->irq < 0) {
                dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
                err = -ENODEV;
                goto out_netdev;
        }

        netif_napi_add(ndev, &priv->napi, nps_enet_poll,
                       NPS_ENET_NAPI_POLL_WEIGHT);

        /* Register the net device. Should be the last thing in probe */
        err = register_netdev(ndev);
        if (err) {
                dev_err(dev, "Failed to register ndev for %s, err = 0x%08x\n",
                        ndev->name, (s32)err);
                goto out_netif_api;
        }

        dev_info(dev, "(rx/tx=%d)\n", priv->irq);
        return 0;

out_netif_api:
        netif_napi_del(&priv->napi);
out_netdev:
        if (err)
                free_netdev(ndev);

        return err;
}

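/**
 * nps_enet_remove - Unbind the driver from an ENET platform device.
 * @pdev:       Pointer to the platform device.
 *
 * Unregisters the net device, tears down NAPI and frees the net_device.
 */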
static s32 nps_enet_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct nps_enet_priv *priv = netdev_priv(ndev);

        unregister_netdev(ndev);
        netif_napi_del(&priv->napi);
        free_netdev(ndev);

        return 0;
}

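/* An illustrative device tree node for this driver (a sketch only; the
 * binding for "ezchip,nps-mgt-enet" is authoritative, and the address,
 * size and interrupt number below are made up):
 *
 *      ethernet@f0003000 {
 *              compatible = "ezchip,nps-mgt-enet";
 *              reg = <0xf0003000 0x44>;
 *              interrupts = <7>;
 *              mac-address = [ 00 11 22 33 44 55 ];
 *      };
 */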
static const struct of_device_id nps_enet_dt_ids[] = {
        { .compatible = "ezchip,nps-mgt-enet" },
        { /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, nps_enet_dt_ids);

static struct platform_driver nps_enet_driver = {
        .probe = nps_enet_probe,
        .remove = nps_enet_remove,
        .driver = {
                .name = DRV_NAME,
                .of_match_table  = nps_enet_dt_ids,
        },
};

module_platform_driver(nps_enet_driver);

MODULE_AUTHOR("EZchip Semiconductor");
MODULE_LICENSE("GPL v2");