linux/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptor counts for the Tx and Rx DMA rings - powers of 2 give the best
 * performance
 */
#define TX_BD_NUM		64
#define RX_BD_NUM		128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		32

/* Match table for of_platform binding */
static struct of_device_id axienet_of_match[] __devinitdata = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

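/* The table above is terminated by the all-zero entry and is walked by
 * axienet_setoptions() further below, which read-modify-writes each
 * referenced register using the m_or mask.
 */
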
/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 *
 * returns: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return in_be32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp:		Pointer to axienet local structure
 * @reg:	Address offset from the base address of the Axi DMA core
 * @value:	Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
				     off_t reg, u32 value)
{
	out_be32((lp->dma_regs + reg), value);
}

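/* Note: in_be32()/out_be32() are the big-endian MMIO accessors provided on
 * Microblaze and PowerPC, the processors typically hosted by the FPGA
 * families named in the header above; this is a portability caveat if the
 * driver is ever reused on a little-endian platform.
 */
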
/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:	Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi
 * Ethernet driver's stop routine is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	if (lp->rx_bd_v) {
		for (i = 0; i < RX_BD_NUM; i++) {
			dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
			dev_kfree_skb((struct sk_buff *)
				      (lp->rx_bd_v[i].sw_id_offset));
		}
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
				  lp->rx_bd_v,
				  lp->rx_bd_p);
	}
	if (lp->tx_bd_v) {
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
				  lp->tx_bd_v,
				  lp->tx_bd_p);
	}
}

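/* axienet_dma_bd_release() is also invoked from the error path of
 * axienet_dma_bd_init() below, where the rings may be only partially
 * allocated - hence the NULL checks before touching each ring.
 */
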
/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:	Pointer to the net_device structure
 *
 * returns: 0, on success
 *          -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	u32 cr;
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
					 &lp->tx_bd_p,
					 GFP_KERNEL);
	if (!lp->tx_bd_v) {
		dev_err(&ndev->dev,
			"unable to allocate DMA Tx buffer descriptors\n");
		goto out;
	}

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
					 &lp->rx_bd_p,
					 GFP_KERNEL);
	if (!lp->rx_bd_v) {
		dev_err(&ndev->dev,
			"unable to allocate DMA Rx buffer descriptors\n");
		goto out;
	}

	memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
	for (i = 0; i < TX_BD_NUM; i++) {
		lp->tx_bd_v[i].next = lp->tx_bd_p +
				      sizeof(*lp->tx_bd_v) *
				      ((i + 1) % TX_BD_NUM);
	}

	memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
	for (i = 0; i < RX_BD_NUM; i++) {
		lp->rx_bd_v[i].next = lp->rx_bd_p +
				      sizeof(*lp->rx_bd_v) *
				      ((i + 1) % RX_BD_NUM);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb) {
			dev_err(&ndev->dev, "alloc_skb error %d\n", i);
			goto out;
		}

		lp->rx_bd_v[i].sw_id_offset = (u32) skb;
		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
						     skb->data,
						     lp->max_frm_size,
						     DMA_FROM_DEVICE);
		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	/* Start updating the Rx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception. */
	axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			  (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * The Tx channel is now ready to run, but it will only start
	 * transmitting after the tail pointer register is written. */
	axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

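/* Ring layout after axienet_dma_bd_init() (sketch): each BD's "next" field
 * holds the physical address of the following BD and the last BD wraps to
 * the first, so the DMA engine can walk the ring without CPU assistance:
 *
 *	bd[0] -> bd[1] -> ... -> bd[N - 1]
 *	  ^                         |
 *	  +-------------------------+
 */
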
/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:	Pointer to the net_device structure
 * @address:	6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev, void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	if (!is_valid_ether_addr(ndev->dev_addr))
		random_ether_addr(ndev->dev_addr);

	/* Set up the unicast MAC address filter with the MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		     (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:	Pointer to the net_device structure
 * @p:		6 byte Address to be written as MAC address
 *
 * returns: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it. */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}

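/* CAM addressing note: the low byte of the FMI register selects which of
 * the XAE_MULTICAST_CAM_TABLE_NUM entries the subsequent AF0/AF1 writes
 * target, which is why the code above masks FMI with 0xFFFFFF00 and ORs in
 * the entry index before each address write.
 */
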
/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:	Pointer to the net_device structure
 * @options:	Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. Typical options are the jumbo frame option, basic VLAN option,
 * promiscuous mode option etc. This function is used to set or clear these
 * options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

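/* Usage sketch (illustrative, not a call site in this file): to enable
 * jumbo frames one would OR the option into the cached set and re-apply:
 *
 *	axienet_setoptions(ndev, lp->options | XAE_OPTION_JUMBO);
 *
 * Note that axienet_setoptions() accumulates into lp->options but never
 * clears bits from it; callers that want to disable an option pass a
 * mask with the bit removed, as the reset and stop paths do.
 */
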
static void __axienet_device_reset(struct axienet_local *lp,
				   struct device *dev, off_t offset)
{
	u32 timeout;
	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process. */
	axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
	timeout = DELAY_OF_ONE_MILLISEC;
	while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(dev, "axienet_device_reset DMA reset timeout!\n");
			break;
		}
	}
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU) &&
	    (lp->jumbo_support)) {
		lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
				   XAE_TRL_SIZE;
		lp->options |= XAE_OPTION_JUMBO;
	}

	if (axienet_dma_bd_init(ndev)) {
		dev_err(&ndev->dev,
			"axienet_device_reset descriptor allocation failed\n");
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled. */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	ndev->trans_start = jiffies;
}

/**
 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to change the speed and duplex setting after
 * auto negotiation is done by the PHY. This is the function that gets
 * registered with the PHY interface through the "of_phy_connect" call.
 */
static void axienet_adjust_link(struct net_device *ndev)
{
	u32 emmc_reg;
	u32 link_state;
	u32 setspeed = 1;
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;

	link_state = phy->speed | (phy->duplex << 1) | phy->link;
	if (lp->last_link != link_state) {
		if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
			if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
				setspeed = 0;
		} else {
			if ((phy->speed == SPEED_1000) &&
			    (lp->phy_type == XAE_PHY_TYPE_MII))
				setspeed = 0;
		}

		if (setspeed == 1) {
			emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
			emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

			switch (phy->speed) {
			case SPEED_1000:
				emmc_reg |= XAE_EMMC_LINKSPD_1000;
				break;
			case SPEED_100:
				emmc_reg |= XAE_EMMC_LINKSPD_100;
				break;
			case SPEED_10:
				emmc_reg |= XAE_EMMC_LINKSPD_10;
				break;
			default:
				dev_err(&ndev->dev,
					"Speeds other than 10, 100 or 1000 Mbps are not supported\n");
				break;
			}

			axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
			lp->last_link = link_state;
			phy_print_status(phy);
		} else {
			dev_err(&ndev->dev,
				"Error setting Axi Ethernet MAC speed\n");
		}
	}
}

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
	u32 size = 0;
	u32 packets = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;
	unsigned int status = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	status = cur_p->status;
	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				DMA_TO_DEVICE);
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		lp->tx_bd_ci = (lp->tx_bd_ci + 1) % TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;
	netif_wake_queue(ndev);
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp:		Pointer to the axienet_local structure
 * @num_frag:	The number of BDs to check for
 *
 * returns: 0, on success
 *          NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}

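/* Worked example: with TX_BD_NUM = 64, tx_bd_tail = 62 and num_frag = 4,
 * the probe above lands on BD (62 + 4) % 64 = 2. If that descriptor still
 * has status bits set it is owned by hardware, so the whole group of BDs
 * needed for the packet is treated as busy.
 */
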
/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:	sk_buff pointer that contains data to be Txed.
 * @ndev:	Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *          NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (axienet_check_tx_bd_space(lp, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);

	for (ii = 0; ii < num_frag; ii++) {
		lp->tx_bd_tail = (lp->tx_bd_tail + 1) % TX_BD_NUM;
		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];
		cur_p->phys = dma_map_single(ndev->dev.parent,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->app4 = (unsigned long)skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	/* Start the transfer */
	axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	lp->tx_bd_tail = (lp->tx_bd_tail + 1) % TX_BD_NUM;

	return NETDEV_TX_OK;
}

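/* Descriptor framing used above (sketch): the head BD carries
 * XAXIDMA_BD_CTRL_TXSOF_MASK plus the linear length, each page fragment
 * gets its own BD, and the final BD carries XAXIDMA_BD_CTRL_TXEOF_MASK
 * with the skb pointer stashed in app4 so axienet_start_xmit_done() can
 * free the skb once the hardware reports completion.
 */
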
/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 *		  BD processing.
 * @ndev:	Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	u32 packets = 0;
	dma_addr_t tail_p = 0;
	struct axienet_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct axidma_bd *cur_p;

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		/* Track the last completed BD so the tail pointer write
		 * below hands exactly the processed BDs back to hardware. */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
		skb = (struct sk_buff *) (cur_p->sw_id_offset);
		length = cur_p->app4 & 0x0000FFFF;

		dma_unmap_single(ndev->dev.parent, cur_p->phys,
				 lp->max_frm_size,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		skb->ip_summed = CHECKSUM_NONE;

		/* if we're doing Rx csum offload, set it up */
		if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
			csumstatus = (cur_p->app2 &
				      XAE_FULL_CSUM_STATUS_MASK) >> 3;
			if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
			    (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			}
		} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
			   skb->protocol == __constant_htons(ETH_P_IP) &&
			   skb->len > 64) {
			skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		netif_rx(skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!new_skb) {
			dev_err(&ndev->dev, "no memory for new sk_buff\n");
			return;
		}
		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     lp->max_frm_size,
					     DMA_FROM_DEVICE);
		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->sw_id_offset = (u32) new_skb;

		lp->rx_bd_ci = (lp->rx_bd_ci + 1) % RX_BD_NUM;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_start_xmit_done(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	return IRQ_HANDLED;
}

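/* Error recovery note: on XAXIDMA_IRQ_ERROR_MASK both ISRs above mask the
 * interrupts of both DMA channels and defer to lp->dma_err_tasklet, which
 * runs axienet_dma_err_handler() (bottom of this file) to reset the DMA
 * engines and rebuild the rings outside interrupt context.
 */
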
/**
 * axienet_open - Driver open routine.
 * @ndev:	Pointer to net_device structure
 *
 * returns: 0, on success.
 *	    -ENODEV, if PHY cannot be connected to
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phy_start to start the PHY
 * device. It also requests the Tx/Rx interrupt lines and installs the ISRs.
 * The Axi Ethernet core is reset through the Axi DMA core, and the buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret, mdio_mcreg;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. If MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards. */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
	axienet_device_reset(ndev);
	/* Enable the MDIO */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;

	if (lp->phy_node) {
		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
					     axienet_adjust_link, 0,
					     PHY_INTERFACE_MODE_GMII);
		if (!lp->phy_dev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(lp->phy_dev);
	}

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable tasklets for Axi DMA error handling */
	tasklet_enable(&lp->dma_err_tasklet);
	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev:	Pointer to net_device structure
 *
 * returns: 0, on success.
 *
 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_stop()\n");

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	tasklet_disable(&lp->dma_err_tasklet);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev:	Pointer to net_device structure
 * @new_mtu:	New mtu value to be applied
 *
 * returns: 0 on success, -EBUSY if the interface is running, or -EINVAL if
 *	    the requested mtu is out of range for the hardware.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;
	if (lp->jumbo_support) {
		if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
			return -EINVAL;
		ndev->mtu = new_mtu;
	} else {
		if ((new_mtu > XAE_MTU) || (new_mtu < 64))
			return -EINVAL;
		ndev->mtu = new_mtu;
	}

	return 0;
}

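/* The new MTU only takes effect at the next open: axienet_device_reset()
 * recomputes lp->max_frm_size from ndev->mtu (adding the VLAN header and
 * trailer sizes) and re-enables XAE_OPTION_JUMBO when the MTU exceeds
 * XAE_MTU and the hardware advertises jumbo support.
 */
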
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev:	Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled
 * prior to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
 * @ndev:	Pointer to net_device structure
 * @ecmd:	Pointer to ethtool_cmd structure
 *
 * This implements ethtool command for getting PHY settings. If PHY could
 * not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to get the PHY settings.
 * Issue "ethtool ethX" under linux prompt to execute this function.
 */
static int axienet_ethtools_get_settings(struct net_device *ndev,
					 struct ethtool_cmd *ecmd)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;

	if (!phydev)
		return -ENODEV;
	return phy_ethtool_gset(phydev, ecmd);
}

/**
 * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
 * @ndev:	Pointer to net_device structure
 * @ecmd:	Pointer to ethtool_cmd structure
 *
 * This implements ethtool command for setting various PHY settings. If PHY
 * could not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to set the PHY.
 * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
 * function.
 */
static int axienet_ethtools_set_settings(struct net_device *ndev,
					 struct ethtool_cmd *ecmd)
{
	struct axienet_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = lp->phy_dev;

	if (!phydev)
		return -ENODEV;
	return phy_ethtool_sset(phydev, ecmd);
}

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev:	Pointer to net_device structure
 * @ed:		Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	memset(ed, 0, sizeof(struct ethtool_drvinfo));
	strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
	ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev:	Pointer to net_device structure
 * @regs:	Pointer to ethtool_regs structure
 * @ret:	Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *) ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
	data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
	data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
	data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval;
	struct axienet_local *lp = netdev_priv(ndev);

	epauseparm->autoneg  = 0;
	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
	epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev:	Pointer to net_device structure
 * @epauseparm:	Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		printk(KERN_ERR "%s: Please stop netif before applying configuration\n",
		       ndev->name);
		return -EFAULT;
	}

	regval = axienet_ior(lp, XAE_FCC_OFFSET);
	if (epauseparm->tx_pause)
		regval |= XAE_FCC_FCTX_MASK;
	else
		regval &= ~XAE_FCC_FCTX_MASK;
	if (epauseparm->rx_pause)
		regval |= XAE_FCC_FCRX_MASK;
	else
		regval &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, regval);

	return 0;
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 */
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	u32 regval = 0;
	struct axienet_local *lp = netdev_priv(ndev);

	regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev:	Pointer to net_device structure
 * @ecoalesce:	Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 */
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
					 struct ethtool_coalesce *ecoalesce)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		printk(KERN_ERR "%s: Please stop netif before applying configuration\n",
		       ndev->name);
		return -EFAULT;
	}

	/* Only the frame-count knobs are backed by hardware; reject every
	 * other coalescing parameter. */
	if ((ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->rx_coalesce_usecs_irq) ||
	    (ecoalesce->rx_max_coalesced_frames_irq) ||
	    (ecoalesce->tx_coalesce_usecs) ||
	    (ecoalesce->tx_coalesce_usecs_irq) ||
	    (ecoalesce->tx_max_coalesced_frames_irq) ||
	    (ecoalesce->stats_block_coalesce_usecs) ||
	    (ecoalesce->use_adaptive_rx_coalesce) ||
	    (ecoalesce->use_adaptive_tx_coalesce) ||
	    (ecoalesce->pkt_rate_low) ||
	    (ecoalesce->rx_coalesce_usecs_low) ||
	    (ecoalesce->rx_max_coalesced_frames_low) ||
	    (ecoalesce->tx_coalesce_usecs_low) ||
	    (ecoalesce->tx_max_coalesced_frames_low) ||
	    (ecoalesce->pkt_rate_high) ||
	    (ecoalesce->rx_coalesce_usecs_high) ||
	    (ecoalesce->rx_max_coalesced_frames_high) ||
	    (ecoalesce->tx_coalesce_usecs_high) ||
	    (ecoalesce->tx_max_coalesced_frames_high) ||
	    (ecoalesce->rate_sample_interval))
		return -EOPNOTSUPP;
	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.get_settings	= axienet_ethtools_get_settings,
	.set_settings	= axienet_ethtools_set_settings,
	.get_drvinfo	= axienet_ethtools_get_drvinfo,
	.get_regs_len	= axienet_ethtools_get_regs_len,
	.get_regs	= axienet_ethtools_get_regs,
	.get_link	= ethtool_op_get_link,
	.get_pauseparam	= axienet_ethtools_get_pauseparam,
	.set_pauseparam	= axienet_ethtools_set_pauseparam,
	.get_coalesce	= axienet_ethtools_get_coalesce,
	.set_coalesce	= axienet_ethtools_set_coalesce,
};

1338/**
1339 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
 * @data:       Pointer to the axienet_local structure, cast to unsigned long
1341 *
1342 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
1343 * Tx/Rx BDs.
1344 */
1345static void axienet_dma_err_handler(unsigned long data)
1346{
1347        u32 axienet_status;
1348        u32 cr, i;
1349        int mdio_mcreg;
1350        struct axienet_local *lp = (struct axienet_local *) data;
1351        struct net_device *ndev = lp->ndev;
1352        struct axidma_bd *cur_p;
1353
1354        axienet_setoptions(ndev, lp->options &
1355                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1356        mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1357        axienet_mdio_wait_until_ready(lp);
        /* Disable the MDIO interface until the Axi Ethernet reset has
         * completed. An Axi Ethernet reset resets the complete core,
         * including the MDIO; if MDIO is not disabled when the reset
         * starts, MDIO will be broken afterwards.
         */
1362        axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
1363                    ~XAE_MDIO_MC_MDIOEN_MASK));
1364
1365        __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
1366        __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
1367
1368        axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
1369        axienet_mdio_wait_until_ready(lp);
1370
1371        for (i = 0; i < TX_BD_NUM; i++) {
1372                cur_p = &lp->tx_bd_v[i];
1373                if (cur_p->phys)
1374                        dma_unmap_single(ndev->dev.parent, cur_p->phys,
1375                                         (cur_p->cntrl &
1376                                          XAXIDMA_BD_CTRL_LENGTH_MASK),
1377                                         DMA_TO_DEVICE);
1378                if (cur_p->app4)
1379                        dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
1380                cur_p->phys = 0;
1381                cur_p->cntrl = 0;
1382                cur_p->status = 0;
1383                cur_p->app0 = 0;
1384                cur_p->app1 = 0;
1385                cur_p->app2 = 0;
1386                cur_p->app3 = 0;
1387                cur_p->app4 = 0;
1388                cur_p->sw_id_offset = 0;
1389        }
1390
1391        for (i = 0; i < RX_BD_NUM; i++) {
1392                cur_p = &lp->rx_bd_v[i];
1393                cur_p->status = 0;
1394                cur_p->app0 = 0;
1395                cur_p->app1 = 0;
1396                cur_p->app2 = 0;
1397                cur_p->app3 = 0;
1398                cur_p->app4 = 0;
1399        }
1400
1401        lp->tx_bd_ci = 0;
1402        lp->tx_bd_tail = 0;
1403        lp->rx_bd_ci = 0;
1404
1405        /* Start updating the Rx channel control register */
1406        cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1407        /* Update the interrupt coalesce count */
1408        cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
1409              (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1410        /* Update the delay timer count */
1411        cr = ((cr & ~XAXIDMA_DELAY_MASK) |
1412              (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1413        /* Enable coalesce, delay timer and error interrupts */
1414        cr |= XAXIDMA_IRQ_ALL_MASK;
1415        /* Finally write to the Rx channel control register */
1416        axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1417
1418        /* Start updating the Tx channel control register */
1419        cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1420        /* Update the interrupt coalesce count */
1421        cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
1422              (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1423        /* Update the delay timer count */
1424        cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
1425              (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1426        /* Enable coalesce, delay timer and error interrupts */
1427        cr |= XAXIDMA_IRQ_ALL_MASK;
1428        /* Finally write to the Tx channel control register */
1429        axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1430
        /* Populate the tail pointer and bring the Rx Axi DMA engine out of
         * halted state. This will make the Rx side ready for reception.
         */
1433        axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
1434        cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1435        axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
1436                          cr | XAXIDMA_CR_RUNSTOP_MASK);
1437        axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
1438                          (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
1439
        /* Write to the RS (Run-stop) bit in the Tx channel control register.
         * The Tx channel is now ready to run, but only after we write to
         * the tail pointer register will the Tx channel start transmitting.
         */
1443        axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
1444        cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1445        axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
1446                          cr | XAXIDMA_CR_RUNSTOP_MASK);
1447
1448        axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
1449        axienet_status &= ~XAE_RCW1_RX_MASK;
1450        axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
1451
1452        axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
1453        if (axienet_status & XAE_INT_RXRJECT_MASK)
1454                axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
1455        axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
1456
        /* Sync default options with HW but leave receiver and
         * transmitter disabled.
         */
1459        axienet_setoptions(ndev, lp->options &
1460                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1461        axienet_set_mac_address(ndev, NULL);
1462        axienet_set_multicast_list(ndev);
1463        axienet_setoptions(ndev, lp->options);
1464}
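
/* A sketch of how this tasklet is typically reached: the Tx/Rx hard IRQ
 * handlers earlier in this file check the DMA channel status for an error
 * condition and defer the reset work done above, roughly:
 *
 *      status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
 *      if (status & XAXIDMA_IRQ_ERROR_MASK)
 *              tasklet_schedule(&lp->dma_err_tasklet);
 */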
1465
1466/**
1467 * axienet_of_probe - Axi Ethernet probe function.
1468 * @op:         Pointer to platform device structure.
1470 *
1471 * returns: 0, on success
1472 *          Non-zero error value on failure.
1473 *
 * This is the probe routine for the Axi Ethernet driver. It is called before
 * any other driver routines are invoked. It allocates and sets up the
 * Ethernet device, parses the device tree to populate the fields of
 * axienet_local, and registers the Ethernet device.
1478 */
1479static int __devinit axienet_of_probe(struct platform_device *op)
1480{
1481        __be32 *p;
1482        int size, ret = 0;
1483        struct device_node *np;
1484        struct axienet_local *lp;
1485        struct net_device *ndev;
1486        const void *addr;
1487
1488        ndev = alloc_etherdev(sizeof(*lp));
1489        if (!ndev)
1490                return -ENOMEM;
1491
1493        dev_set_drvdata(&op->dev, ndev);
1494
1495        SET_NETDEV_DEV(ndev, &op->dev);
1496        ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
1497        ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
1498        ndev->netdev_ops = &axienet_netdev_ops;
1499        ndev->ethtool_ops = &axienet_ethtool_ops;
1500
1501        lp = netdev_priv(ndev);
1502        lp->ndev = ndev;
1503        lp->dev = &op->dev;
1504        lp->options = XAE_OPTION_DEFAULTS;
1505        /* Map device registers */
1506        lp->regs = of_iomap(op->dev.of_node, 0);
1507        if (!lp->regs) {
1508                dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
                ret = -ENOMEM;
                goto nodev;
1510        }
1511        /* Setup checksum offload, but default to off if not specified */
1512        lp->features = 0;
1513
1514        p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
1515        if (p) {
1516                switch (be32_to_cpup(p)) {
1517                case 1:
1518                        lp->csum_offload_on_tx_path =
1519                                XAE_FEATURE_PARTIAL_TX_CSUM;
1520                        lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
1521                        /* Can checksum TCP/UDP over IPv4. */
1522                        ndev->features |= NETIF_F_IP_CSUM;
1523                        break;
1524                case 2:
1525                        lp->csum_offload_on_tx_path =
1526                                XAE_FEATURE_FULL_TX_CSUM;
1527                        lp->features |= XAE_FEATURE_FULL_TX_CSUM;
1528                        /* Can checksum TCP/UDP over IPv4. */
1529                        ndev->features |= NETIF_F_IP_CSUM;
1530                        break;
1531                default:
1532                        lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
1533                }
1534        }
1535        p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
1536        if (p) {
1537                switch (be32_to_cpup(p)) {
1538                case 1:
1539                        lp->csum_offload_on_rx_path =
1540                                XAE_FEATURE_PARTIAL_RX_CSUM;
1541                        lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
1542                        break;
1543                case 2:
1544                        lp->csum_offload_on_rx_path =
1545                                XAE_FEATURE_FULL_RX_CSUM;
1546                        lp->features |= XAE_FEATURE_FULL_RX_CSUM;
1547                        break;
1548                default:
1549                        lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
1550                }
1551        }
        /* To support jumbo frames, the Axi Ethernet hardware must have
         * sufficient Rx/Tx memory, typically at least 16384 bytes, before
         * the jumbo option can be enabled. Check the Rx/Tx memory size
         * configured in the hardware from the device tree and set the
         * jumbo flag accordingly.
         */
1558        p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
1559        if (p) {
1560                if ((be32_to_cpup(p)) >= 0x4000)
1561                        lp->jumbo_support = 1;
1562        }
1563        p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
1564                                       NULL);
1565        if (p)
1566                lp->temac_type = be32_to_cpup(p);
1567        p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
1568        if (p)
1569                lp->phy_type = be32_to_cpup(p);
1570
1571        /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
1572        np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
1573        if (!np) {
1574                dev_err(&op->dev, "could not find DMA node\n");
1575                goto err_iounmap;
1576        }
        lp->dma_regs = of_iomap(np, 0);
        if (lp->dma_regs) {
                dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
        } else {
                dev_err(&op->dev, "unable to map DMA registers\n");
                of_node_put(np);
                ret = -ENOMEM;
                goto err_iounmap;
        }
1584        lp->rx_irq = irq_of_parse_and_map(np, 1);
1585        lp->tx_irq = irq_of_parse_and_map(np, 0);
1586        of_node_put(np);
1587        if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
1588                dev_err(&op->dev, "could not determine irqs\n");
                ret = -ENODEV;
1590                goto err_iounmap_2;
1591        }
1592
1593        /* Retrieve the MAC address */
1594        addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
        if (!addr || (size != ETH_ALEN)) {
1596                dev_err(&op->dev, "could not find MAC address\n");
1597                ret = -ENODEV;
1598                goto err_iounmap_2;
1599        }
1600        axienet_set_mac_address(ndev, (void *) addr);
1601
1602        lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
1603        lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
1604
1605        lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
1606        ret = axienet_mdio_setup(lp, op->dev.of_node);
1607        if (ret)
1608                dev_warn(&op->dev, "error registering MDIO bus\n");
1609
1610        ret = register_netdev(lp->ndev);
1611        if (ret) {
1612                dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
1613                goto err_iounmap_2;
1614        }
1615
1616        tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
1617                     (unsigned long) lp);
1618        tasklet_disable(&lp->dma_err_tasklet);
1619
1620        return 0;
1621
1622err_iounmap_2:
1623        if (lp->dma_regs)
1624                iounmap(lp->dma_regs);
1625err_iounmap:
1626        iounmap(lp->regs);
1627nodev:
        free_netdev(ndev);
1630        return ret;
1631}
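
/* An illustrative, hypothetical device-tree node of the shape consumed by
 * axienet_of_probe() above; the property names match those parsed there,
 * while the addresses and phandles are made up:
 *
 *      axi_ethernet: ethernet@40c00000 {
 *              compatible = "xlnx,axi-ethernet-1.00.a";
 *              reg = <0x40c00000 0x40000>;
 *              local-mac-address = [00 0a 35 00 00 01];
 *              xlnx,txcsum = <1>;      (1 = partial, 2 = full csum offload)
 *              xlnx,rxcsum = <1>;
 *              xlnx,rxmem = <0x8000>;  (>= 0x4000 enables jumbo support)
 *              phy-handle = <&phy0>;
 *              axistream-connected = <&axi_dma>;
 *      };
 */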
1632
1633static int __devexit axienet_of_remove(struct platform_device *op)
1634{
1635        struct net_device *ndev = dev_get_drvdata(&op->dev);
1636        struct axienet_local *lp = netdev_priv(ndev);
1637
1638        axienet_mdio_teardown(lp);
1639        unregister_netdev(ndev);
1640
1641        if (lp->phy_node)
1642                of_node_put(lp->phy_node);
1643        lp->phy_node = NULL;
1644
1645        dev_set_drvdata(&op->dev, NULL);
1646
1647        iounmap(lp->regs);
1648        if (lp->dma_regs)
1649                iounmap(lp->dma_regs);
1650        free_netdev(ndev);
1651
1652        return 0;
1653}
1654
1655static struct platform_driver axienet_of_driver = {
1656        .probe = axienet_of_probe,
1657        .remove = __devexit_p(axienet_of_remove),
1658        .driver = {
1659                 .owner = THIS_MODULE,
1660                 .name = "xilinx_axienet",
1661                 .of_match_table = axienet_of_match,
1662        },
1663};
1664
1665module_platform_driver(axienet_of_driver);
1666
1667MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
1668MODULE_AUTHOR("Xilinx");
1669MODULE_LICENSE("GPL");
1670