linux/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptor counts for the Tx and Rx DMA rings - 2^n gives the best performance */
#define TX_BD_NUM               64
#define RX_BD_NUM               128

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME             "xaxienet"
#define DRIVER_DESCRIPTION      "Xilinx Axi Ethernet driver"
#define DRIVER_VERSION          "1.00a"

#define AXIENET_REGS_N          32

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
        { .compatible = "xlnx,axi-ethernet-1.00.a", },
        { .compatible = "xlnx,axi-ethernet-1.01.a", },
        { .compatible = "xlnx,axi-ethernet-2.01.a", },
        {},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
        /* Turn on jumbo packet support for both Rx and Tx */
        {
                .opt = XAE_OPTION_JUMBO,
                .reg = XAE_TC_OFFSET,
                .m_or = XAE_TC_JUM_MASK,
        }, {
                .opt = XAE_OPTION_JUMBO,
                .reg = XAE_RCW1_OFFSET,
                .m_or = XAE_RCW1_JUM_MASK,
        }, { /* Turn on VLAN packet support for both Rx and Tx */
                .opt = XAE_OPTION_VLAN,
                .reg = XAE_TC_OFFSET,
                .m_or = XAE_TC_VLAN_MASK,
        }, {
                .opt = XAE_OPTION_VLAN,
                .reg = XAE_RCW1_OFFSET,
                .m_or = XAE_RCW1_VLAN_MASK,
        }, { /* Turn on FCS stripping on receive packets */
                .opt = XAE_OPTION_FCS_STRIP,
                .reg = XAE_RCW1_OFFSET,
                .m_or = XAE_RCW1_FCS_MASK,
        }, { /* Turn on FCS insertion on transmit packets */
                .opt = XAE_OPTION_FCS_INSERT,
                .reg = XAE_TC_OFFSET,
                .m_or = XAE_TC_FCS_MASK,
        }, { /* Turn off length/type field checking on receive packets */
                .opt = XAE_OPTION_LENTYPE_ERR,
                .reg = XAE_RCW1_OFFSET,
                .m_or = XAE_RCW1_LT_DIS_MASK,
        }, { /* Turn on Rx flow control */
                .opt = XAE_OPTION_FLOW_CONTROL,
                .reg = XAE_FCC_OFFSET,
                .m_or = XAE_FCC_FCRX_MASK,
        }, { /* Turn on Tx flow control */
                .opt = XAE_OPTION_FLOW_CONTROL,
                .reg = XAE_FCC_OFFSET,
                .m_or = XAE_FCC_FCTX_MASK,
        }, { /* Turn on promiscuous frame filtering */
                .opt = XAE_OPTION_PROMISC,
                .reg = XAE_FMI_OFFSET,
                .m_or = XAE_FMI_PM_MASK,
        }, { /* Enable transmitter */
                .opt = XAE_OPTION_TXEN,
                .reg = XAE_TC_OFFSET,
                .m_or = XAE_TC_TX_MASK,
        }, { /* Enable receiver */
                .opt = XAE_OPTION_RXEN,
                .reg = XAE_RCW1_OFFSET,
                .m_or = XAE_RCW1_RX_MASK,
        },
        {}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp:         Pointer to axienet local structure
 * @reg:        Address offset from the base address of the Axi DMA core
 *
 * returns: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
        return in_be32(lp->dma_regs + reg);
}

/**
 * axienet_dma_out32 - Memory mapped Axi DMA register write.
 * @lp:         Pointer to axienet local structure
 * @reg:        Address offset from the base address of the Axi DMA core
 * @value:      Value to be written into the Axi DMA register
 *
 * This function writes the desired value into the corresponding Axi DMA
 * register.
 */
static inline void axienet_dma_out32(struct axienet_local *lp,
                                     off_t reg, u32 value)
{
        out_be32((lp->dma_regs + reg), value);
}
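
/* Note: in_be32()/out_be32() are the big-endian MMIO accessors available on
 * the PowerPC/Microblaze platforms this driver currently targets; a port to a
 * little-endian platform would presumably switch these helpers to
 * ioread32()/iowrite32() instead.
 */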

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev:       Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
 * driver stop routine is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
        int i;
        struct axienet_local *lp = netdev_priv(ndev);

        if (lp->rx_bd_v) {
                for (i = 0; i < RX_BD_NUM; i++) {
                        dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
                                         lp->max_frm_size, DMA_FROM_DEVICE);
                        dev_kfree_skb((struct sk_buff *)
                                      (lp->rx_bd_v[i].sw_id_offset));
                }
                dma_free_coherent(ndev->dev.parent,
                                  sizeof(*lp->rx_bd_v) * RX_BD_NUM,
                                  lp->rx_bd_v,
                                  lp->rx_bd_p);
        }
        if (lp->tx_bd_v) {
                dma_free_coherent(ndev->dev.parent,
                                  sizeof(*lp->tx_bd_v) * TX_BD_NUM,
                                  lp->tx_bd_v,
                                  lp->tx_bd_p);
        }
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev:       Pointer to the net_device structure
 *
 * returns: 0, on success
 *          -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
        u32 cr;
        int i;
        struct sk_buff *skb;
        struct axienet_local *lp = netdev_priv(ndev);

        /* Reset the indexes which are used for accessing the BDs */
        lp->tx_bd_ci = 0;
        lp->tx_bd_tail = 0;
        lp->rx_bd_ci = 0;

        /* Allocate the Tx and Rx buffer descriptors. */
        lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
                                         sizeof(*lp->tx_bd_v) * TX_BD_NUM,
                                         &lp->tx_bd_p,
                                         GFP_KERNEL);
        if (!lp->tx_bd_v) {
                dev_err(&ndev->dev, "unable to allocate DMA Tx buffer descriptors");
                goto out;
        }

        lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
                                         sizeof(*lp->rx_bd_v) * RX_BD_NUM,
                                         &lp->rx_bd_p,
                                         GFP_KERNEL);
        if (!lp->rx_bd_v) {
                dev_err(&ndev->dev, "unable to allocate DMA Rx buffer descriptors");
                goto out;
        }

        memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
        for (i = 0; i < TX_BD_NUM; i++) {
                lp->tx_bd_v[i].next = lp->tx_bd_p +
                                      sizeof(*lp->tx_bd_v) *
                                      ((i + 1) % TX_BD_NUM);
        }

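        /* Both rings are circular: each BD's "next" field holds the physical
         * address of the following BD, and the modulo arithmetic above (Tx)
         * and below (Rx) wraps the last descriptor back to the first. Each
         * Rx BD is additionally primed with a freshly mapped skb so the
         * hardware always has a buffer to DMA into.
         */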
        memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
        for (i = 0; i < RX_BD_NUM; i++) {
                lp->rx_bd_v[i].next = lp->rx_bd_p +
                                      sizeof(*lp->rx_bd_v) *
                                      ((i + 1) % RX_BD_NUM);

                skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
                if (!skb) {
                        dev_err(&ndev->dev, "alloc_skb error %d\n", i);
                        goto out;
                }

                lp->rx_bd_v[i].sw_id_offset = (u32) skb;
                lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
                                                     skb->data,
                                                     lp->max_frm_size,
                                                     DMA_FROM_DEVICE);
                lp->rx_bd_v[i].cntrl = lp->max_frm_size;
        }

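        /* Program the channel control registers below. The coalesce count is
         * the number of completed BDs after which the DMA engine raises a
         * completion interrupt, and the delay timer bounds how long a
         * partially filled batch may wait before an interrupt is forced
         * anyway; with coalesce_count_rx set to 1 this presumably amounts to
         * one interrupt per received frame.
         */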
        /* Start updating the Rx channel control register */
        cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
        /* Update the interrupt coalesce count */
        cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
              ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
        /* Update the delay timer count */
        cr = ((cr & ~XAXIDMA_DELAY_MASK) |
              (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
        /* Enable coalesce, delay timer and error interrupts */
        cr |= XAXIDMA_IRQ_ALL_MASK;
        /* Write to the Rx channel control register */
        axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

        /* Start updating the Tx channel control register */
        cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
        /* Update the interrupt coalesce count */
        cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
              ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
        /* Update the delay timer count */
        cr = ((cr & ~XAXIDMA_DELAY_MASK) |
              (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
        /* Enable coalesce, delay timer and error interrupts */
        cr |= XAXIDMA_IRQ_ALL_MASK;
        /* Write to the Tx channel control register */
        axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

        /* Populate the tail pointer and bring the Rx Axi DMA engine out of
         * halted state. This will make the Rx side ready for reception.
         */
        axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
        cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
        axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
                          cr | XAXIDMA_CR_RUNSTOP_MASK);
        axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
                          (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
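
        /* Setting the tail pointer to the last BD hands all RX_BD_NUM
         * descriptors to the hardware; the engine processes BDs from the
         * current descriptor up to the tail and then stalls until the tail
         * pointer is advanced again (done in axienet_recv()).
         */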

        /* Write to the RS (Run-stop) bit in the Tx channel control register.
         * The Tx channel is now ready to run, but it will start transmitting
         * only once the tail pointer register is written.
         */
        axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
        cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
        axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
                          cr | XAXIDMA_CR_RUNSTOP_MASK);

        return 0;
out:
        axienet_dma_bd_release(ndev);
        return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:       Pointer to the net_device structure
 * @address:    6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev, void *address)
{
        struct axienet_local *lp = netdev_priv(ndev);

        if (address)
                memcpy(ndev->dev_addr, address, ETH_ALEN);
        if (!is_valid_ether_addr(ndev->dev_addr))
                eth_random_addr(ndev->dev_addr);

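        /* The six address bytes are packed little-endian into two registers:
         * bytes 0-3 into UAW0 and bytes 4-5 into the low half of UAW1. For
         * example, the MAC address 00:0a:35:01:02:03 would be written as
         * UAW0 = 0x01350a00 and UAW1[15:0] = 0x0302.
         */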
        /* Set up the unicast MAC address filter with the given MAC address */
        axienet_iow(lp, XAE_UAW0_OFFSET,
                    (ndev->dev_addr[0]) |
                    (ndev->dev_addr[1] << 8) |
                    (ndev->dev_addr[2] << 16) |
                    (ndev->dev_addr[3] << 24));
        axienet_iow(lp, XAE_UAW1_OFFSET,
                    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
                      ~XAE_UAW1_UNICASTADDR_MASK) |
                     (ndev->dev_addr[4] |
                     (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:       Pointer to the net_device structure
 * @p:          6 byte Address to be written as MAC address
 *
 * returns: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
        struct sockaddr *addr = p;

        axienet_set_mac_address(ndev, addr->sa_data);
        return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:       Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
        int i;
        u32 reg, af0reg, af1reg;
        struct axienet_local *lp = netdev_priv(ndev);

        if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
            netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
                /* We must make the kernel realize we had to move into
                 * promiscuous mode. If it was a promiscuous mode request
                 * the flag is already set. If not we set it.
                 */
                ndev->flags |= IFF_PROMISC;
                reg = axienet_ior(lp, XAE_FMI_OFFSET);
                reg |= XAE_FMI_PM_MASK;
                axienet_iow(lp, XAE_FMI_OFFSET, reg);
                dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
        } else if (!netdev_mc_empty(ndev)) {
                struct netdev_hw_addr *ha;

                i = 0;
                netdev_for_each_mc_addr(ha, ndev) {
                        if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
                                break;

                        af0reg = (ha->addr[0]);
                        af0reg |= (ha->addr[1] << 8);
                        af0reg |= (ha->addr[2] << 16);
                        af0reg |= (ha->addr[3] << 24);

                        af1reg = (ha->addr[4]);
                        af1reg |= (ha->addr[5] << 8);

                        reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
                        reg |= i;

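                        /* The low byte of the FMI register selects which of
                         * the four CAM entries the AF0/AF1 writes below are
                         * routed to.
                         */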
                        axienet_iow(lp, XAE_FMI_OFFSET, reg);
                        axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
                        axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
                        i++;
                }
        } else {
                reg = axienet_ior(lp, XAE_FMI_OFFSET);
                reg &= ~XAE_FMI_PM_MASK;

                axienet_iow(lp, XAE_FMI_OFFSET, reg);

                for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
                        reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
                        reg |= i;

                        axienet_iow(lp, XAE_FMI_OFFSET, reg);
                        axienet_iow(lp, XAE_AF0_OFFSET, 0);
                        axienet_iow(lp, XAE_AF1_OFFSET, 0);
                }

                dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
        }
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev:       Pointer to the net_device structure
 * @options:    Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
        int reg;
        struct axienet_local *lp = netdev_priv(ndev);
        struct axienet_option *tp = &axienet_options[0];

        while (tp->opt) {
                reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
                if (options & tp->opt)
                        reg |= tp->m_or;
                axienet_iow(lp, tp->reg, reg);
                tp++;
        }

        lp->options |= options;
}
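
/* Typical usage in this driver: axienet_device_reset() first syncs all
 * options with the transmitter and receiver masked off, e.g.
 *
 *	axienet_setoptions(ndev, lp->options &
 *			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 *
 * and then re-applies lp->options so that TXEN/RXEN are enabled last.
 */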

static void __axienet_device_reset(struct axienet_local *lp,
                                   struct device *dev, off_t offset)
{
        u32 timeout;

        /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
         * process of Axi DMA takes a while to complete as all pending
         * commands/transfers will be flushed or completed during this
         * reset process.
         */
        axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
        timeout = DELAY_OF_ONE_MILLISEC;
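        /* Busy-wait for the reset bit to self-clear. With udelay(1) per
         * iteration the loop below polls for roughly DELAY_OF_ONE_MILLISEC
         * microseconds (the constant appears to be the iteration budget,
         * i.e. about one millisecond) before giving up.
         */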
        while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
                udelay(1);
                if (--timeout == 0) {
                        dev_err(dev, "axienet_device_reset DMA reset timeout!\n");
                        break;
                }
        }
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:       Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since the Axi DMA reset
 * lines are connected to the Axi Ethernet reset lines, this in turn resets
 * the Axi Ethernet core. No separate hardware reset is done for the Axi
 * Ethernet core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
        u32 axienet_status;
        struct axienet_local *lp = netdev_priv(ndev);

        __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
        __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

        lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
        lp->options &= (~XAE_OPTION_JUMBO);

        if ((ndev->mtu > XAE_MTU) &&
            (ndev->mtu <= XAE_JUMBO_MTU) &&
            (lp->jumbo_support)) {
                lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
                                   XAE_TRL_SIZE;
                lp->options |= XAE_OPTION_JUMBO;
        }

        if (axienet_dma_bd_init(ndev)) {
                dev_err(&ndev->dev, "axienet_device_reset descriptor allocation failed\n");
        }

        axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
        axienet_status &= ~XAE_RCW1_RX_MASK;
        axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

        axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
        if (axienet_status & XAE_INT_RXRJECT_MASK)
                axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

        axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

        /* Sync default options with HW but leave receiver and
         * transmitter disabled.
         */
        axienet_setoptions(ndev, lp->options &
                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
        axienet_set_mac_address(ndev, NULL);
        axienet_set_multicast_list(ndev);
        axienet_setoptions(ndev, lp->options);

        ndev->trans_start = jiffies;
}

/**
 * axienet_adjust_link - Adjust the PHY link speed/duplex.
 * @ndev:       Pointer to the net_device structure
 *
 * This function is called to change the speed and duplex setting after
 * auto negotiation is done by the PHY. This is the function that gets
 * registered with the PHY interface through the "of_phy_connect" call.
 */
static void axienet_adjust_link(struct net_device *ndev)
{
        u32 emmc_reg;
        u32 link_state;
        u32 setspeed = 1;
        struct axienet_local *lp = netdev_priv(ndev);
        struct phy_device *phy = lp->phy_dev;

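        /* Fold speed, duplex and link status into a single value so that a
         * change in any of them is detected by a simple comparison against
         * the value cached from the previous callback.
         */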
        link_state = phy->speed | (phy->duplex << 1) | phy->link;
        if (lp->last_link != link_state) {
                if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
                        if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X)
                                setspeed = 0;
                } else {
                        if ((phy->speed == SPEED_1000) &&
                            (lp->phy_type == XAE_PHY_TYPE_MII))
                                setspeed = 0;
                }

                if (setspeed == 1) {
                        emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
                        emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

                        switch (phy->speed) {
                        case SPEED_1000:
                                emmc_reg |= XAE_EMMC_LINKSPD_1000;
                                break;
                        case SPEED_100:
                                emmc_reg |= XAE_EMMC_LINKSPD_100;
                                break;
                        case SPEED_10:
                                emmc_reg |= XAE_EMMC_LINKSPD_10;
                                break;
                        default:
                                dev_err(&ndev->dev, "Speed other than 10, 100 or 1000 Mbps is not supported\n");
                                break;
                        }

                        axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
                        lp->last_link = link_state;
                        phy_print_status(phy);
                } else {
                        dev_err(&ndev->dev, "Error setting Axi Ethernet MAC speed\n");
                }
        }
}

/**
 * axienet_start_xmit_done - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @ndev:       Pointer to the net_device structure
 *
 * This function is invoked from the Axi DMA Tx isr to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static void axienet_start_xmit_done(struct net_device *ndev)
{
        u32 size = 0;
        u32 packets = 0;
        struct axienet_local *lp = netdev_priv(ndev);
        struct axidma_bd *cur_p;
        unsigned int status = 0;

        cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
        status = cur_p->status;
        while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
                dma_unmap_single(ndev->dev.parent, cur_p->phys,
                                (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
                                DMA_TO_DEVICE);
                if (cur_p->app4)
                        dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
                /*cur_p->phys = 0;*/
                cur_p->app0 = 0;
                cur_p->app1 = 0;
                cur_p->app2 = 0;
                cur_p->app4 = 0;
                cur_p->status = 0;

                size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
                packets++;

                lp->tx_bd_ci = (lp->tx_bd_ci + 1) % TX_BD_NUM;
                cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
                status = cur_p->status;
        }

        ndev->stats.tx_packets += packets;
        ndev->stats.tx_bytes += size;
        netif_wake_queue(ndev);
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp:         Pointer to the axienet_local structure
 * @num_frag:   The number of BDs to check for
 *
 * returns: 0, on success
 *          NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status. This is invoked from axienet_start_xmit.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
                                            int num_frag)
{
        struct axidma_bd *cur_p;

        cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
        if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
                return NETDEV_TX_BUSY;
        return 0;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb:        sk_buff pointer that contains data to be Txed.
 * @ndev:       Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *          NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        u32 ii;
        u32 num_frag;
        u32 csum_start_off;
        u32 csum_index_off;
        skb_frag_t *frag;
        dma_addr_t tail_p;
        struct axienet_local *lp = netdev_priv(ndev);
        struct axidma_bd *cur_p;

        num_frag = skb_shinfo(skb)->nr_frags;
        cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

        if (axienet_check_tx_bd_space(lp, num_frag)) {
                if (!netif_queue_stopped(ndev))
                        netif_stop_queue(ndev);
                return NETDEV_TX_BUSY;
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
                        /* Tx Full Checksum Offload Enabled */
                        cur_p->app0 |= 2;
                } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
                        csum_start_off = skb_transport_offset(skb);
                        csum_index_off = csum_start_off + skb->csum_offset;
                        /* Tx Partial Checksum Offload Enabled */
                        cur_p->app0 |= 1;
                        cur_p->app1 = (csum_start_off << 16) | csum_index_off;
                }
        } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
        }
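
        /* For partial offload, app1 tells the hardware where the checksum
         * computation starts and where the result must be inserted. For a
         * TCP segment over IPv4 with a standard 14-byte Ethernet header and
         * 20-byte IP header, for instance, csum_start_off would be 34 and
         * csum_index_off 34 + 16 = 50 (the TCP checksum field offset).
         */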

        cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
        cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
                                     skb_headlen(skb), DMA_TO_DEVICE);

        for (ii = 0; ii < num_frag; ii++) {
                lp->tx_bd_tail = (lp->tx_bd_tail + 1) % TX_BD_NUM;
                cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
                frag = &skb_shinfo(skb)->frags[ii];
                cur_p->phys = dma_map_single(ndev->dev.parent,
                                             skb_frag_address(frag),
                                             skb_frag_size(frag),
                                             DMA_TO_DEVICE);
                cur_p->cntrl = skb_frag_size(frag);
        }

        cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
        cur_p->app4 = (unsigned long)skb;

        tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
        /* Start the transfer */
        axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
        lp->tx_bd_tail = (lp->tx_bd_tail + 1) % TX_BD_NUM;

        return NETDEV_TX_OK;
}

/**
 * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
 *                BD processing.
 * @ndev:       Pointer to net_device structure.
 *
 * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
 * does minimal processing and invokes "netif_rx" to complete further
 * processing.
 */
static void axienet_recv(struct net_device *ndev)
{
        u32 length;
        u32 csumstatus;
        u32 size = 0;
        u32 packets = 0;
        dma_addr_t tail_p;
        struct axienet_local *lp = netdev_priv(ndev);
        struct sk_buff *skb, *new_skb;
        struct axidma_bd *cur_p;

        tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
        cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

        while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
                skb = (struct sk_buff *) (cur_p->sw_id_offset);
                length = cur_p->app4 & 0x0000FFFF;

                dma_unmap_single(ndev->dev.parent, cur_p->phys,
                                 lp->max_frm_size,
                                 DMA_FROM_DEVICE);

                skb_put(skb, length);
                skb->protocol = eth_type_trans(skb, ndev);
                /*skb_checksum_none_assert(skb);*/
                skb->ip_summed = CHECKSUM_NONE;

                /* if we're doing Rx csum offload, set it up */
                if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
                        csumstatus = (cur_p->app2 &
                                      XAE_FULL_CSUM_STATUS_MASK) >> 3;
                        if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
                            (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                        }
                } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
                           skb->protocol == __constant_htons(ETH_P_IP) &&
                           skb->len > 64) {
                        skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
                        skb->ip_summed = CHECKSUM_COMPLETE;
                }
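
                /* With partial Rx offload the hardware leaves its raw 16-bit
                 * ones' complement sum in app3; handing it to the stack as
                 * CHECKSUM_COMPLETE lets the network layer finish the
                 * verification in software.
                 */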

                netif_rx(skb);

                size += length;
                packets++;

                new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
                if (!new_skb) {
                        dev_err(&ndev->dev, "no memory for new sk_buff\n");
                        return;
                }
                cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
                                             lp->max_frm_size,
                                             DMA_FROM_DEVICE);
                cur_p->cntrl = lp->max_frm_size;
                cur_p->status = 0;
                cur_p->sw_id_offset = (u32) new_skb;

                lp->rx_bd_ci = (lp->rx_bd_ci + 1) % RX_BD_NUM;
                cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
        }

        ndev->stats.rx_packets += packets;
        ndev->stats.rx_bytes += size;

        axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq:        irq number
 * @_ndev:      net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
 * to complete the BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
        u32 cr;
        unsigned int status;
        struct net_device *ndev = _ndev;
        struct axienet_local *lp = netdev_priv(ndev);

        status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
        if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
                axienet_start_xmit_done(lp->ndev);
                goto out;
        }
        if (!(status & XAXIDMA_IRQ_ALL_MASK))
                dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
        if (status & XAXIDMA_IRQ_ERROR_MASK) {
                dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
                dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
                        (lp->tx_bd_v[lp->tx_bd_ci]).phys);

                cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
                /* Disable coalesce, delay timer and error interrupts */
                cr &= (~XAXIDMA_IRQ_ALL_MASK);
                /* Write to the Tx channel control register */
                axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

                cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
                /* Disable coalesce, delay timer and error interrupts */
                cr &= (~XAXIDMA_IRQ_ALL_MASK);
                /* Write to the Rx channel control register */
                axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

                tasklet_schedule(&lp->dma_err_tasklet);
        }
out:
        axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
        return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq:        irq number
 * @_ndev:      net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
        u32 cr;
        unsigned int status;
        struct net_device *ndev = _ndev;
        struct axienet_local *lp = netdev_priv(ndev);

        status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
        if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
                axienet_recv(lp->ndev);
                goto out;
        }
        if (!(status & XAXIDMA_IRQ_ALL_MASK))
                dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
        if (status & XAXIDMA_IRQ_ERROR_MASK) {
                dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
                dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
                        (lp->rx_bd_v[lp->rx_bd_ci]).phys);

                cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
                /* Disable coalesce, delay timer and error interrupts */
                cr &= (~XAXIDMA_IRQ_ALL_MASK);
                /* Finally write to the Tx channel control register */
                axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

                cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
                /* Disable coalesce, delay timer and error interrupts */
                cr &= (~XAXIDMA_IRQ_ALL_MASK);
                /* Write to the Rx channel control register */
                axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

                tasklet_schedule(&lp->dma_err_tasklet);
        }
out:
        axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
        return IRQ_HANDLED;
}

static void axienet_dma_err_handler(unsigned long data);

/**
 * axienet_open - Driver open routine.
 * @ndev:       Pointer to net_device structure
 *
 * returns: 0, on success.
 *          -ENODEV, if the PHY cannot be connected to
 *          non-zero error value on failure
 *
 * This is the driver open routine. It calls phy_start to start the PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
        int ret, mdio_mcreg;
        struct axienet_local *lp = netdev_priv(ndev);

        dev_dbg(&ndev->dev, "axienet_open()\n");

        mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
        ret = axienet_mdio_wait_until_ready(lp);
        if (ret < 0)
                return ret;
        /* Disable the MDIO interface till Axi Ethernet Reset is completed.
         * When we do an Axi Ethernet reset, it resets the complete core
         * including the MDIO. If MDIO is not disabled when the reset
         * process is started, MDIO will be broken afterwards.
         */
        axienet_iow(lp, XAE_MDIO_MC_OFFSET,
                    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
        axienet_device_reset(ndev);
        /* Enable the MDIO */
        axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
        ret = axienet_mdio_wait_until_ready(lp);
        if (ret < 0)
                return ret;

        if (lp->phy_node) {
                lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
                                             axienet_adjust_link, 0,
                                             PHY_INTERFACE_MODE_GMII);
                if (!lp->phy_dev) {
                        dev_err(lp->dev, "of_phy_connect() failed\n");
                        return -ENODEV;
                }
                phy_start(lp->phy_dev);
        }

        /* Enable tasklets for Axi DMA error handling */
        tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
                     (unsigned long) lp);

        /* Enable interrupts for Axi DMA Tx */
        ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
        if (ret)
                goto err_tx_irq;
        /* Enable interrupts for Axi DMA Rx */
        ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
        if (ret)
                goto err_rx_irq;

        return 0;

err_rx_irq:
        free_irq(lp->tx_irq, ndev);
err_tx_irq:
        if (lp->phy_dev)
                phy_disconnect(lp->phy_dev);
        lp->phy_dev = NULL;
        tasklet_kill(&lp->dma_err_tasklet);
        dev_err(lp->dev, "request_irq() failed\n");
        return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev:       Pointer to net_device structure
 *
 * returns: 0, on success.
 *
 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
        u32 cr;
        struct axienet_local *lp = netdev_priv(ndev);

        dev_dbg(&ndev->dev, "axienet_close()\n");

        cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
        axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
                          cr & (~XAXIDMA_CR_RUNSTOP_MASK));
        cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
        axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
                          cr & (~XAXIDMA_CR_RUNSTOP_MASK));
        axienet_setoptions(ndev, lp->options &
                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

        tasklet_kill(&lp->dma_err_tasklet);

        free_irq(lp->tx_irq, ndev);
        free_irq(lp->rx_irq, ndev);

        if (lp->phy_dev)
                phy_disconnect(lp->phy_dev);
        lp->phy_dev = NULL;

        axienet_dma_bd_release(ndev);
        return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev:       Pointer to net_device structure
 * @new_mtu:    New mtu value to be applied
 *
 * returns: 0 on success, -EBUSY if the device is running, or -EINVAL if the
 *          requested mtu is out of range.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct axienet_local *lp = netdev_priv(ndev);

        if (netif_running(ndev))
                return -EBUSY;
        if (lp->jumbo_support) {
                if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64))
                        return -EINVAL;
                ndev->mtu = new_mtu;
        } else {
                if ((new_mtu > XAE_MTU) || (new_mtu < 64))
                        return -EINVAL;
                ndev->mtu = new_mtu;
        }

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev:       Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
        struct axienet_local *lp = netdev_priv(ndev);

        disable_irq(lp->tx_irq);
        disable_irq(lp->rx_irq);
        axienet_rx_irq(lp->rx_irq, ndev);
        axienet_tx_irq(lp->tx_irq, ndev);
        enable_irq(lp->tx_irq);
        enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops axienet_netdev_ops = {
        .ndo_open = axienet_open,
        .ndo_stop = axienet_stop,
        .ndo_start_xmit = axienet_start_xmit,
        .ndo_change_mtu = axienet_change_mtu,
        .ndo_set_mac_address = netdev_set_mac_address,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
 * @ndev:       Pointer to net_device structure
 * @ecmd:       Pointer to ethtool_cmd structure
 *
 * This implements ethtool command for getting PHY settings. If PHY could
 * not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to get the PHY settings.
 * Issue "ethtool ethX" under linux prompt to execute this function.
 */
static int axienet_ethtools_get_settings(struct net_device *ndev,
                                         struct ethtool_cmd *ecmd)
{
        struct axienet_local *lp = netdev_priv(ndev);
        struct phy_device *phydev = lp->phy_dev;

        if (!phydev)
                return -ENODEV;
        return phy_ethtool_gset(phydev, ecmd);
}

/**
 * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
 * @ndev:       Pointer to net_device structure
 * @ecmd:       Pointer to ethtool_cmd structure
 *
 * This implements ethtool command for setting various PHY settings. If PHY
 * could not be found, the function returns -ENODEV. This function calls the
 * relevant PHY ethtool API to set the PHY.
 * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
 * function.
 */
static int axienet_ethtools_set_settings(struct net_device *ndev,
                                         struct ethtool_cmd *ecmd)
{
        struct axienet_local *lp = netdev_priv(ndev);
        struct phy_device *phydev = lp->phy_dev;

        if (!phydev)
                return -ENODEV;
        return phy_ethtool_sset(phydev, ecmd);
}

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev:       Pointer to net_device structure
 * @ed:         Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
                                         struct ethtool_drvinfo *ed)
{
        memset(ed, 0, sizeof(struct ethtool_drvinfo));
        strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
        strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
        ed->regdump_len = sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *                                 AxiEthernet core.
 * @ndev:       Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
        return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *                             in AxiEthernet core.
 * @ndev:       Pointer to net_device structure
 * @regs:       Pointer to ethtool_regs structure
 * @ret:        Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
                                      struct ethtool_regs *regs, void *ret)
{
        u32 *data = (u32 *) ret;
        size_t len = sizeof(u32) * AXIENET_REGS_N;
        struct axienet_local *lp = netdev_priv(ndev);

        regs->version = 0;
        regs->len = len;

        memset(data, 0, len);
        data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
        data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
        data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
        data[3] = axienet_ior(lp, XAE_IS_OFFSET);
        data[4] = axienet_ior(lp, XAE_IP_OFFSET);
        data[5] = axienet_ior(lp, XAE_IE_OFFSET);
        data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
        data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
        data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
        data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
        data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
        data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
        data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
        data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
        data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
        data[15] = axienet_ior(lp, XAE_TC_OFFSET);
        data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
        data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
        data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
        data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
        data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
        data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
        data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
        data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET);
        data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET);
        data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET);
        data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET);
        data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
        data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
        data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
        data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
        data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *                                   Tx and Rx paths.
 * @ndev:       Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
                                struct ethtool_pauseparam *epauseparm)
{
        u32 regval;
        struct axienet_local *lp = netdev_priv(ndev);

        epauseparm->autoneg  = 0;
        regval = axienet_ior(lp, XAE_FCC_OFFSET);
        epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
        epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *                                   settings.
 * @ndev:       Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
                                struct ethtool_pauseparam *epauseparm)
{
        u32 regval = 0;
        struct axienet_local *lp = netdev_priv(ndev);

        if (netif_running(ndev)) {
                printk(KERN_ERR "%s: Please stop netif before applying configuration\n",
                       ndev->name);
                return -EFAULT;
        }

        regval = axienet_ior(lp, XAE_FCC_OFFSET);
        if (epauseparm->tx_pause)
                regval |= XAE_FCC_FCTX_MASK;
        else
                regval &= ~XAE_FCC_FCTX_MASK;
        if (epauseparm->rx_pause)
                regval |= XAE_FCC_FCRX_MASK;
        else
                regval &= ~XAE_FCC_FCRX_MASK;
        axienet_iow(lp, XAE_FCC_OFFSET, regval);

        return 0;
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev:       Pointer to net_device structure
 * @ecoalesce:  Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 */
static int axienet_ethtools_get_coalesce(struct net_device *ndev,
                                         struct ethtool_coalesce *ecoalesce)
{
        u32 regval = 0;
        struct axienet_local *lp = netdev_priv(ndev);

        regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
        ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
                                             >> XAXIDMA_COALESCE_SHIFT;
        regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
        ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
                                             >> XAXIDMA_COALESCE_SHIFT;
        return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev:       Pointer to net_device structure
 * @ecoalesce:  Pointer to ethtool_coalesce structure
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 */
static int axienet_ethtools_set_coalesce(struct net_device *ndev,
                                         struct ethtool_coalesce *ecoalesce)
{
        struct axienet_local *lp = netdev_priv(ndev);

        if (netif_running(ndev)) {
                printk(KERN_ERR "%s: Please stop netif before applying configuration\n",
                       ndev->name);
                return -EFAULT;
        }

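        /* Only rx-frames/tx-frames are supported by this hardware; reject a
         * request that tries to set any of the other coalescing knobs.
         */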
        if ((ecoalesce->rx_coalesce_usecs) ||
            (ecoalesce->rx_coalesce_usecs_irq) ||
            (ecoalesce->rx_max_coalesced_frames_irq) ||
            (ecoalesce->tx_coalesce_usecs) ||
            (ecoalesce->tx_coalesce_usecs_irq) ||
            (ecoalesce->tx_max_coalesced_frames_irq) ||
            (ecoalesce->stats_block_coalesce_usecs) ||
            (ecoalesce->use_adaptive_rx_coalesce) ||
            (ecoalesce->use_adaptive_tx_coalesce) ||
            (ecoalesce->pkt_rate_low) ||
            (ecoalesce->rx_coalesce_usecs_low) ||
            (ecoalesce->rx_max_coalesced_frames_low) ||
            (ecoalesce->tx_coalesce_usecs_low) ||
            (ecoalesce->tx_max_coalesced_frames_low) ||
            (ecoalesce->pkt_rate_high) ||
            (ecoalesce->rx_coalesce_usecs_high) ||
            (ecoalesce->rx_max_coalesced_frames_high) ||
            (ecoalesce->tx_coalesce_usecs_high) ||
            (ecoalesce->tx_max_coalesced_frames_high) ||
            (ecoalesce->rate_sample_interval))
                return -EOPNOTSUPP;
        if (ecoalesce->rx_max_coalesced_frames)
                lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
        if (ecoalesce->tx_max_coalesced_frames)
                lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

        return 0;
}

static const struct ethtool_ops axienet_ethtool_ops = {
        .get_settings   = axienet_ethtools_get_settings,
        .set_settings   = axienet_ethtools_set_settings,
        .get_drvinfo    = axienet_ethtools_get_drvinfo,
        .get_regs_len   = axienet_ethtools_get_regs_len,
        .get_regs       = axienet_ethtools_get_regs,
        .get_link       = ethtool_op_get_link,
        .get_pauseparam = axienet_ethtools_get_pauseparam,
        .set_pauseparam = axienet_ethtools_set_pauseparam,
        .get_coalesce   = axienet_ethtools_get_coalesce,
        .set_coalesce   = axienet_ethtools_set_coalesce,
};
1343
1344/**
1345 * axienet_dma_err_handler - Tasklet handler for Axi DMA Error
1346 * @data:       Pointer to the axienet_local structure (cast to unsigned long)
1347 *
1348 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
1349 * Tx/Rx BDs.
1350 */
1351static void axienet_dma_err_handler(unsigned long data)
1352{
1353        u32 axienet_status;
1354        u32 cr, i;
1355        int mdio_mcreg;
1356        struct axienet_local *lp = (struct axienet_local *) data;
1357        struct net_device *ndev = lp->ndev;
1358        struct axidma_bd *cur_p;
1359
1360        axienet_setoptions(ndev, lp->options &
1361                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1362        mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1363        axienet_mdio_wait_until_ready(lp);
1364        /* Disable the MDIO interface until the Axi Ethernet reset is
1365         * completed. An Axi Ethernet reset resets the complete core,
1366         * including the MDIO. If MDIO is not disabled when the reset
1367         * process starts, MDIO will be broken afterwards. */
1368        axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
1369                    ~XAE_MDIO_MC_MDIOEN_MASK));
1370
1371        __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
1372        __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
1373
1374        axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
1375        axienet_mdio_wait_until_ready(lp);
1376
1377        for (i = 0; i < TX_BD_NUM; i++) {
1378                cur_p = &lp->tx_bd_v[i];
1379                if (cur_p->phys)
1380                        dma_unmap_single(ndev->dev.parent, cur_p->phys,
1381                                         (cur_p->cntrl &
1382                                          XAXIDMA_BD_CTRL_LENGTH_MASK),
1383                                         DMA_TO_DEVICE);
1384                if (cur_p->app4)
1385                        dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
1386                cur_p->phys = 0;
1387                cur_p->cntrl = 0;
1388                cur_p->status = 0;
1389                cur_p->app0 = 0;
1390                cur_p->app1 = 0;
1391                cur_p->app2 = 0;
1392                cur_p->app3 = 0;
1393                cur_p->app4 = 0;
1394                cur_p->sw_id_offset = 0;
1395        }
1396
1397        for (i = 0; i < RX_BD_NUM; i++) {
1398                cur_p = &lp->rx_bd_v[i];
1399                cur_p->status = 0;
1400                cur_p->app0 = 0;
1401                cur_p->app1 = 0;
1402                cur_p->app2 = 0;
1403                cur_p->app3 = 0;
1404                cur_p->app4 = 0;
1405        }
1406
1407        lp->tx_bd_ci = 0;
1408        lp->tx_bd_tail = 0;
1409        lp->rx_bd_ci = 0;
1410
1411        /* Start updating the Rx channel control register */
1412        cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1413        /* Update the interrupt coalesce count */
1414        cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
1415              (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1416        /* Update the delay timer count */
1417        cr = ((cr & ~XAXIDMA_DELAY_MASK) |
1418              (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1419        /* Enable coalesce, delay timer and error interrupts */
1420        cr |= XAXIDMA_IRQ_ALL_MASK;
1421        /* Finally write to the Rx channel control register */
1422        axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1423
1424        /* Start updating the Tx channel control register */
1425        cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1426        /* Update the interrupt coalesce count */
1427        cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
1428              (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
1429        /* Update the delay timer count */
1430        cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
1431              (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
1432        /* Enable coalesce, delay timer and error interrupts */
1433        cr |= XAXIDMA_IRQ_ALL_MASK;
1434        /* Finally write to the Tx channel control register */
1435        axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1436
1437        /* Populate the tail pointer and bring the Rx Axi DMA engine out of
1438         * halted state. This will make the Rx side ready for reception. */
1439        axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
1440        cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1441        axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
1442                          cr | XAXIDMA_CR_RUNSTOP_MASK);
1443        axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
1444                          (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
1445
1446        /* Write to the RS (Run-stop) bit in the Tx channel control register.
1447         * The Tx channel is now ready to run, but it will only start
1448         * transmitting once the tail pointer register has been written. */
1449        axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
1450        cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1451        axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
1452                          cr | XAXIDMA_CR_RUNSTOP_MASK);
1453
1454        axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
1455        axienet_status &= ~XAE_RCW1_RX_MASK;
1456        axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
1457
1458        axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
1459        if (axienet_status & XAE_INT_RXRJECT_MASK)
1460                axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
1461        axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
1462
1463        /* Sync default options with HW but leave receiver and
1464         * transmitter disabled. */
1465        axienet_setoptions(ndev, lp->options &
1466                           ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1467        axienet_set_mac_address(ndev, NULL);
1468        axienet_set_multicast_list(ndev);
1469        axienet_setoptions(ndev, lp->options);
1470}
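
/* A sketch of how this handler is wired up elsewhere in the driver: the
 * tasklet is initialized when the interface is opened and scheduled from
 * the DMA Tx/Rx interrupt handlers once an error status is observed:
 *
 *   tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
 *                (unsigned long) lp);
 *   ...
 *   tasklet_schedule(&lp->dma_err_tasklet);
 */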
1471
1472/**
1473 * axienet_of_probe - Axi Ethernet probe function.
1474 * @op:         Pointer to platform device structure.
1476 *
1477 * returns: 0, on success
1478 *          Non-zero error value on failure.
1479 *
1480 * This is the probe routine for the Axi Ethernet driver. It is called before
1481 * any other driver routines are invoked. It allocates and sets up the
1482 * Ethernet device, parses the device tree to populate the fields of
1483 * axienet_local, and registers the Ethernet device.
1484 */
1485static int axienet_of_probe(struct platform_device *op)
1486{
1487        __be32 *p;
1488        int size, ret = 0;
1489        struct device_node *np;
1490        struct axienet_local *lp;
1491        struct net_device *ndev;
1492        const void *addr;
1493
1494        ndev = alloc_etherdev(sizeof(*lp));
1495        if (!ndev)
1496                return -ENOMEM;
1497
1498        dev_set_drvdata(&op->dev, ndev);
1500
1501        SET_NETDEV_DEV(ndev, &op->dev);
1502        ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
1503        ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
1504        ndev->netdev_ops = &axienet_netdev_ops;
1505        ndev->ethtool_ops = &axienet_ethtool_ops;
1506
1507        lp = netdev_priv(ndev);
1508        lp->ndev = ndev;
1509        lp->dev = &op->dev;
1510        lp->options = XAE_OPTION_DEFAULTS;
1511        /* Map device registers */
1512        lp->regs = of_iomap(op->dev.of_node, 0);
1513        if (!lp->regs) {
1514                dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
1515                ret = -ENOMEM;
1516                goto nodev;
1517        }
1517        /* Setup checksum offload, but default to off if not specified */
1518        lp->features = 0;
1519
1520        p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
1521        if (p) {
1522                switch (be32_to_cpup(p)) {
1523                case 1:
1524                        lp->csum_offload_on_tx_path =
1525                                XAE_FEATURE_PARTIAL_TX_CSUM;
1526                        lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
1527                        /* Can checksum TCP/UDP over IPv4. */
1528                        ndev->features |= NETIF_F_IP_CSUM;
1529                        break;
1530                case 2:
1531                        lp->csum_offload_on_tx_path =
1532                                XAE_FEATURE_FULL_TX_CSUM;
1533                        lp->features |= XAE_FEATURE_FULL_TX_CSUM;
1534                        /* Can checksum TCP/UDP over IPv4. */
1535                        ndev->features |= NETIF_F_IP_CSUM;
1536                        break;
1537                default:
1538                        lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
1539                }
1540        }
1541        p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
1542        if (p) {
1543                switch (be32_to_cpup(p)) {
1544                case 1:
1545                        lp->csum_offload_on_rx_path =
1546                                XAE_FEATURE_PARTIAL_RX_CSUM;
1547                        lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
1548                        break;
1549                case 2:
1550                        lp->csum_offload_on_rx_path =
1551                                XAE_FEATURE_FULL_RX_CSUM;
1552                        lp->features |= XAE_FEATURE_FULL_RX_CSUM;
1553                        break;
1554                default:
1555                        lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
1556                }
1557        }
1558        /* For supporting jumbo frames, the Axi Ethernet hardware must have
1559         * a larger Rx/Tx Memory. Typically, the size must be more than or
1560         * equal to 16384 bytes, so that we can enable jumbo option and start
1561         * supporting jumbo frames. Here we check for memory allocated for
1562         * Rx/Tx in the hardware from the device-tree and accordingly set
1563         * flags. */
1564        p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL);
1565        if (p) {
1566                if ((be32_to_cpup(p)) >= 0x4000)
1567                        lp->jumbo_support = 1;
1568        }
1569        p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
1570                                       NULL);
1571        if (p)
1572                lp->temac_type = be32_to_cpup(p);
1573        p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
1574        if (p)
1575                lp->phy_type = be32_to_cpup(p);
1576
1577        /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
1578        np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
1579        if (!np) {
1580                dev_err(&op->dev, "could not find DMA node\n");
1581                ret = -ENODEV;
1582                goto err_iounmap;
1583        }
1584        lp->dma_regs = of_iomap(np, 0);
1585        if (!lp->dma_regs) {
1586                dev_err(&op->dev, "unable to map DMA registers\n");
1587                of_node_put(np);
1588                ret = -ENOMEM;
1589                goto err_iounmap;
1590        }
1591        dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs);
1590        lp->rx_irq = irq_of_parse_and_map(np, 1);
1591        lp->tx_irq = irq_of_parse_and_map(np, 0);
1592        of_node_put(np);
1593        if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
1594                dev_err(&op->dev, "could not determine irqs\n");
1595                ret = -ENODEV;
1596                goto err_iounmap_2;
1597        }
1598
1599        /* Retrieve the MAC address */
1600        addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
1601        if (!addr || (size != ETH_ALEN)) {
1602                dev_err(&op->dev, "could not find MAC address\n");
1603                ret = -ENODEV;
1604                goto err_iounmap_2;
1605        }
1606        axienet_set_mac_address(ndev, (void *) addr);
1607
1608        lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
1609        lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
1610
1611        lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
1612        ret = axienet_mdio_setup(lp, op->dev.of_node);
1613        if (ret)
1614                dev_warn(&op->dev, "error registering MDIO bus\n");
1615
1616        ret = register_netdev(lp->ndev);
1617        if (ret) {
1618                dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
1619                goto err_iounmap_2;
1620        }
1621
1622        return 0;
1623
1624err_iounmap_2:
1625        if (lp->dma_regs)
1626                iounmap(lp->dma_regs);
1627err_iounmap:
1628        iounmap(lp->regs);
1629nodev:
1630        free_netdev(ndev);
1632        return ret;
1633}
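
/* For reference, a minimal sketch of a device-tree node this probe routine
 * can consume. Property names match the lookups above; the unit addresses,
 * sizes, MAC address, and phandle targets are illustrative placeholders:
 *
 *   axi_ethernet_0: ethernet@40c00000 {
 *           compatible = "xlnx,axi-ethernet-1.00.a";
 *           reg = <0x40c00000 0x40000>;
 *           local-mac-address = [00 0a 35 00 00 00];
 *           xlnx,rxmem = <0x8000>;
 *           xlnx,txcsum = <1>;
 *           xlnx,rxcsum = <1>;
 *           phy-handle = <&phy0>;
 *           axistream-connected = <&axi_dma_0>;
 *   };
 *
 * The "axistream-connected" phandle must reference the Axi DMA node; its
 * first interrupt is used as the Tx IRQ and its second as the Rx IRQ.
 */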
1634
1635static int axienet_of_remove(struct platform_device *op)
1636{
1637        struct net_device *ndev = dev_get_drvdata(&op->dev);
1638        struct axienet_local *lp = netdev_priv(ndev);
1639
1640        axienet_mdio_teardown(lp);
1641        unregister_netdev(ndev);
1642
1643        if (lp->phy_node)
1644                of_node_put(lp->phy_node);
1645        lp->phy_node = NULL;
1646
1647        dev_set_drvdata(&op->dev, NULL);
1648
1649        iounmap(lp->regs);
1650        if (lp->dma_regs)
1651                iounmap(lp->dma_regs);
1652        free_netdev(ndev);
1653
1654        return 0;
1655}
1656
1657static struct platform_driver axienet_of_driver = {
1658        .probe = axienet_of_probe,
1659        .remove = axienet_of_remove,
1660        .driver = {
1661                 .owner = THIS_MODULE,
1662                 .name = "xilinx_axienet",
1663                 .of_match_table = axienet_of_match,
1664        },
1665};
1666
1667module_platform_driver(axienet_of_driver);
1668
1669MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
1670MODULE_AUTHOR("Xilinx");
1671MODULE_LICENSE("GPL");
1672