/* linux/drivers/net/ethernet/xilinx/ll_temac_main.c */
   1/*
   2 * Driver for Xilinx TEMAC Ethernet device
   3 *
   4 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
   5 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
   6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
   7 *
   8 * This is a driver for the Xilinx ll_temac ipcore which is often used
   9 * in the Virtex and Spartan series of chips.
  10 *
  11 * Notes:
  12 * - The ll_temac hardware uses indirect access for many of the TEMAC
  13 *   registers, include the MDIO bus.  However, indirect access to MDIO
  14 *   registers take considerably more clock cycles than to TEMAC registers.
  15 *   MDIO accesses are long, so threads doing them should probably sleep
  16 *   rather than busywait.  However, since only one indirect access can be
  17 *   in progress at any given time, that means that *all* indirect accesses
  18 *   could end up sleeping (to wait for an MDIO access to complete).
  19 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
  20 *   or rx, so this should be okay.
  21 *
  22 * TODO:
  23 * - Factor out locallink DMA code into separate driver
  24 * - Fix multicast assignment.
  25 * - Fix support for hardware checksumming.
  26 * - Testing.  Lots and lots of testing.
  27 *
  28 */
  29
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
#include <linux/udp.h>      /* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include "ll_temac.h"
  55
  56#define TX_BD_NUM   64
  57#define RX_BD_NUM   128
  58
  59/* ---------------------------------------------------------------------
  60 * Low level register access functions
  61 */
  62
/* Read the TEMAC register at byte @offset (big-endian MMIO) */
u32 temac_ior(struct temac_local *lp, int offset)
{
	return in_be32((u32 *)(lp->regs + offset));
}
  67
/* Write @value to the TEMAC register at byte @offset (big-endian MMIO) */
void temac_iow(struct temac_local *lp, int offset, u32 value)
{
	out_be32((u32 *) (lp->regs + offset), value);
}
  72
  73int temac_indirect_busywait(struct temac_local *lp)
  74{
  75        long end = jiffies + 2;
  76
  77        while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
  78                if (end - jiffies <= 0) {
  79                        WARN_ON(1);
  80                        return -ETIMEDOUT;
  81                }
  82                msleep(1);
  83        }
  84        return 0;
  85}
  86
/**
 * temac_indirect_in32 - Read TEMAC indirect register @reg
 * @lp: temac device state
 * @reg: indirect register address
 *
 * lp->indirect_mutex must be held when calling this function.
 *
 * Return: the register value, or (u32)-ETIMEDOUT if the indirect
 * interface did not become ready.  NOTE(review): since the return type
 * is u32, a timeout is indistinguishable from a register that genuinely
 * reads back as that value.
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
	u32 val;

	if (temac_indirect_busywait(lp))
		return -ETIMEDOUT;
	temac_iow(lp, XTE_CTL0_OFFSET, reg);
	if (temac_indirect_busywait(lp))
		return -ETIMEDOUT;
	val = temac_ior(lp, XTE_LSW0_OFFSET);

	return val;
}
 105
/**
 * temac_indirect_out32 - Write @value to TEMAC indirect register @reg
 * @lp: temac device state
 * @reg: indirect register address
 * @value: value to write
 *
 * lp->indirect_mutex must be held when calling this function.
 * If the indirect interface never becomes ready the write is silently
 * dropped (temac_indirect_busywait() already WARNs in that case).
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
	if (temac_indirect_busywait(lp))
		return;
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
	temac_indirect_busywait(lp);
}
 119
/**
 * temac_dma_in32 - Memory mapped DMA read, this function expects a
 * register input that is based on DCR word addresses which
 * are then converted to memory mapped byte addresses (reg << 2)
 */
static u32 temac_dma_in32(struct temac_local *lp, int reg)
{
	return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
}
 129
/**
 * temac_dma_out32 - Memory mapped DMA write, this function expects a
 * register input that is based on DCR word addresses which
 * are then converted to memory mapped byte addresses (reg << 2)
 */
static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
{
	out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
}
 139
 140/* DMA register access functions can be DCR based or memory mapped.
 141 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
 142 * memory mapped.
 143 */
 144#ifdef CONFIG_PPC_DCR
 145
/**
 * temac_dma_dcr_in - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
	return dcr_read(lp->sdma_dcrs, reg);
}
 153
/**
 * temac_dma_dcr_out - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
	dcr_write(lp->sdma_dcrs, reg, value);
}
 161
/**
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions
 *
 * Returns 0 and installs the DCR-based dma_in/dma_out accessors when the
 * device tree provides a DCR resource; returns -1 otherwise so the caller
 * can fall back to memory-mapped access.
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
				struct device_node *np)
{
	unsigned int dcrs;

	/* setup the dcr address mapping if it's in the device tree */

	dcrs = dcr_resource_start(np, 0);
	if (dcrs != 0) {
		/* NOTE(review): dcr_map() failure is not checked here --
		 * confirm whether it can fail on supported platforms. */
		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
		lp->dma_in = temac_dma_dcr_in;
		lp->dma_out = temac_dma_dcr_out;
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
		return 0;
	}
	/* no DCR in the device tree, indicate a failure */
	return -1;
}
 184
 185#else
 186
/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze.  Always reports failure (-1) so the caller
 * uses the memory-mapped DMA accessors instead.
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
				struct device_node *np)
{
	return -1;
}
 196
 197#endif
 198
 199/**
 200 * temac_dma_bd_release - Release buffer descriptor rings
 201 */
 202static void temac_dma_bd_release(struct net_device *ndev)
 203{
 204        struct temac_local *lp = netdev_priv(ndev);
 205        int i;
 206
 207        /* Reset Local Link (DMA) */
 208        lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
 209
 210        for (i = 0; i < RX_BD_NUM; i++) {
 211                if (!lp->rx_skb[i])
 212                        break;
 213                else {
 214                        dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
 215                                        XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
 216                        dev_kfree_skb(lp->rx_skb[i]);
 217                }
 218        }
 219        if (lp->rx_bd_v)
 220                dma_free_coherent(ndev->dev.parent,
 221                                sizeof(*lp->rx_bd_v) * RX_BD_NUM,
 222                                lp->rx_bd_v, lp->rx_bd_p);
 223        if (lp->tx_bd_v)
 224                dma_free_coherent(ndev->dev.parent,
 225                                sizeof(*lp->tx_bd_v) * TX_BD_NUM,
 226                                lp->tx_bd_v, lp->tx_bd_p);
 227        if (lp->rx_skb)
 228                kfree(lp->rx_skb);
 229}
 230
 231/**
 232 * temac_dma_bd_init - Setup buffer descriptor rings
 233 */
 234static int temac_dma_bd_init(struct net_device *ndev)
 235{
 236        struct temac_local *lp = netdev_priv(ndev);
 237        struct sk_buff *skb;
 238        int i;
 239
 240        lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
 241        if (!lp->rx_skb) {
 242                dev_err(&ndev->dev,
 243                                "can't allocate memory for DMA RX buffer\n");
 244                goto out;
 245        }
 246        /* allocate the tx and rx ring buffer descriptors. */
 247        /* returns a virtual address and a physical address. */
 248        lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 249                                         sizeof(*lp->tx_bd_v) * TX_BD_NUM,
 250                                         &lp->tx_bd_p, GFP_KERNEL);
 251        if (!lp->tx_bd_v) {
 252                dev_err(&ndev->dev,
 253                                "unable to allocate DMA TX buffer descriptors");
 254                goto out;
 255        }
 256        lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 257                                         sizeof(*lp->rx_bd_v) * RX_BD_NUM,
 258                                         &lp->rx_bd_p, GFP_KERNEL);
 259        if (!lp->rx_bd_v) {
 260                dev_err(&ndev->dev,
 261                                "unable to allocate DMA RX buffer descriptors");
 262                goto out;
 263        }
 264
 265        memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
 266        for (i = 0; i < TX_BD_NUM; i++) {
 267                lp->tx_bd_v[i].next = lp->tx_bd_p +
 268                                sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
 269        }
 270
 271        memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
 272        for (i = 0; i < RX_BD_NUM; i++) {
 273                lp->rx_bd_v[i].next = lp->rx_bd_p +
 274                                sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
 275
 276                skb = netdev_alloc_skb_ip_align(ndev,
 277                                                XTE_MAX_JUMBO_FRAME_SIZE);
 278
 279                if (skb == 0) {
 280                        dev_err(&ndev->dev, "alloc_skb error %d\n", i);
 281                        goto out;
 282                }
 283                lp->rx_skb[i] = skb;
 284                /* returns physical address of skb->data */
 285                lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
 286                                                     skb->data,
 287                                                     XTE_MAX_JUMBO_FRAME_SIZE,
 288                                                     DMA_FROM_DEVICE);
 289                lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
 290                lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
 291        }
 292
 293        lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
 294                                          CHNL_CTRL_IRQ_EN |
 295                                          CHNL_CTRL_IRQ_DLY_EN |
 296                                          CHNL_CTRL_IRQ_COAL_EN);
 297        /* 0x10220483 */
 298        /* 0x00100483 */
 299        lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
 300                                          CHNL_CTRL_IRQ_EN |
 301                                          CHNL_CTRL_IRQ_DLY_EN |
 302                                          CHNL_CTRL_IRQ_COAL_EN |
 303                                          CHNL_CTRL_IRQ_IOE);
 304        /* 0xff010283 */
 305
 306        lp->dma_out(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
 307        lp->dma_out(lp, RX_TAILDESC_PTR,
 308                       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
 309        lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
 310
 311        return 0;
 312
 313out:
 314        temac_dma_bd_release(ndev);
 315        return -ENOMEM;
 316}
 317
 318/* ---------------------------------------------------------------------
 319 * net_device_ops
 320 */
 321
/*
 * temac_set_mac_address - program a MAC address into the TEMAC unicast
 * address filter registers.
 *
 * If @address is non-NULL it is copied into ndev->dev_addr first; an
 * invalid resulting address is replaced with a random one.  Takes
 * lp->indirect_mutex around the indirect register writes.
 * Always returns 0.
 */
static int temac_set_mac_address(struct net_device *ndev, void *address)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);

	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);
	else
		ndev->addr_assign_type &= ~NET_ADDR_RANDOM;

	/* set up unicast MAC address filter set its mac address */
	mutex_lock(&lp->indirect_mutex);
	/* MAC bytes 0-3 go into UAW0, little-end first */
	temac_indirect_out32(lp, XTE_UAW0_OFFSET,
			     (ndev->dev_addr[0]) |
			     (ndev->dev_addr[1] << 8) |
			     (ndev->dev_addr[2] << 16) |
			     (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1
	 * so don't affect them Set MAC bits [47:32] in EUAW1 */
	temac_indirect_out32(lp, XTE_UAW1_OFFSET,
			     (ndev->dev_addr[4] & 0x000000ff) |
			     (ndev->dev_addr[5] << 8));
	mutex_unlock(&lp->indirect_mutex);

	return 0;
}
 350
 351static int netdev_set_mac_address(struct net_device *ndev, void *p)
 352{
 353        struct sockaddr *addr = p;
 354
 355        return temac_set_mac_address(ndev, addr->sa_data);
 356}
 357
/*
 * temac_set_multicast_list - ndo_set_rx_mode handler: program the
 * TEMAC address filter.
 *
 * Promiscuous mode is enabled when requested (IFF_ALLMULTI/IFF_PROMISC)
 * or when there are more multicast addresses than CAM entries.
 * Otherwise up to MULTICAST_CAM_TABLE_NUM multicast addresses are
 * written into the CAM, or the filter is cleared entirely when the
 * multicast list is empty.
 */
static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 multi_addr_msw, multi_addr_lsw, val;
	int i;

	mutex_lock(&lp->indirect_mutex);
	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
		/*
		 *	We must make the kernel realise we had to move
		 *	into promisc mode or we start all out war on
		 *	the cable. If it was a promisc request the
		 *	flag is already set. If not we assert it.
		 */
		ndev->flags |= IFF_PROMISC;
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= MULTICAST_CAM_TABLE_NUM)
				break;
			/* MAW0 takes MAC bytes 0-3; writing MAW1 (which
			 * carries bytes 4-5 plus the CAM entry index in
			 * bits 16+) commits the CAM entry. */
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
			temac_indirect_out32(lp, XTE_MAW0_OFFSET,
					     multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32(lp, XTE_MAW1_OFFSET,
					     multi_addr_lsw);
			i++;
		}
	} else {
		/* Empty list: clear promiscuous bit and wipe CAM entry 0 */
		val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
		temac_indirect_out32(lp, XTE_AFM_OFFSET,
				     val & ~XTE_AFM_EPPRM_MASK);
		temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
	mutex_unlock(&lp->indirect_mutex);
}
 405
 406struct temac_option {
 407        int flg;
 408        u32 opt;
 409        u32 reg;
 410        u32 m_or;
 411        u32 m_and;
 412} temac_options[] = {
 413        /* Turn on jumbo packet support for both Rx and Tx */
 414        {
 415                .opt = XTE_OPTION_JUMBO,
 416                .reg = XTE_TXC_OFFSET,
 417                .m_or = XTE_TXC_TXJMBO_MASK,
 418        },
 419        {
 420                .opt = XTE_OPTION_JUMBO,
 421                .reg = XTE_RXC1_OFFSET,
 422                .m_or =XTE_RXC1_RXJMBO_MASK,
 423        },
 424        /* Turn on VLAN packet support for both Rx and Tx */
 425        {
 426                .opt = XTE_OPTION_VLAN,
 427                .reg = XTE_TXC_OFFSET,
 428                .m_or =XTE_TXC_TXVLAN_MASK,
 429        },
 430        {
 431                .opt = XTE_OPTION_VLAN,
 432                .reg = XTE_RXC1_OFFSET,
 433                .m_or =XTE_RXC1_RXVLAN_MASK,
 434        },
 435        /* Turn on FCS stripping on receive packets */
 436        {
 437                .opt = XTE_OPTION_FCS_STRIP,
 438                .reg = XTE_RXC1_OFFSET,
 439                .m_or =XTE_RXC1_RXFCS_MASK,
 440        },
 441        /* Turn on FCS insertion on transmit packets */
 442        {
 443                .opt = XTE_OPTION_FCS_INSERT,
 444                .reg = XTE_TXC_OFFSET,
 445                .m_or =XTE_TXC_TXFCS_MASK,
 446        },
 447        /* Turn on length/type field checking on receive packets */
 448        {
 449                .opt = XTE_OPTION_LENTYPE_ERR,
 450                .reg = XTE_RXC1_OFFSET,
 451                .m_or =XTE_RXC1_RXLT_MASK,
 452        },
 453        /* Turn on flow control */
 454        {
 455                .opt = XTE_OPTION_FLOW_CONTROL,
 456                .reg = XTE_FCC_OFFSET,
 457                .m_or =XTE_FCC_RXFLO_MASK,
 458        },
 459        /* Turn on flow control */
 460        {
 461                .opt = XTE_OPTION_FLOW_CONTROL,
 462                .reg = XTE_FCC_OFFSET,
 463                .m_or =XTE_FCC_TXFLO_MASK,
 464        },
 465        /* Turn on promiscuous frame filtering (all frames are received ) */
 466        {
 467                .opt = XTE_OPTION_PROMISC,
 468                .reg = XTE_AFM_OFFSET,
 469                .m_or =XTE_AFM_EPPRM_MASK,
 470        },
 471        /* Enable transmitter if not already enabled */
 472        {
 473                .opt = XTE_OPTION_TXEN,
 474                .reg = XTE_TXC_OFFSET,
 475                .m_or =XTE_TXC_TXEN_MASK,
 476        },
 477        /* Enable receiver? */
 478        {
 479                .opt = XTE_OPTION_RXEN,
 480                .reg = XTE_RXC1_OFFSET,
 481                .m_or =XTE_RXC1_RXEN_MASK,
 482        },
 483        {}
 484};
 485
 486/**
 487 * temac_setoptions
 488 */
 489static u32 temac_setoptions(struct net_device *ndev, u32 options)
 490{
 491        struct temac_local *lp = netdev_priv(ndev);
 492        struct temac_option *tp = &temac_options[0];
 493        int reg;
 494
 495        mutex_lock(&lp->indirect_mutex);
 496        while (tp->opt) {
 497                reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
 498                if (options & tp->opt)
 499                        reg |= tp->m_or;
 500                temac_indirect_out32(lp, tp->reg, reg);
 501                tp++;
 502        }
 503        lp->options |= options;
 504        mutex_unlock(&lp->indirect_mutex);
 505
 506        return 0;
 507}
 508
/* Initialize temac: reset the RX and TX paths and the Local Link DMA
 * engine, rebuild the descriptor rings, then re-apply the MAC address,
 * address filter and previously selected options (RX/TX are synced to
 * hardware disabled first, then re-enabled per lp->options). */
static void temac_device_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;

	/* Perform a software reset */

	/* 0x300 host enable bit ? */
	/* reset PHY through control register ?:1 */

	dev_dbg(&ndev->dev, "%s()\n", __func__);

	mutex_lock(&lp->indirect_mutex);
	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset RX reset timeout!!\n");
			break;
		}
	}

	/* Reset the transmitter and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset TX reset timeout!!\n");
			break;
		}
	}

	/* Disable the receiver */
	val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	timeout = 1000;
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset DMA reset timeout!!\n");
			break;
		}
	}
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

	/* Rebuild the descriptor rings from scratch */
	if (temac_dma_bd_init(ndev)) {
		dev_err(&ndev->dev,
				"temac_device_reset descriptor allocation failed\n");
	}

	/* Clear RX/TX control and re-enable RX flow control only */
	temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);

	mutex_unlock(&lp->indirect_mutex);

	/* Sync default options with HW
	 * but leave receiver and transmitter disabled.  */
	temac_setoptions(ndev,
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

	temac_set_mac_address(ndev, NULL);

	/* Set address filter table */
	temac_set_multicast_list(ndev);
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");

	/* Init Driver variable */
	ndev->trans_start = jiffies; /* prevent tx timeout */
}
 592
/* PHY state-change callback (registered through of_phy_connect in
 * temac_open): when the combined speed/duplex/link state differs from
 * the last programmed state, rewrite the TEMAC link-speed field to match
 * phy->speed and log the new link status. */
void temac_adjust_link(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phy = lp->phy_dev;
	u32 mii_speed;
	int link_state;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	mutex_lock(&lp->indirect_mutex);
	if (lp->last_link != link_state) {
		mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
		case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
		case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
		case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
		}

		/* Write new speed setting out to TEMAC */
		temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
		lp->last_link = link_state;
		phy_print_status(phy);
	}
	mutex_unlock(&lp->indirect_mutex);
}
 621
/* Reclaim completed TX descriptors; called from the TX interrupt handler
 * (IRQ context, hence dev_kfree_skb_irq).  For each descriptor marked
 * complete: unmap its buffer, free the skb that temac_start_xmit stashed
 * in app4, clear the descriptor, update stats, advance tx_bd_ci, and
 * finally wake the TX queue. */
static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	stat = cur_p->app0;

	while (stat & STS_CTRL_APP0_CMPLT) {
		dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
				 DMA_TO_DEVICE);
		/* app4 is non-zero only on the SOP descriptor of a frame */
		if (cur_p->app4)
			dev_kfree_skb_irq((struct sk_buff *)cur_p->app4);
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += cur_p->len;

		lp->tx_bd_ci++;
		if (lp->tx_bd_ci >= TX_BD_NUM)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		stat = cur_p->app0;
	}

	netif_wake_queue(ndev);
}
 655
 656static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
 657{
 658        struct cdmac_bd *cur_p;
 659        int tail;
 660
 661        tail = lp->tx_bd_tail;
 662        cur_p = &lp->tx_bd_v[tail];
 663
 664        do {
 665                if (cur_p->app0)
 666                        return NETDEV_TX_BUSY;
 667
 668                tail++;
 669                if (tail >= TX_BD_NUM)
 670                        tail = 0;
 671
 672                cur_p = &lp->tx_bd_v[tail];
 673                num_frag--;
 674        } while (num_frag >= 0);
 675
 676        return 0;
 677}
 678
 679static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 680{
 681        struct temac_local *lp = netdev_priv(ndev);
 682        struct cdmac_bd *cur_p;
 683        dma_addr_t start_p, tail_p;
 684        int ii;
 685        unsigned long num_frag;
 686        skb_frag_t *frag;
 687
 688        num_frag = skb_shinfo(skb)->nr_frags;
 689        frag = &skb_shinfo(skb)->frags[0];
 690        start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 691        cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 692
 693        if (temac_check_tx_bd_space(lp, num_frag)) {
 694                if (!netif_queue_stopped(ndev)) {
 695                        netif_stop_queue(ndev);
 696                        return NETDEV_TX_BUSY;
 697                }
 698                return NETDEV_TX_BUSY;
 699        }
 700
 701        cur_p->app0 = 0;
 702        if (skb->ip_summed == CHECKSUM_PARTIAL) {
 703                unsigned int csum_start_off = skb_checksum_start_offset(skb);
 704                unsigned int csum_index_off = csum_start_off + skb->csum_offset;
 705
 706                cur_p->app0 |= 1; /* TX Checksum Enabled */
 707                cur_p->app1 = (csum_start_off << 16) | csum_index_off;
 708                cur_p->app2 = 0;  /* initial checksum seed */
 709        }
 710
 711        cur_p->app0 |= STS_CTRL_APP0_SOP;
 712        cur_p->len = skb_headlen(skb);
 713        cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
 714                                     DMA_TO_DEVICE);
 715        cur_p->app4 = (unsigned long)skb;
 716
 717        for (ii = 0; ii < num_frag; ii++) {
 718                lp->tx_bd_tail++;
 719                if (lp->tx_bd_tail >= TX_BD_NUM)
 720                        lp->tx_bd_tail = 0;
 721
 722                cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 723                cur_p->phys = dma_map_single(ndev->dev.parent,
 724                                             skb_frag_address(frag),
 725                                             skb_frag_size(frag), DMA_TO_DEVICE);
 726                cur_p->len = skb_frag_size(frag);
 727                cur_p->app0 = 0;
 728                frag++;
 729        }
 730        cur_p->app0 |= STS_CTRL_APP0_EOP;
 731
 732        tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
 733        lp->tx_bd_tail++;
 734        if (lp->tx_bd_tail >= TX_BD_NUM)
 735                lp->tx_bd_tail = 0;
 736
 737        skb_tx_timestamp(skb);
 738
 739        /* Kick off the transfer */
 740        lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
 741
 742        return NETDEV_TX_OK;
 743}
 744
 745
/* Receive path, called from the RX interrupt handler under rx_lock:
 * for each completed RX descriptor, pass its skb up the stack, attach a
 * freshly allocated and DMA-mapped replacement skb to the descriptor,
 * then re-arm the DMA engine by writing the tail pointer. */
static void ll_temac_recv(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	unsigned int bdstat;
	struct cdmac_bd *cur_p;
	dma_addr_t tail_p;
	int length;
	unsigned long flags;

	spin_lock_irqsave(&lp->rx_lock, flags);

	tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	bdstat = cur_p->app0;
	while ((bdstat & STS_CTRL_APP0_CMPLT)) {

		skb = lp->rx_skb[lp->rx_bd_ci];
		/* hardware reports the received length in app4[13:0] */
		length = cur_p->app4 & 0x3FFF;

		/* NOTE(review): this buffer was mapped with size
		 * XTE_MAX_JUMBO_FRAME_SIZE in temac_dma_bd_init() but is
		 * unmapped here with the packet length; the DMA API
		 * expects matching sizes -- confirm. */
		dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* if we're doing rx csum offload, set it up */
		if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
			(skb->protocol == __constant_htons(ETH_P_IP)) &&
			(skb->len > 64)) {

			/* NOTE(review): app3 presumably carries the raw
			 * hardware checksum; CHECKSUM_COMPLETE requires a
			 * full-packet checksum -- verify against the
			 * TEMAC datasheet. */
			skb->csum = cur_p->app3 & 0xFFFF;
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (!skb_defer_rx_timestamp(skb))
			netif_rx(skb);

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += length;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);

		if (new_skb == 0) {
			/* out of memory: leave the descriptor as-is and
			 * bail out without re-arming it */
			dev_err(&ndev->dev, "no memory for new sk_buff\n");
			spin_unlock_irqrestore(&lp->rx_lock, flags);
			return;
		}

		cur_p->app0 = STS_CTRL_APP0_IRQONEND;
		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
					     XTE_MAX_JUMBO_FRAME_SIZE,
					     DMA_FROM_DEVICE);
		cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
		lp->rx_skb[lp->rx_bd_ci] = new_skb;

		lp->rx_bd_ci++;
		if (lp->rx_bd_ci >= RX_BD_NUM)
			lp->rx_bd_ci = 0;

		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
		bdstat = cur_p->app0;
	}
	lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);

	spin_unlock_irqrestore(&lp->rx_lock, flags);
}
 816
/* TX channel interrupt handler: read-and-acknowledge the status
 * register, reclaim completed descriptors on coalesce/delay interrupts,
 * and log (only) DMA errors signalled by status bit 0x080. */
static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Writing the status value back acknowledges the interrupt */
	status = lp->dma_in(lp, TX_IRQ_REG);
	lp->dma_out(lp, TX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		temac_start_xmit_done(lp->ndev);
	if (status & 0x080)
		dev_err(&ndev->dev, "DMA error 0x%x\n", status);

	return IRQ_HANDLED;
}
 833
/* RX channel interrupt handler: read-and-acknowledge the status
 * register, then run the receive path on coalesce/delay interrupts. */
static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status registers */
	status = lp->dma_in(lp, RX_IRQ_REG);
	lp->dma_out(lp, RX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		ll_temac_recv(lp->ndev);

	return IRQ_HANDLED;
}
 849
/*
 * temac_open - ndo_open callback: bring the interface up.
 *
 * Sequence is order-sensitive: connect and start the PHY first (when a
 * phy-handle was found at probe time), then reset the MAC and DMA, and
 * only then request the TX/RX DMA interrupts.  On any IRQ failure the
 * unwind labels release what was acquired so the device is left fully
 * down.
 *
 * Returns 0 on success, -ENODEV if the PHY cannot be connected, or the
 * request_irq() error code.
 */
static int temac_open(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);
        int rc;

        dev_dbg(&ndev->dev, "temac_open()\n");

        if (lp->phy_node) {
                /* 0/0: no flags, PHY_INTERFACE_MODE 0 -- interface mode is
                 * not taken from the device tree here; confirm acceptable */
                lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
                                             temac_adjust_link, 0, 0);
                if (!lp->phy_dev) {
                        dev_err(lp->dev, "of_phy_connect() failed\n");
                        return -ENODEV;
                }

                phy_start(lp->phy_dev);
        }

        temac_device_reset(ndev);

        rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
        if (rc)
                goto err_tx_irq;
        rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
        if (rc)
                goto err_rx_irq;

        return 0;

 err_rx_irq:
        free_irq(lp->tx_irq, ndev);
 err_tx_irq:
        if (lp->phy_dev)
                phy_disconnect(lp->phy_dev);
        lp->phy_dev = NULL;
        dev_err(lp->dev, "request_irq() failed\n");
        return rc;
}
 888
/*
 * temac_stop - ndo_stop callback: take the interface down.
 *
 * Teardown mirrors temac_open() in reverse and is order-sensitive:
 * free the IRQs first so no handler can run, then disconnect the PHY,
 * and finally release the DMA buffer descriptors.
 */
static int temac_stop(struct net_device *ndev)
{
        struct temac_local *lp = netdev_priv(ndev);

        dev_dbg(&ndev->dev, "temac_close()\n");

        free_irq(lp->tx_irq, ndev);
        free_irq(lp->rx_irq, ndev);

        if (lp->phy_dev)
                phy_disconnect(lp->phy_dev);
        lp->phy_dev = NULL;

        temac_dma_bd_release(ndev);

        return 0;
}
 906
 907#ifdef CONFIG_NET_POLL_CONTROLLER
 908static void
 909temac_poll_controller(struct net_device *ndev)
 910{
 911        struct temac_local *lp = netdev_priv(ndev);
 912
 913        disable_irq(lp->tx_irq);
 914        disable_irq(lp->rx_irq);
 915
 916        ll_temac_rx_irq(lp->tx_irq, ndev);
 917        ll_temac_tx_irq(lp->rx_irq, ndev);
 918
 919        enable_irq(lp->tx_irq);
 920        enable_irq(lp->rx_irq);
 921}
 922#endif
 923
 924static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 925{
 926        struct temac_local *lp = netdev_priv(ndev);
 927
 928        if (!netif_running(ndev))
 929                return -EINVAL;
 930
 931        if (!lp->phy_dev)
 932                return -EINVAL;
 933
 934        return phy_mii_ioctl(lp->phy_dev, rq, cmd);
 935}
 936
/* Net device operations for the TEMAC interface. */
static const struct net_device_ops temac_netdev_ops = {
        .ndo_open = temac_open,
        .ndo_stop = temac_stop,
        .ndo_start_xmit = temac_start_xmit,
        /* NOTE(review): netdev_set_mac_address is presumably a wrapper
         * around temac_set_mac_address defined earlier in this file --
         * confirm against the full source */
        .ndo_set_mac_address = netdev_set_mac_address,
        .ndo_validate_addr = eth_validate_addr,
        .ndo_do_ioctl = temac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = temac_poll_controller,
#endif
};
 948
 949/* ---------------------------------------------------------------------
 950 * SYSFS device attributes
 951 */
 952static ssize_t temac_show_llink_regs(struct device *dev,
 953                                     struct device_attribute *attr, char *buf)
 954{
 955        struct net_device *ndev = dev_get_drvdata(dev);
 956        struct temac_local *lp = netdev_priv(ndev);
 957        int i, len = 0;
 958
 959        for (i = 0; i < 0x11; i++)
 960                len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
 961                               (i % 8) == 7 ? "\n" : " ");
 962        len += sprintf(buf + len, "\n");
 963
 964        return len;
 965}
 966
/* Read-only (0440) sysfs attribute exposing the DMA register dump. */
static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
        &dev_attr_llink_regs.attr,
        NULL,   /* sentinel */
};

/* Attribute group registered in temac_of_probe() via sysfs_create_group() */
static const struct attribute_group temac_attr_group = {
        .attrs = temac_device_attrs,
};
 977
 978/* ethtool support */
 979static int temac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 980{
 981        struct temac_local *lp = netdev_priv(ndev);
 982        return phy_ethtool_gset(lp->phy_dev, cmd);
 983}
 984
 985static int temac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
 986{
 987        struct temac_local *lp = netdev_priv(ndev);
 988        return phy_ethtool_sset(lp->phy_dev, cmd);
 989}
 990
 991static int temac_nway_reset(struct net_device *ndev)
 992{
 993        struct temac_local *lp = netdev_priv(ndev);
 994        return phy_start_aneg(lp->phy_dev);
 995}
 996
/* ethtool operations: all link handling is delegated to the PHY layer. */
static const struct ethtool_ops temac_ethtool_ops = {
        .get_settings = temac_get_settings,
        .set_settings = temac_set_settings,
        .nway_reset = temac_nway_reset,
        .get_link = ethtool_op_get_link,
        .get_ts_info = ethtool_op_get_ts_info,
};
1004
1005static int __devinit temac_of_probe(struct platform_device *op)
1006{
1007        struct device_node *np;
1008        struct temac_local *lp;
1009        struct net_device *ndev;
1010        const void *addr;
1011        __be32 *p;
1012        int size, rc = 0;
1013
1014        /* Init network device structure */
1015        ndev = alloc_etherdev(sizeof(*lp));
1016        if (!ndev)
1017                return -ENOMEM;
1018
1019        ether_setup(ndev);
1020        dev_set_drvdata(&op->dev, ndev);
1021        SET_NETDEV_DEV(ndev, &op->dev);
1022        ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
1023        ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
1024        ndev->netdev_ops = &temac_netdev_ops;
1025        ndev->ethtool_ops = &temac_ethtool_ops;
1026#if 0
1027        ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
1028        ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
1029        ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
1030        ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
1031        ndev->features |= NETIF_F_HW_VLAN_TX; /* Transmit VLAN hw accel */
1032        ndev->features |= NETIF_F_HW_VLAN_RX; /* Receive VLAN hw acceleration */
1033        ndev->features |= NETIF_F_HW_VLAN_FILTER; /* Receive VLAN filtering */
1034        ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
1035        ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
1036        ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
1037        ndev->features |= NETIF_F_LRO; /* large receive offload */
1038#endif
1039
1040        /* setup temac private info structure */
1041        lp = netdev_priv(ndev);
1042        lp->ndev = ndev;
1043        lp->dev = &op->dev;
1044        lp->options = XTE_OPTION_DEFAULTS;
1045        spin_lock_init(&lp->rx_lock);
1046        mutex_init(&lp->indirect_mutex);
1047
1048        /* map device registers */
1049        lp->regs = of_iomap(op->dev.of_node, 0);
1050        if (!lp->regs) {
1051                dev_err(&op->dev, "could not map temac regs.\n");
1052                goto nodev;
1053        }
1054
1055        /* Setup checksum offload, but default to off if not specified */
1056        lp->temac_features = 0;
1057        p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
1058        if (p && be32_to_cpu(*p)) {
1059                lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
1060                /* Can checksum TCP/UDP over IPv4. */
1061                ndev->features |= NETIF_F_IP_CSUM;
1062        }
1063        p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
1064        if (p && be32_to_cpu(*p))
1065                lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
1066
1067        /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
1068        np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
1069        if (!np) {
1070                dev_err(&op->dev, "could not find DMA node\n");
1071                goto err_iounmap;
1072        }
1073
1074        /* Setup the DMA register accesses, could be DCR or memory mapped */
1075        if (temac_dcr_setup(lp, op, np)) {
1076
1077                /* no DCR in the device tree, try non-DCR */
1078                lp->sdma_regs = of_iomap(np, 0);
1079                if (lp->sdma_regs) {
1080                        lp->dma_in = temac_dma_in32;
1081                        lp->dma_out = temac_dma_out32;
1082                        dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
1083                } else {
1084                        dev_err(&op->dev, "unable to map DMA registers\n");
1085                        of_node_put(np);
1086                        goto err_iounmap;
1087                }
1088        }
1089
1090        lp->rx_irq = irq_of_parse_and_map(np, 0);
1091        lp->tx_irq = irq_of_parse_and_map(np, 1);
1092
1093        of_node_put(np); /* Finished with the DMA node; drop the reference */
1094
1095        if (!lp->rx_irq || !lp->tx_irq) {
1096                dev_err(&op->dev, "could not determine irqs\n");
1097                rc = -ENOMEM;
1098                goto err_iounmap_2;
1099        }
1100
1101
1102        /* Retrieve the MAC address */
1103        addr = of_get_property(op->dev.of_node, "local-mac-address", &size);
1104        if ((!addr) || (size != 6)) {
1105                dev_err(&op->dev, "could not find MAC address\n");
1106                rc = -ENODEV;
1107                goto err_iounmap_2;
1108        }
1109        temac_set_mac_address(ndev, (void *)addr);
1110
1111        rc = temac_mdio_setup(lp, op->dev.of_node);
1112        if (rc)
1113                dev_warn(&op->dev, "error registering MDIO bus\n");
1114
1115        lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
1116        if (lp->phy_node)
1117                dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np);
1118
1119        /* Add the device attributes */
1120        rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
1121        if (rc) {
1122                dev_err(lp->dev, "Error creating sysfs files\n");
1123                goto err_iounmap_2;
1124        }
1125
1126        rc = register_netdev(lp->ndev);
1127        if (rc) {
1128                dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
1129                goto err_register_ndev;
1130        }
1131
1132        return 0;
1133
1134 err_register_ndev:
1135        sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
1136 err_iounmap_2:
1137        if (lp->sdma_regs)
1138                iounmap(lp->sdma_regs);
1139 err_iounmap:
1140        iounmap(lp->regs);
1141 nodev:
1142        free_netdev(ndev);
1143        ndev = NULL;
1144        return rc;
1145}
1146
/*
 * temac_of_remove - OF platform remove: undo temac_of_probe().
 *
 * Tears down the MDIO bus, unregisters the net_device and removes the
 * sysfs group, drops the PHY node reference, then unmaps the register
 * windows and frees the net_device.
 *
 * NOTE(review): the MDIO bus is torn down before unregister_netdev();
 * confirm no open-interface path still uses the bus at that point.
 */
static int __devexit temac_of_remove(struct platform_device *op)
{
        struct net_device *ndev = dev_get_drvdata(&op->dev);
        struct temac_local *lp = netdev_priv(ndev);

        temac_mdio_teardown(lp);
        unregister_netdev(ndev);
        sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
        if (lp->phy_node)
                of_node_put(lp->phy_node);
        lp->phy_node = NULL;
        dev_set_drvdata(&op->dev, NULL);
        iounmap(lp->regs);
        if (lp->sdma_regs)
                iounmap(lp->sdma_regs);
        free_netdev(ndev);
        return 0;
}
1165
/* Device tree compatible strings for the supported xps-ll-temac cores. */
static struct of_device_id temac_of_match[] __devinitdata = {
        { .compatible = "xlnx,xps-ll-temac-1.01.b", },
        { .compatible = "xlnx,xps-ll-temac-2.00.a", },
        { .compatible = "xlnx,xps-ll-temac-2.02.a", },
        { .compatible = "xlnx,xps-ll-temac-2.03.a", },
        {},     /* sentinel */
};
MODULE_DEVICE_TABLE(of, temac_of_match);
1174
/* Platform driver glue; matched against the OF table above. */
static struct platform_driver temac_of_driver = {
        .probe = temac_of_probe,
        .remove = __devexit_p(temac_of_remove),
        .driver = {
                .owner = THIS_MODULE,
                .name = "xilinx_temac",
                .of_match_table = temac_of_match,
        },
};

/* Generates module init/exit that register/unregister the driver */
module_platform_driver(temac_of_driver);

MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");
1190