linux/drivers/net/ethernet/xilinx/ll_temac_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd.,  Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems,  David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus.  However, indirect access to MDIO
 *   registers takes considerably more clock cycles than to TEMAC registers.
 *   MDIO accesses are long, so threads doing them should probably sleep
 *   rather than busywait.  However, since only one indirect access can be
 *   in progress at any given time, that means that *all* indirect accesses
 *   could end up sleeping (to wait for an MDIO access to complete).
 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
 *   or rx, so this should be okay.
 *
 * TODO:
 * - Factor out locallink DMA code into separate driver
 * - Fix support for hardware checksumming.
 * - Testing.  Lots and lots of testing.
 *
 */

#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
#include <linux/udp.h>      /* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>

#include "ll_temac.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		64
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096

/* ---------------------------------------------------------------------
 * Low level register access functions
 */

static u32 _temac_ior_be(struct temac_local *lp, int offset)
{
	return ioread32be(lp->regs + offset);
}

static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
{
	return iowrite32be(value, lp->regs + offset);
}

static u32 _temac_ior_le(struct temac_local *lp, int offset)
{
	return ioread32(lp->regs + offset);
}

static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
{
	return iowrite32(value, lp->regs + offset);
}

static bool hard_acs_rdy(struct temac_local *lp)
{
	return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
}

static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
{
	ktime_t cur = ktime_get();

	return hard_acs_rdy(lp) || ktime_after(cur, timeout);
}

/* Poll for maximum 20 ms.  This is similar to the 2 jiffies @ 100 Hz
 * that was used before, and should cover MDIO bus speed down to 3200
 * Hz.
 */
#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)

/**
 * temac_indirect_busywait - Wait for current indirect register access
 * to complete.
 */
int temac_indirect_busywait(struct temac_local *lp)
{
	ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);

	spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
	if (WARN_ON(!hard_acs_rdy(lp)))
		return -ETIMEDOUT;
	else
		return 0;
}

/**
 * temac_indirect_in32 - Indirect register read access.  This function
 * must be called without lp->indirect_lock being held.
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
	unsigned long flags;
	int val;

	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, reg);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	return val;
}

/**
 * temac_indirect_in32_locked - Indirect register read access.  This
 * function must be called with lp->indirect_lock being held.  Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
{
	/* This initial wait should normally not spin, as we always
	 * try to wait for indirect access to complete before
	 * releasing the indirect_lock.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Initiate read from indirect register */
	temac_iow(lp, XTE_CTL0_OFFSET, reg);
	/* Wait for indirect register access to complete.  We really
	 * should not see timeouts, and could even end up causing
	 * problems for the following indirect access, so let's make a
	 * bit of WARN noise.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Value is ready now */
	return temac_ior(lp, XTE_LSW0_OFFSET);
}

/**
 * temac_indirect_out32 - Indirect register write access.  This function
 * must be called without lp->indirect_lock being held.
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, reg, value);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}

/**
 * temac_indirect_out32_locked - Indirect register write access.  This
 * function must be called with lp->indirect_lock being held.  Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
{
	/* As in temac_indirect_in32_locked(), we should normally not
	 * spin here.  And if it happens, we actually end up silently
	 * ignoring the write request.  Ouch.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return;
	/* Initiate write to indirect register */
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
	/* As in temac_indirect_in32_locked(), we should not see timeouts
	 * here.  And if it happens, we continue before the write has
	 * completed.  Not good.
	 */
	WARN_ON(temac_indirect_busywait(lp));
}
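
/* Typical batched use of the *_locked accessors (illustrative sketch,
 * mirroring how temac_device_reset() and temac_set_multicast_list()
 * below use them):
 *
 *	spin_lock_irqsave(lp->indirect_lock, flags);
 *	val = temac_indirect_in32_locked(lp, XTE_AFM_OFFSET);
 *	temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, val | mask);
 *	spin_unlock_irqrestore(lp->indirect_lock, flags);
 */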

/**
 * temac_dma_in32_* - Memory mapped DMA read, these functions expect a
 * register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses.  To be assigned to
 * lp->dma_in32.
 */
static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
	return ioread32be(lp->sdma_regs + (reg << 2));
}

static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
{
	return ioread32(lp->sdma_regs + (reg << 2));
}

/**
 * temac_dma_out32_* - Memory mapped DMA write, these functions expect
 * a register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses.  To be assigned to
 * lp->dma_out32.
 */
static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
{
	iowrite32be(value, lp->sdma_regs + (reg << 2));
}

static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
{
	iowrite32(value, lp->sdma_regs + (reg << 2));
}

/* DMA register access functions can be DCR based or memory mapped.
 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
 * memory mapped.
 */
#ifdef CONFIG_PPC_DCR

/**
 * temac_dma_dcr_in - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
	return dcr_read(lp->sdma_dcrs, reg);
}

/**
 * temac_dma_dcr_out - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
	dcr_write(lp->sdma_dcrs, reg, value);
}

/**
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	unsigned int dcrs;

	/* setup the dcr address mapping if it's in the device tree */

	dcrs = dcr_resource_start(np, 0);
	if (dcrs != 0) {
		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
		lp->dma_in = temac_dma_dcr_in;
		lp->dma_out = temac_dma_dcr_out;
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
		return 0;
	}
	/* no DCR in the device tree, indicate a failure */
	return -1;
}

#else

/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze and x86
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	return -1;
}

#endif

/**
 * temac_dma_bd_release - Release buffer descriptor rings
 */
static void temac_dma_bd_release(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	int i;

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

	for (i = 0; i < lp->rx_bd_num; i++) {
		if (!lp->rx_skb[i])
			break;

		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(lp->rx_skb[i]);
	}
	if (lp->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
				  lp->rx_bd_v, lp->rx_bd_p);
	if (lp->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
				  lp->tx_bd_v, lp->tx_bd_p);
}

/**
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t skb_dma_addr;
	int i;

	lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
				  sizeof(*lp->rx_skb), GFP_KERNEL);
	if (!lp->rx_skb)
		goto out;

	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

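	/* Chain the buffer descriptors into circular rings: each BD's
	 * "next" field holds the physical address of the following BD,
	 * with the last entry wrapping back to the first.
	 */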
	for (i = 0; i < lp->tx_bd_num; i++) {
		lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
			+ sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
			+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));

		skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
			goto out;
		lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
		lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
	}

	/* Configure DMA channel (irq setup) */
	lp->dma_out(lp, TX_CHNL_CTRL,
		    lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
		    0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
	lp->dma_out(lp, RX_CHNL_CTRL,
		    lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
		    CHNL_CTRL_IRQ_IOE |
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);

	/* Init descriptor indexes */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;
	lp->rx_bd_tail = lp->rx_bd_num - 1;

	/* Enable RX DMA transfers */
	wmb();
	lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));

	/* Prepare for TX DMA transfer */
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}

/* ---------------------------------------------------------------------
 * net_device_ops
 */

static void temac_do_set_mac_address(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;

	/* Set up the unicast MAC address filter with the device's
	 * MAC address
	 */
	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
				    (ndev->dev_addr[0]) |
				    (ndev->dev_addr[1] << 8) |
				    (ndev->dev_addr[2] << 16) |
				    (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1, so don't affect them.
	 * Set MAC bits [47:32] in EUAW1.
	 */
	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
				    (ndev->dev_addr[4] & 0x000000ff) |
				    (ndev->dev_addr[5] << 8));
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}

static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
	ether_addr_copy(ndev->dev_addr, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);
	temac_do_set_mac_address(ndev);
	return 0;
}

static int temac_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
	temac_do_set_mac_address(ndev);
	return 0;
}

static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 multi_addr_msw, multi_addr_lsw;
	int i = 0;
	unsigned long flags;
	bool promisc_mode_disabled = false;

	if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
		return;
	}

	spin_lock_irqsave(lp->indirect_lock, flags);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
				break;
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
			temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
						    multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
						    multi_addr_lsw);
			i++;
		}
	}

	/* Clear all or remaining/unused address table entries */
	while (i < MULTICAST_CAM_TABLE_NUM) {
		temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
		i++;
	}

	/* Enable address filter block if currently disabled */
	if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
	    & XTE_AFM_EPPRM_MASK) {
		temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
		promisc_mode_disabled = true;
	}

	spin_unlock_irqrestore(lp->indirect_lock, flags);

	if (promisc_mode_disabled)
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}

static struct temac_option {
	int flg;
	u32 opt;
	u32 reg;
	u32 m_or;
	u32 m_and;
} temac_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXJMBO_MASK,
	},
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXJMBO_MASK,
	},
	/* Turn on VLAN packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXVLAN_MASK,
	},
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXVLAN_MASK,
	},
	/* Turn on FCS stripping on receive packets */
	{
		.opt = XTE_OPTION_FCS_STRIP,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXFCS_MASK,
	},
	/* Turn on FCS insertion on transmit packets */
	{
		.opt = XTE_OPTION_FCS_INSERT,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXFCS_MASK,
	},
	/* Turn on length/type field checking on receive packets */
	{
		.opt = XTE_OPTION_LENTYPE_ERR,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXLT_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_RXFLO_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_TXFLO_MASK,
	},
	/* Turn on promiscuous frame filtering (all frames are received) */
	{
		.opt = XTE_OPTION_PROMISC,
		.reg = XTE_AFM_OFFSET,
		.m_or = XTE_AFM_EPPRM_MASK,
	},
	/* Enable transmitter if not already enabled */
	{
		.opt = XTE_OPTION_TXEN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXEN_MASK,
	},
	/* Enable receiver if not already enabled */
	{
		.opt = XTE_OPTION_RXEN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXEN_MASK,
	},
	{}
};

/**
 * temac_setoptions
 */
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct temac_option *tp = &temac_options[0];
	int reg;
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	while (tp->opt) {
		reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
		if (options & tp->opt) {
			reg |= tp->m_or;
			temac_indirect_out32_locked(lp, tp->reg, reg);
		}
		tp++;
	}
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	lp->options |= options;

	return 0;
}

/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;
	unsigned long flags;

	/* Perform a software reset */

	/* 0x300 host enable bit ? */
	/* reset PHY through control register ?:1 */

	dev_dbg(&ndev->dev, "%s()\n", __func__);

	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset RX reset timeout!!\n");
			break;
		}
	}

	/* Reset the transmitter and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset TX reset timeout!!\n");
			break;
		}
	}

	/* Disable the receiver */
	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
				    val & ~XTE_RXC1_RXEN_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	timeout = 1000;
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset DMA reset timeout!!\n");
			break;
		}
	}
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

	if (temac_dma_bd_init(ndev)) {
		dev_err(&ndev->dev,
			"temac_device_reset descriptor allocation failed\n");
	}

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	temac_setoptions(ndev,
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

	temac_do_set_mac_address(ndev);

	/* Set address filter table */
	temac_set_multicast_list(ndev);
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");

	/* Init Driver variable */
	netif_trans_update(ndev); /* prevent tx timeout */
}

static void temac_adjust_link(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	u32 mii_speed;
	int link_state;
	unsigned long flags;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	if (lp->last_link != link_state) {
		spin_lock_irqsave(lp->indirect_lock, flags);
		mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
		case SPEED_1000:
			mii_speed |= XTE_EMCFG_LINKSPD_1000;
			break;
		case SPEED_100:
			mii_speed |= XTE_EMCFG_LINKSPD_100;
			break;
		case SPEED_10:
			mii_speed |= XTE_EMCFG_LINKSPD_10;
			break;
		}

		/* Write new speed setting out to TEMAC */
		temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
		spin_unlock_irqrestore(lp->indirect_lock, flags);

		lp->last_link = link_state;
		phy_print_status(phy);
	}
}

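/* On transmit, the TX buffer descriptor's app3/app4 words are not used
 * by the hardware, so the driver stashes the skb pointer there and
 * retrieves it again in temac_start_xmit_done().
 */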
#ifdef CONFIG_64BIT

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app3 = (u32)(((u64)p) >> 32);
	bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(((u64)(bd->app3) << 32) | bd->app4);
}

#else

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app4 = (u32)p;
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(bd->app4);
}

#endif

static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;
	struct sk_buff *skb;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	stat = be32_to_cpu(cur_p->app0);

	while (stat & STS_CTRL_APP0_CMPLT) {
		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
		if (skb)
			dev_consume_skb_irq(skb);
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);

		lp->tx_bd_ci++;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		stat = be32_to_cpu(cur_p->app0);
	}

	/* Matches barrier in temac_start_xmit */
	smp_mb();

	netif_wake_queue(ndev);
}

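/* A TX descriptor with a non-zero app0 word is still in flight (app0 is
 * cleared by temac_start_xmit_done() once the DMA engine completes it),
 * so a frame can only be queued when enough consecutive descriptors
 * are free.
 */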
static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
	struct cdmac_bd *cur_p;
	int tail;

	tail = lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[tail];

	do {
		if (cur_p->app0)
			return NETDEV_TX_BUSY;

		tail++;
		if (tail >= lp->tx_bd_num)
			tail = 0;

		cur_p = &lp->tx_bd_v[tail];
		num_frag--;
	} while (num_frag >= 0);

	return 0;
}

static netdev_tx_t
temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	dma_addr_t tail_p, skb_dma_addr;
	int ii;
	unsigned long num_frag;
	skb_frag_t *frag;

	num_frag = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
		if (netif_queue_stopped(ndev))
			return NETDEV_TX_BUSY;

		netif_stop_queue(ndev);

		/* Matches barrier in temac_start_xmit_done */
		smp_mb();

		/* Space might have just been freed - check again, for
		 * the same num_frag + 1 descriptors as above
		 */
		if (temac_check_tx_bd_space(lp, num_frag + 1))
			return NETDEV_TX_BUSY;

		netif_wake_queue(ndev);
	}

	cur_p->app0 = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start_off = skb_checksum_start_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;

		cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
		cur_p->app1 = cpu_to_be32((csum_start_off << 16)
					  | csum_index_off);
		cur_p->app2 = 0;  /* initial checksum seed */
	}

	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	cur_p->len = cpu_to_be32(skb_headlen(skb));
	if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
		dev_kfree_skb_any(skb);
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	cur_p->phys = cpu_to_be32(skb_dma_addr);
	ptr_to_txbd((void *)skb, cur_p);

	for (ii = 0; ii < num_frag; ii++) {
		if (++lp->tx_bd_tail >= lp->tx_bd_num)
			lp->tx_bd_tail = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		skb_dma_addr = dma_map_single(ndev->dev.parent,
					      skb_frag_address(frag),
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
			if (--lp->tx_bd_tail < 0)
				lp->tx_bd_tail = lp->tx_bd_num - 1;
			cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
			while (--ii >= 0) {
				--frag;
				dma_unmap_single(ndev->dev.parent,
						 be32_to_cpu(cur_p->phys),
						 skb_frag_size(frag),
						 DMA_TO_DEVICE);
				if (--lp->tx_bd_tail < 0)
					lp->tx_bd_tail = lp->tx_bd_num - 1;
				cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
			}
			dma_unmap_single(ndev->dev.parent,
					 be32_to_cpu(cur_p->phys),
					 skb_headlen(skb), DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			ndev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		cur_p->phys = cpu_to_be32(skb_dma_addr);
		cur_p->len = cpu_to_be32(skb_frag_size(frag));
		cur_p->app0 = 0;
		frag++;
	}
	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	lp->tx_bd_tail++;
	if (lp->tx_bd_tail >= lp->tx_bd_num)
		lp->tx_bd_tail = 0;

	skb_tx_timestamp(skb);

	/* Kick off the transfer */
	wmb();
	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	return NETDEV_TX_OK;
}

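/* Count RX buffer descriptors that are ready for the DMA engine, i.e.
 * those from rx_bd_ci through rx_bd_tail (inclusive, with wrap-around).
 * Returns 0 when no skb is allocated at the current index.
 */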
static int ll_temac_recv_buffers_available(struct temac_local *lp)
{
	int available;

	if (!lp->rx_skb[lp->rx_bd_ci])
		return 0;
	available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
	if (available <= 0)
		available += lp->rx_bd_num;
	return available;
}

static void ll_temac_recv(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;
	int rx_bd;
	bool update_tail = false;

	spin_lock_irqsave(&lp->rx_lock, flags);

	/* Process all received buffers, passing them on to the network
	 * stack.  After this, the buffer descriptors will be in an
	 * un-allocated state, where no skb is allocated for them, and
	 * they are therefore not available for TEMAC/DMA.
	 */
	do {
		struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
		struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
		unsigned int bdstat = be32_to_cpu(bd->app0);
		int length;

		/* While this should not normally happen, we can end
		 * up here when GFP_ATOMIC allocations fail, and we
		 * therefore have un-allocated buffers.
		 */
		if (!skb)
			break;

		/* Loop over all completed buffer descriptors */
		if (!(bdstat & STS_CTRL_APP0_CMPLT))
			break;

		dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		/* The buffer is not valid for DMA anymore */
		bd->phys = 0;
		bd->len = 0;

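		/* The lower 14 bits of app4 hold the received frame
		 * length in bytes, as written back by the DMA engine.
		 */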
		length = be32_to_cpu(bd->app4) & 0x3FFF;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* if we're doing rx csum offload, set it up */
		if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
		    (skb->protocol == htons(ETH_P_IP)) &&
		    (skb->len > 64)) {

			/* Convert from device endianness (be32) to cpu
			 * endianness, and if necessary swap the bytes
			 * (back) for proper IP checksum byte order
			 * (be16).
			 */
			skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (!skb_defer_rx_timestamp(skb))
			netif_rx(skb);
		/* The skb buffer is now owned by network stack above */
		lp->rx_skb[lp->rx_bd_ci] = NULL;

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += length;

		rx_bd = lp->rx_bd_ci;
		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
	} while (rx_bd != lp->rx_bd_tail);

	/* DMA operations will halt when the last buffer descriptor is
	 * processed (ie. the one pointed to by RX_TAILDESC_PTR).
	 * When that happens, no more interrupt events will be
	 * generated.  No IRQ_COAL or IRQ_DLY, and not even an
	 * IRQ_ERR.  To avoid stalling, we schedule a delayed work
	 * when there is a potential risk of that happening.  The work
	 * will call this function, and thus re-schedule itself until
	 * enough buffers are available again.
	 */
	if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
		schedule_delayed_work(&lp->restart_work, HZ / 1000);

	/* Allocate new buffers for those buffer descriptors that were
	 * passed to the network stack.  Note that GFP_ATOMIC allocations
	 * can fail (e.g. when a larger burst of GFP_ATOMIC
	 * allocations occurs), so while we try to allocate all
	 * buffers in the same interrupt where they were processed, we
	 * continue with what we could get in case of allocation
	 * failure.  Allocation of remaining buffers will be retried
	 * in following calls.
	 */
	while (1) {
		struct sk_buff *skb;
		struct cdmac_bd *bd;
		dma_addr_t skb_dma_addr;

		rx_bd = lp->rx_bd_tail + 1;
		if (rx_bd >= lp->rx_bd_num)
			rx_bd = 0;
		bd = &lp->rx_bd_v[rx_bd];

		if (bd->phys)
			break;	/* All skb's allocated */

		skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb) {
			dev_warn(&ndev->dev, "skb alloc failed\n");
			break;
		}

		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
						   skb_dma_addr))) {
			dev_kfree_skb_any(skb);
			break;
		}

		bd->phys = cpu_to_be32(skb_dma_addr);
		bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
		lp->rx_skb[rx_bd] = skb;

		lp->rx_bd_tail = rx_bd;
		update_tail = true;
	}

	/* Move tail pointer when buffers have been allocated */
	if (update_tail) {
		lp->dma_out(lp, RX_TAILDESC_PTR,
			lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
	}

	spin_unlock_irqrestore(&lp->rx_lock, flags);
}

/* Function scheduled to ensure a restart in case of DMA halt
 * condition caused by running out of buffer descriptors.
 */
static void ll_temac_restart_work_func(struct work_struct *work)
{
	struct temac_local *lp = container_of(work, struct temac_local,
					      restart_work.work);
	struct net_device *ndev = lp->ndev;

	ll_temac_recv(ndev);
}

static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	status = lp->dma_in(lp, TX_IRQ_REG);
	lp->dma_out(lp, TX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		temac_start_xmit_done(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "TX error 0x%x TX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, TX_CHNL_STS));

	return IRQ_HANDLED;
}

static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status registers */
	status = lp->dma_in(lp, RX_IRQ_REG);
	lp->dma_out(lp, RX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		ll_temac_recv(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "RX error 0x%x RX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, RX_CHNL_STS));

	return IRQ_HANDLED;
}

static int temac_open(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = NULL;
	int rc;

	dev_dbg(&ndev->dev, "temac_open()\n");

	if (lp->phy_node) {
		phydev = of_phy_connect(lp->ndev, lp->phy_node,
					temac_adjust_link, 0, 0);
		if (!phydev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(phydev);
	} else if (strlen(lp->phy_name) > 0) {
		phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
				     lp->phy_interface);
		if (IS_ERR(phydev)) {
			dev_err(lp->dev, "phy_connect() failed\n");
			return PTR_ERR(phydev);
		}
		phy_start(phydev);
	}

	temac_device_reset(ndev);

	rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_tx_irq;
	rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_rx_irq;

	return 0;

 err_rx_irq:
	free_irq(lp->tx_irq, ndev);
 err_tx_irq:
	if (phydev)
		phy_disconnect(phydev);
	dev_err(lp->dev, "request_irq() failed\n");
	return rc;
}

static int temac_stop(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	dev_dbg(&ndev->dev, "temac_close()\n");

	cancel_delayed_work_sync(&lp->restart_work);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (phydev)
		phy_disconnect(phydev);

	temac_dma_bd_release(ndev);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);

	ll_temac_rx_irq(lp->tx_irq, ndev);
	ll_temac_tx_irq(lp->rx_irq, ndev);

	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static const struct net_device_ops temac_netdev_ops = {
	.ndo_open = temac_open,
	.ndo_stop = temac_stop,
	.ndo_start_xmit = temac_start_xmit,
	.ndo_set_rx_mode = temac_set_multicast_list,
	.ndo_set_mac_address = temac_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = temac_poll_controller,
#endif
};

/* ---------------------------------------------------------------------
 * SYSFS device attributes
 */
static ssize_t temac_show_llink_regs(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct temac_local *lp = netdev_priv(ndev);
	int i, len = 0;

	for (i = 0; i < 0x11; i++)
		len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
			       (i % 8) == 7 ? "\n" : " ");
	len += sprintf(buf + len, "\n");

	return len;
}

static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
	&dev_attr_llink_regs.attr,
	NULL,
};

static const struct attribute_group temac_attr_group = {
	.attrs = temac_device_attrs,
};

/* ---------------------------------------------------------------------
 * ethtool support
 */

static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
					    struct ethtool_ringparam *ering)
{
	struct temac_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
					   struct ethtool_ringparam *ering)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}

static int ll_temac_ethtools_get_coalesce(struct net_device *ndev,
					  struct ethtool_coalesce *ec)
{
	struct temac_local *lp = netdev_priv(ndev);

	ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
	ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
	return 0;
}

static int ll_temac_ethtools_set_coalesce(struct net_device *ndev,
					  struct ethtool_coalesce *ec)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if (ec->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
	if (ec->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
	/* With typical LocalLink clock speed of 200 MHz and
	 * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
	 */
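	/* Example: a request of rx_coalesce_usecs = 512 becomes
	 * (512 * 100) / 512 = 100 delay counts, which
	 * ll_temac_ethtools_get_coalesce() reports back as
	 * (100 * 512) / 100 = 512 us.
	 */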
	if (ec->rx_coalesce_usecs)
		lp->coalesce_delay_rx =
			min(255U, (ec->rx_coalesce_usecs * 100) / 512);
	if (ec->tx_coalesce_usecs)
		lp->coalesce_delay_tx =
			min(255U, (ec->tx_coalesce_usecs * 100) / 512);

	return 0;
}

static const struct ethtool_ops temac_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_ringparam	= ll_temac_ethtools_get_ringparam,
	.set_ringparam	= ll_temac_ethtools_set_ringparam,
	.get_coalesce	= ll_temac_ethtools_get_coalesce,
	.set_coalesce	= ll_temac_ethtools_set_coalesce,
};

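/* A minimal device-tree sketch of what the OF probe path below expects
 * (illustrative only; the node name, address, and compatible string are
 * examples, not taken from a binding document):
 *
 *	temac: ethernet@81c00000 {
 *		compatible = "xlnx,xps-ll-temac-1.01.b";
 *		reg = <0x81c00000 0x40>;
 *		llink-connected = <&dma>;
 *		xlnx,txcsum = <0x1>;
 *		xlnx,rxcsum = <0x1>;
 *	};
 *
 * The llink-connected phandle must point at the LocalLink DMA node,
 * whose registers (DCR or memory mapped) and RX/TX interrupts are
 * looked up in temac_probe().
 */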
1348static int temac_probe(struct platform_device *pdev)
1349{
1350        struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
1351        struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
1352        struct temac_local *lp;
1353        struct net_device *ndev;
1354        struct resource *res;
1355        const void *addr;
1356        __be32 *p;
1357        bool little_endian;
1358        int rc = 0;
1359
1360        /* Init network device structure */
1361        ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
1362        if (!ndev)
1363                return -ENOMEM;
1364
1365        platform_set_drvdata(pdev, ndev);
1366        SET_NETDEV_DEV(ndev, &pdev->dev);
1367        ndev->features = NETIF_F_SG;
1368        ndev->netdev_ops = &temac_netdev_ops;
1369        ndev->ethtool_ops = &temac_ethtool_ops;
1370#if 0
1371        ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
1372        ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
1373        ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
1374        ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
1375        ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
1376        ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
1377        ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
1378        ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
1379        ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
1380        ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
1381        ndev->features |= NETIF_F_LRO; /* large receive offload */
1382#endif
1383
1384        /* setup temac private info structure */
1385        lp = netdev_priv(ndev);
1386        lp->ndev = ndev;
1387        lp->dev = &pdev->dev;
1388        lp->options = XTE_OPTION_DEFAULTS;
1389        lp->rx_bd_num = RX_BD_NUM_DEFAULT;
1390        lp->tx_bd_num = TX_BD_NUM_DEFAULT;
1391        spin_lock_init(&lp->rx_lock);
1392        INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
1393
1394        /* Setup mutex for synchronization of indirect register access */
1395        if (pdata) {
1396                if (!pdata->indirect_lock) {
1397                        dev_err(&pdev->dev,
1398                                "indirect_lock missing in platform_data\n");
1399                        return -EINVAL;
1400                }
1401                lp->indirect_lock = pdata->indirect_lock;
1402        } else {
1403                lp->indirect_lock = devm_kmalloc(&pdev->dev,
1404                                                 sizeof(*lp->indirect_lock),
1405                                                 GFP_KERNEL);
1406                spin_lock_init(lp->indirect_lock);
1407        }
1408
1409        /* map device registers */
1410        lp->regs = devm_platform_ioremap_resource_byname(pdev, 0);
1411        if (IS_ERR(lp->regs)) {
1412                dev_err(&pdev->dev, "could not map TEMAC registers\n");
1413                return -ENOMEM;
1414        }
1415
1416        /* Select register access functions with the specified
1417         * endianness mode.  Default for OF devices is big-endian.
1418         */
1419        little_endian = false;
1420        if (temac_np) {
1421                if (of_get_property(temac_np, "little-endian", NULL))
1422                        little_endian = true;
1423        } else if (pdata) {
1424                little_endian = pdata->reg_little_endian;
1425        }
1426        if (little_endian) {
1427                lp->temac_ior = _temac_ior_le;
1428                lp->temac_iow = _temac_iow_le;
1429        } else {
1430                lp->temac_ior = _temac_ior_be;
1431                lp->temac_iow = _temac_iow_be;
1432        }

        /* Set up checksum offload, defaulting to off if not specified */
        lp->temac_features = 0;
        if (temac_np) {
                p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
                if (p && be32_to_cpu(*p))
                        lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
                p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
                if (p && be32_to_cpu(*p))
                        lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
        } else if (pdata) {
                if (pdata->txcsum)
                        lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
                if (pdata->rxcsum)
                        lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
        }
        if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
                /* Can checksum TCP/UDP over IPv4. */
                ndev->features |= NETIF_F_IP_CSUM;
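        /* NETIF_F_IP_CSUM advertises checksum offload for TCP/UDP over
         * IPv4 only; the stack computes checksums in software for
         * anything else.
         */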

        /* Defaults for IRQ delay/coalescing setup.  These are
         * configuration values, so they do not belong in the device tree.
         */
        lp->coalesce_delay_tx = 0x10;
        lp->coalesce_count_tx = 0x22;
        lp->coalesce_delay_rx = 0xff;
        lp->coalesce_count_rx = 0x07;
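        /* The count values set how many completed frames the DMA may batch
         * before raising an interrupt; the delay values bound how long a
         * partial batch may wait before an interrupt is forced.  The
         * platform-data branch below may override these defaults.
         */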

        /* Setup LocalLink DMA */
        if (temac_np) {
                /* Find the DMA node, map the DMA registers, and
                 * decode the DMA IRQs.
                 */
                dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
                if (!dma_np) {
                        dev_err(&pdev->dev, "could not find DMA node\n");
                        return -ENODEV;
                }
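                /* A matching device tree typically wires the TEMAC node to
                 * its LocalLink DMA node roughly like this (addresses are
                 * illustrative only):
                 *
                 *      temac@81c00000 {
                 *              compatible = "xlnx,xps-ll-temac-1.01.b";
                 *              llink-connected = <&dma>;
                 *      };
                 */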

                /* Set up the DMA register accesses; these can be DCR or
                 * memory mapped.
                 */
                if (temac_dcr_setup(lp, pdev, dma_np)) {
                        /* no DCR in the device tree, try non-DCR */
                        lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
                                                      NULL);
                        if (IS_ERR(lp->sdma_regs)) {
                                dev_err(&pdev->dev,
                                        "unable to map DMA registers\n");
                                of_node_put(dma_np);
                                return PTR_ERR(lp->sdma_regs);
                        }
                        if (of_get_property(dma_np, "little-endian", NULL)) {
                                lp->dma_in = temac_dma_in32_le;
                                lp->dma_out = temac_dma_out32_le;
                        } else {
                                lp->dma_in = temac_dma_in32_be;
                                lp->dma_out = temac_dma_out32_be;
                        }
                        dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
                }

                /* Get DMA RX and TX interrupts */
                lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
                lp->tx_irq = irq_of_parse_and_map(dma_np, 1);

                /* Finished with the DMA node; drop the reference */
                of_node_put(dma_np);
        } else if (pdata) {
                /* 2nd memory resource specifies DMA registers */
                res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
                if (!res) {
                        dev_err(&pdev->dev,
                                "could not get DMA register resource\n");
                        return -EINVAL;
                }
                lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
                                             resource_size(res));
                if (!lp->sdma_regs) {
                        dev_err(&pdev->dev,
                                "could not map DMA registers\n");
                        return -ENOMEM;
                }
                if (pdata->dma_little_endian) {
                        lp->dma_in = temac_dma_in32_le;
                        lp->dma_out = temac_dma_out32_le;
                } else {
                        lp->dma_in = temac_dma_in32_be;
                        lp->dma_out = temac_dma_out32_be;
                }

                /* Get DMA RX and TX interrupts */
                lp->rx_irq = platform_get_irq(pdev, 0);
                lp->tx_irq = platform_get_irq(pdev, 1);

                /* IRQ delay/coalescing setup */
                if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
                        lp->coalesce_delay_tx = pdata->tx_irq_timeout;
                        lp->coalesce_count_tx = pdata->tx_irq_count;
                }
                if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
                        lp->coalesce_delay_rx = pdata->rx_irq_timeout;
                        lp->coalesce_count_rx = pdata->rx_irq_count;
                }
        }

        /* Error handling for the DMA RX and TX interrupts.  Note that
         * irq_of_parse_and_map() returns 0 rather than a negative errno
         * on failure, so treat both zero and negative values as errors.
         */
        if (lp->rx_irq <= 0) {
                rc = lp->rx_irq ?: -EINVAL;
                if (rc != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "could not get DMA RX irq\n");
                return rc;
        }
        if (lp->tx_irq <= 0) {
                rc = lp->tx_irq ?: -EINVAL;
                if (rc != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "could not get DMA TX irq\n");
                return rc;
        }
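        /* -EPROBE_DEFER is deliberately silent above: it just means the
         * interrupt controller has not probed yet, and the driver core
         * will retry this probe later.
         */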

        if (temac_np) {
                /* Retrieve the MAC address */
                addr = of_get_mac_address(temac_np);
                if (IS_ERR(addr)) {
                        dev_err(&pdev->dev, "could not find MAC address\n");
                        return -ENODEV;
                }
                temac_init_mac_address(ndev, addr);
        } else if (pdata) {
                temac_init_mac_address(ndev, pdata->mac_addr);
        }

        rc = temac_mdio_setup(lp, pdev);
        if (rc)
                dev_warn(&pdev->dev, "error registering MDIO bus\n");
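        /* An MDIO registration failure is only a warning: the PHY may be
         * attached to a different, already-registered MDIO bus, in which
         * case the phy-handle lookup below can still succeed.
         */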

        if (temac_np) {
                lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
                if (lp->phy_node)
                        dev_dbg(lp->dev, "using PHY node %pOF\n",
                                lp->phy_node);
        } else if (pdata) {
                snprintf(lp->phy_name, sizeof(lp->phy_name),
                         PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
                lp->phy_interface = pdata->phy_interface;
        }
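        /* PHY_ID_FMT is "%s:%02x", so lp->phy_name becomes
         * "<mii-bus-id>:<phy-addr>", the bus_id form that phy_connect()
         * expects in the non-OF case.
         */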

        /* Add the device attributes */
        rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
        if (rc) {
                dev_err(lp->dev, "Error creating sysfs files\n");
                goto err_sysfs_create;
        }

        rc = register_netdev(lp->ndev);
        if (rc) {
                dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
                goto err_register_ndev;
        }

        return 0;

err_register_ndev:
        sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
err_sysfs_create:
        if (lp->phy_node)
                of_node_put(lp->phy_node);
        temac_mdio_teardown(lp);
        return rc;
}

static int temac_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct temac_local *lp = netdev_priv(ndev);

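        /* devm-managed resources (register mappings and the fallback
         * indirect_lock allocation) are released automatically when the
         * device is unbound; only explicit registrations are undone here.
         */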
        unregister_netdev(ndev);
        sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
        if (lp->phy_node)
                of_node_put(lp->phy_node);
        temac_mdio_teardown(lp);
        return 0;
}

static const struct of_device_id temac_of_match[] = {
        { .compatible = "xlnx,xps-ll-temac-1.01.b", },
        { .compatible = "xlnx,xps-ll-temac-2.00.a", },
        { .compatible = "xlnx,xps-ll-temac-2.02.a", },
        { .compatible = "xlnx,xps-ll-temac-2.03.a", },
        {},
};
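/* Exporting the match table allows userspace to autoload this module
 * (via the modalias uevent) when a matching device-tree node is found.
 */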
MODULE_DEVICE_TABLE(of, temac_of_match);

static struct platform_driver temac_driver = {
        .probe = temac_probe,
        .remove = temac_remove,
        .driver = {
                .name = "xilinx_temac",
                .of_match_table = temac_of_match,
        },
};

module_platform_driver(temac_driver);

MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");
