linux/drivers/net/ethernet/altera/altera_tse_main.c
/* Altera Triple-Speed Ethernet MAC driver
 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
 *
 * Contributors:
 *   Dalon Westergreen
 *   Thomas Chou
 *   Ian Abbott
 *   Yuriy Kozlov
 *   Tobias Klauser
 *   Andriy Smolskyy
 *   Roman Bulgakov
 *   Dmytro Mytarchuk
 *   Matthew Gerlach
 *
 * Original driver contributed by SLS.
 * Major updates contributed by GlobalLogic
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <asm/cacheflush.h>

#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdma.h"
#include "altera_msgdma.h"

static atomic_t instance_count = ATOMIC_INIT(~0);
/* Module parameters */
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                        NETIF_MSG_LINK | NETIF_MSG_IFUP |
                                        NETIF_MSG_IFDOWN);

#define RX_DESCRIPTORS 64
static int dma_rx_num = RX_DESCRIPTORS;
module_param(dma_rx_num, int, 0644);
MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");

#define TX_DESCRIPTORS 64
static int dma_tx_num = TX_DESCRIPTORS;
module_param(dma_tx_num, int, 0644);
MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");

#define POLL_PHY (-1)

/* Make sure DMA buffer size is larger than the max frame size
 * plus some alignment offset and a VLAN header. If the max frame size is
 * 1518, a VLAN header would be additional 4 bytes and additional
 * headroom for alignment is 2 bytes, 2048 is just fine.
 */
#define ALTERA_RXDMABUFFER_SIZE 2048

/* Allow network stack to resume queueing packets after we've
 * finished transmitting at least 1/4 of the packets in the queue.
 */
#define TSE_TX_THRESH(x)        (x->tx_ring_size / 4)

#define TXQUEUESTOP_THRESHHOLD  2

static const struct of_device_id altera_tse_ids[];

static inline u32 tse_tx_avail(struct altera_tse_private *priv)
{
        return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}
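
/* Worked example (illustrative): tx_prod and tx_cons are free-running u32
 * counters, so the unsigned subtraction above stays correct across
 * wrap-around. With tx_ring_size = 64, tx_prod = 70 and tx_cons = 10, the
 * ring holds 60 in-flight buffers and tse_tx_avail() returns
 * 10 + 64 - 70 - 1 = 3. One slot is deliberately kept free so a completely
 * full ring is never mistaken for an empty one.
 */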

/* PCS Register read/write functions
 */
static u16 sgmii_pcs_read(struct altera_tse_private *priv, int regnum)
{
        return csrrd32(priv->mac_dev,
                       tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
}

static void sgmii_pcs_write(struct altera_tse_private *priv, int regnum,
                                u16 value)
{
        csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
}

/* Check PCS scratch memory */
static int sgmii_pcs_scratch_test(struct altera_tse_private *priv, u16 value)
{
        sgmii_pcs_write(priv, SGMII_PCS_SCRATCH, value);
        return (sgmii_pcs_read(priv, SGMII_PCS_SCRATCH) == value);
}

/* MDIO specific functions
 */
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
        struct net_device *ndev = bus->priv;
        struct altera_tse_private *priv = netdev_priv(ndev);

        /* set MDIO address */
        csrwr32((mii_id & 0x1f), priv->mac_dev,
                tse_csroffs(mdio_phy1_addr));

        /* get the data */
        return csrrd32(priv->mac_dev,
                       tse_csroffs(mdio_phy1) + regnum * 4) & 0xffff;
}

static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
                                 u16 value)
{
        struct net_device *ndev = bus->priv;
        struct altera_tse_private *priv = netdev_priv(ndev);

        /* set MDIO address */
        csrwr32((mii_id & 0x1f), priv->mac_dev,
                tse_csroffs(mdio_phy1_addr));

        /* write the data */
        csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy1) + regnum * 4);
        return 0;
}
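
/* Illustrative sketch (not part of the driver): the MAC exposes each MDIO
 * register of the selected PHY as a 32-bit CSR, hence the "regnum * 4"
 * offsets above. Reading a PHY's 32-bit ID through this window, assuming a
 * PHY at address 1, would look like the following (register names from
 * <linux/mii.h>):
 *
 *      u32 id;
 *
 *      csrwr32(1, priv->mac_dev, tse_csroffs(mdio_phy1_addr));
 *      id = (csrrd32(priv->mac_dev,
 *                    tse_csroffs(mdio_phy1) + MII_PHYSID1 * 4) & 0xffff) << 16;
 *      id |= csrrd32(priv->mac_dev,
 *                    tse_csroffs(mdio_phy1) + MII_PHYSID2 * 4) & 0xffff;
 *
 * In practice the mii_bus read/write callbacks above do this on behalf of
 * the phylib core.
 */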

static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
{
        struct altera_tse_private *priv = netdev_priv(dev);
        int ret;
        struct device_node *mdio_node = NULL;
        struct mii_bus *mdio = NULL;
        struct device_node *child_node = NULL;

        for_each_child_of_node(priv->device->of_node, child_node) {
                if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
                        mdio_node = child_node;
                        break;
                }
        }

        if (mdio_node) {
                netdev_dbg(dev, "FOUND MDIO subnode\n");
        } else {
                netdev_dbg(dev, "NO MDIO subnode\n");
                return 0;
        }

        mdio = mdiobus_alloc();
        if (mdio == NULL) {
                netdev_err(dev, "Error allocating MDIO bus\n");
                return -ENOMEM;
        }

        mdio->name = ALTERA_TSE_RESOURCE_NAME;
        mdio->read = &altera_tse_mdio_read;
        mdio->write = &altera_tse_mdio_write;
        snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);

        mdio->priv = dev;
        mdio->parent = priv->device;

        ret = of_mdiobus_register(mdio, mdio_node);
        if (ret != 0) {
                netdev_err(dev, "Cannot register MDIO bus %s\n",
                           mdio->id);
                goto out_free_mdio;
        }

        if (netif_msg_drv(priv))
                netdev_info(dev, "MDIO bus %s: created\n", mdio->id);

        priv->mdio = mdio;
        return 0;
out_free_mdio:
        mdiobus_free(mdio);
        mdio = NULL;
        return ret;
}

static void altera_tse_mdio_destroy(struct net_device *dev)
{
        struct altera_tse_private *priv = netdev_priv(dev);

        if (priv->mdio == NULL)
                return;

        if (netif_msg_drv(priv))
                netdev_info(dev, "MDIO bus %s: removed\n",
                            priv->mdio->id);

        mdiobus_unregister(priv->mdio);
        mdiobus_free(priv->mdio);
        priv->mdio = NULL;
}

static int tse_init_rx_buffer(struct altera_tse_private *priv,
                              struct tse_buffer *rxbuffer, int len)
{
        rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
        if (!rxbuffer->skb)
                return -ENOMEM;

        rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
                                                len,
                                                DMA_FROM_DEVICE);

        if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
                netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
                dev_kfree_skb_any(rxbuffer->skb);
                return -EINVAL;
        }
        rxbuffer->dma_addr &= (dma_addr_t)~3;
        rxbuffer->len = len;
        return 0;
}
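
/* A note on the mask above (illustrative): netdev_alloc_skb_ip_align()
 * reserves NET_IP_ALIGN (2 on most platforms) bytes of headroom so the IP
 * header will land on a 4-byte boundary, which leaves skb->data itself
 * 2 bytes past a 4-byte boundary. The DMA engine wants a 4-byte-aligned
 * start address, so "dma_addr &= ~3" rounds down (e.g. 0x10002 becomes
 * 0x10000); the engine then emits 2 alignment bytes ahead of the frame, so
 * the frame proper still begins at skb->data. tse_rx() compensates by
 * trimming those 2 bytes from the reported transfer length.
 */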

static void tse_free_rx_buffer(struct altera_tse_private *priv,
                               struct tse_buffer *rxbuffer)
{
        struct sk_buff *skb = rxbuffer->skb;
        dma_addr_t dma_addr = rxbuffer->dma_addr;

        if (skb != NULL) {
                if (dma_addr)
                        dma_unmap_single(priv->device, dma_addr,
                                         rxbuffer->len,
                                         DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                rxbuffer->skb = NULL;
                rxbuffer->dma_addr = 0;
        }
}

/* Unmap and free Tx buffer resources
 */
static void tse_free_tx_buffer(struct altera_tse_private *priv,
                               struct tse_buffer *buffer)
{
        if (buffer->dma_addr) {
                if (buffer->mapped_as_page)
                        dma_unmap_page(priv->device, buffer->dma_addr,
                                       buffer->len, DMA_TO_DEVICE);
                else
                        dma_unmap_single(priv->device, buffer->dma_addr,
                                         buffer->len, DMA_TO_DEVICE);
                buffer->dma_addr = 0;
        }
        if (buffer->skb) {
                dev_kfree_skb_any(buffer->skb);
                buffer->skb = NULL;
        }
}

static int alloc_init_skbufs(struct altera_tse_private *priv)
{
        unsigned int rx_descs = priv->rx_ring_size;
        unsigned int tx_descs = priv->tx_ring_size;
        int ret = -ENOMEM;
        int i;

        /* Create Rx ring buffer */
        priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
                                GFP_KERNEL);
        if (!priv->rx_ring)
                goto err_rx_ring;

        /* Create Tx ring buffer */
        priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
                                GFP_KERNEL);
        if (!priv->tx_ring)
                goto err_tx_ring;

        priv->tx_cons = 0;
        priv->tx_prod = 0;

        /* Init Rx ring */
        for (i = 0; i < rx_descs; i++) {
                ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
                                         priv->rx_dma_buf_sz);
                if (ret)
                        goto err_init_rx_buffers;
        }

        priv->rx_cons = 0;
        priv->rx_prod = 0;

        return 0;
err_init_rx_buffers:
        while (--i >= 0)
                tse_free_rx_buffer(priv, &priv->rx_ring[i]);
        kfree(priv->tx_ring);
err_tx_ring:
        kfree(priv->rx_ring);
err_rx_ring:
        return ret;
}

static void free_skbufs(struct net_device *dev)
{
        struct altera_tse_private *priv = netdev_priv(dev);
        unsigned int rx_descs = priv->rx_ring_size;
        unsigned int tx_descs = priv->tx_ring_size;
        int i;

        /* Release the DMA TX/RX socket buffers */
        for (i = 0; i < rx_descs; i++)
                tse_free_rx_buffer(priv, &priv->rx_ring[i]);
        for (i = 0; i < tx_descs; i++)
                tse_free_tx_buffer(priv, &priv->tx_ring[i]);

        /* Free both ring control structures; the original code freed only
         * the Tx ring and leaked the Rx ring on every ifdown.
         */
        kfree(priv->tx_ring);
        kfree(priv->rx_ring);
}

/* Reallocate the skb for the reception process
 */
static inline void tse_rx_refill(struct altera_tse_private *priv)
{
        unsigned int rxsize = priv->rx_ring_size;
        unsigned int entry;
        int ret;

        for (; priv->rx_cons - priv->rx_prod > 0;
                        priv->rx_prod++) {
                entry = priv->rx_prod % rxsize;
                if (likely(priv->rx_ring[entry].skb == NULL)) {
                        ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
                                priv->rx_dma_buf_sz);
                        if (unlikely(ret != 0))
                                break;
                        priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
                }
        }
}

/* Pull out the VLAN tag and fix up the packet
 */
static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
        struct ethhdr *eth_hdr;
        u16 vid;

        if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
            !__vlan_get_tag(skb, &vid)) {
                eth_hdr = (struct ethhdr *)skb->data;
                memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
                skb_pull(skb, VLAN_HLEN);
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
        }
}
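
/* Illustrative layout of the untagging above: the 802.1Q tag sits between
 * the MAC addresses and the EtherType, so moving the 12 address bytes
 * forward by VLAN_HLEN (4) and pulling the head drops the tag in place:
 *
 *   before: | dst[6] | src[6] | 0x8100 | TCI | type | payload ... |
 *   after:  | dst[6] | src[6] | type | payload ... |
 *
 * The extracted VLAN ID is then handed to the stack out-of-band via
 * __vlan_hwaccel_put_tag(), mimicking hardware tag stripping.
 */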

/* Receive a packet: retrieve and pass over to upper levels
 */
static int tse_rx(struct altera_tse_private *priv, int limit)
{
        unsigned int count = 0;
        unsigned int next_entry;
        struct sk_buff *skb;
        unsigned int entry = priv->rx_cons % priv->rx_ring_size;
        u32 rxstatus;
        u16 pktlength;
        u16 pktstatus;

        /* Check for count < limit first as get_rx_status is changing
         * the response-fifo so we must process the next packet
         * after calling get_rx_status if a response is pending.
         * (reading the last byte of the response pops the value from the fifo.)
         */
        while ((count < limit) &&
               ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0)) {
                pktstatus = rxstatus >> 16;
                pktlength = rxstatus & 0xffff;

                if ((pktstatus & 0xFF) || (pktlength == 0))
                        netdev_err(priv->dev,
                                   "RCV pktstatus %08X pktlength %08X\n",
                                   pktstatus, pktlength);

                /* DMA transfer from the TSE starts with 2 additional bytes
                 * for IP payload alignment. The status returned by
                 * get_rx_status() contains the DMA transfer length, so the
                 * packet is 2 bytes shorter.
                 */
                pktlength -= 2;

                count++;
                next_entry = (++priv->rx_cons) % priv->rx_ring_size;

                skb = priv->rx_ring[entry].skb;
                if (unlikely(!skb)) {
                        netdev_err(priv->dev,
                                   "%s: Inconsistent Rx descriptor chain\n",
                                   __func__);
                        priv->dev->stats.rx_dropped++;
                        break;
                }
                priv->rx_ring[entry].skb = NULL;

                skb_put(skb, pktlength);

                dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
                                 priv->rx_ring[entry].len, DMA_FROM_DEVICE);

                if (netif_msg_pktdata(priv)) {
                        netdev_info(priv->dev, "frame received %d bytes\n",
                                    pktlength);
                        print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
                                       16, 1, skb->data, pktlength, true);
                }

                tse_rx_vlan(priv->dev, skb);

                skb->protocol = eth_type_trans(skb, priv->dev);
                skb_checksum_none_assert(skb);

                napi_gro_receive(&priv->napi, skb);

                priv->dev->stats.rx_packets++;
                priv->dev->stats.rx_bytes += pktlength;

                entry = next_entry;

                tse_rx_refill(priv);
        }

        return count;
}
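
/* Worked example of the status-word decode above (illustrative): the DMA
 * response packs error flags in the upper 16 bits and the transfer length
 * in the lower 16. A 64-byte frame received cleanly arrives as 2 alignment
 * bytes + 64 frame bytes, so get_rx_status() might return
 * rxstatus = 0x00000042: pktstatus = 0x0000 (no errors) and
 * pktlength = 0x42 (66); the "pktlength -= 2" correction then yields the
 * true 64-byte frame.
 */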

/* Reclaim resources after transmission completes
 */
static int tse_tx_complete(struct altera_tse_private *priv)
{
        unsigned int txsize = priv->tx_ring_size;
        u32 ready;
        unsigned int entry;
        struct tse_buffer *tx_buff;
        int txcomplete = 0;

        spin_lock(&priv->tx_lock);

        ready = priv->dmaops->tx_completions(priv);

        /* Free sent buffers */
        while (ready && (priv->tx_cons != priv->tx_prod)) {
                entry = priv->tx_cons % txsize;
                tx_buff = &priv->tx_ring[entry];

                if (netif_msg_tx_done(priv))
                        netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
                                   __func__, priv->tx_prod, priv->tx_cons);

                if (likely(tx_buff->skb))
                        priv->dev->stats.tx_packets++;

                tse_free_tx_buffer(priv, tx_buff);
                priv->tx_cons++;

                txcomplete++;
                ready--;
        }

        if (unlikely(netif_queue_stopped(priv->dev) &&
                     tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
                if (netif_msg_tx_done(priv))
                        netdev_dbg(priv->dev, "%s: restart transmit\n",
                                   __func__);
                netif_wake_queue(priv->dev);
        }

        spin_unlock(&priv->tx_lock);
        return txcomplete;
}

/* NAPI polling function
 */
static int tse_poll(struct napi_struct *napi, int budget)
{
        struct altera_tse_private *priv =
                        container_of(napi, struct altera_tse_private, napi);
        int rxcomplete = 0;
        unsigned long flags;

        tse_tx_complete(priv);

        rxcomplete = tse_rx(priv, budget);

        if (rxcomplete < budget) {

                napi_complete_done(napi, rxcomplete);

                netdev_dbg(priv->dev,
                           "NAPI Complete, did %d packets with budget %d\n",
                           rxcomplete, budget);

                spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
                priv->dmaops->enable_rxirq(priv);
                priv->dmaops->enable_txirq(priv);
                spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
        }
        return rxcomplete;
}
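
/* A note on the budget contract above (illustrative): NAPI keeps polling as
 * long as the driver reports it consumed its full budget. With budget = 64,
 * returning 64 tells the core more work may be pending, so tse_poll() is
 * rescheduled with the DMA interrupts still masked; returning, say, 12
 * means the ring drained, napi_complete_done() ends the poll cycle, and the
 * interrupts are re-armed so the next frame raises altera_isr() again.
 */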

/* DMA TX & RX FIFO interrupt routine
 */
static irqreturn_t altera_isr(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct altera_tse_private *priv;

        if (unlikely(!dev)) {
                pr_err("%s: invalid dev pointer\n", __func__);
                return IRQ_NONE;
        }
        priv = netdev_priv(dev);

        spin_lock(&priv->rxdma_irq_lock);
        /* reset IRQs */
        priv->dmaops->clear_rxirq(priv);
        priv->dmaops->clear_txirq(priv);
        spin_unlock(&priv->rxdma_irq_lock);

        if (likely(napi_schedule_prep(&priv->napi))) {
                spin_lock(&priv->rxdma_irq_lock);
                priv->dmaops->disable_rxirq(priv);
                priv->dmaops->disable_txirq(priv);
                spin_unlock(&priv->rxdma_irq_lock);
                __napi_schedule(&priv->napi);
        }

        return IRQ_HANDLED;
}

/* Transmit a packet (called by the kernel). Dispatches to either the SGDMA
 * or the MSGDMA transmit method. Scatter/gather is not supported, so the
 * packet is assumed to be a single physically contiguous fragment starting
 * at skb->data, of length skb_headlen(skb).
 */
static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct altera_tse_private *priv = netdev_priv(dev);
        unsigned int txsize = priv->tx_ring_size;
        unsigned int entry;
        struct tse_buffer *buffer = NULL;
        int nfrags = skb_shinfo(skb)->nr_frags;
        unsigned int nopaged_len = skb_headlen(skb);
        enum netdev_tx ret = NETDEV_TX_OK;
        dma_addr_t dma_addr;

        spin_lock_bh(&priv->tx_lock);

        if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);
                        /* This is a hard error, log it. */
                        netdev_err(priv->dev,
                                   "%s: Tx list full when queue awake\n",
                                   __func__);
                }
                ret = NETDEV_TX_BUSY;
                goto out;
        }

        /* Map the first skb fragment */
        entry = priv->tx_prod % txsize;
        buffer = &priv->tx_ring[entry];

        dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(priv->device, dma_addr)) {
                netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
                /* Drop the packet: returning NETDEV_TX_OK transfers skb
                 * ownership to the driver, so it must be freed here rather
                 * than leaked.
                 */
                dev_kfree_skb_any(skb);
                ret = NETDEV_TX_OK;
                goto out;
        }

        buffer->skb = skb;
        buffer->dma_addr = dma_addr;
        buffer->len = nopaged_len;

        priv->dmaops->tx_buffer(priv, buffer);

        skb_tx_timestamp(skb);

        priv->tx_prod++;
        dev->stats.tx_bytes += skb->len;

        if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) {
                if (netif_msg_hw(priv))
                        netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
                                   __func__);
                netif_stop_queue(dev);
        }

out:
        spin_unlock_bh(&priv->tx_lock);

        return ret;
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void altera_tse_adjust_link(struct net_device *dev)
{
        struct altera_tse_private *priv = netdev_priv(dev);
        struct phy_device *phydev = dev->phydev;
        int new_state = 0;

        /* only change config if there is a link */
        spin_lock(&priv->mac_cfg_lock);
        if (phydev->link) {
                /* Read old config */
                u32 cfg_reg = ioread32(&priv->mac_dev->command_config);

                /* Check duplex */
                if (phydev->duplex != priv->oldduplex) {
                        new_state = 1;
                        if (!(phydev->duplex))
                                cfg_reg |= MAC_CMDCFG_HD_ENA;
                        else
                                cfg_reg &= ~MAC_CMDCFG_HD_ENA;

                        netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
                                   dev->name, phydev->duplex);

                        priv->oldduplex = phydev->duplex;
                }

                /* Check speed */
                if (phydev->speed != priv->oldspeed) {
                        new_state = 1;
                        switch (phydev->speed) {
                        case 1000:
                                cfg_reg |= MAC_CMDCFG_ETH_SPEED;
                                cfg_reg &= ~MAC_CMDCFG_ENA_10;
                                break;
                        case 100:
                                cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
                                cfg_reg &= ~MAC_CMDCFG_ENA_10;
                                break;
                        case 10:
                                cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
                                cfg_reg |= MAC_CMDCFG_ENA_10;
                                break;
                        default:
                                if (netif_msg_link(priv))
                                        netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
                                                    phydev->speed);
                                break;
                        }
                        priv->oldspeed = phydev->speed;
                }
                iowrite32(cfg_reg, &priv->mac_dev->command_config);

                if (!priv->oldlink) {
                        new_state = 1;
                        priv->oldlink = 1;
                }
        } else if (priv->oldlink) {
                new_state = 1;
                priv->oldlink = 0;
                priv->oldspeed = 0;
                priv->oldduplex = -1;
        }

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);

        spin_unlock(&priv->mac_cfg_lock);
}

static struct phy_device *connect_local_phy(struct net_device *dev)
{
        struct altera_tse_private *priv = netdev_priv(dev);
        struct phy_device *phydev = NULL;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];

        if (priv->phy_addr != POLL_PHY) {
                snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
                         priv->mdio->id, priv->phy_addr);

                netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);

                phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
                                     priv->phy_iface);
                if (IS_ERR(phydev)) {
                        netdev_err(dev, "Could not attach to PHY\n");
                        /* Callers test for NULL, so don't hand back an
                         * ERR_PTR they would dereference.
                         */
                        phydev = NULL;
                }
        } else {
                int ret;

                phydev = phy_find_first(priv->mdio);
                if (phydev == NULL) {
                        netdev_err(dev, "No PHY found\n");
                        return phydev;
                }

                ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
                                priv->phy_iface);
                if (ret != 0) {
                        netdev_err(dev, "Could not attach to PHY\n");
                        phydev = NULL;
                }
        }
        return phydev;
}

static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
        struct altera_tse_private *priv = netdev_priv(dev);
        struct device_node *np = priv->device->of_node;
        int ret = 0;

        priv->phy_iface = of_get_phy_mode(np);

        /* Avoid getting the PHY address and creating the MDIO bus if no
         * PHY is present
         */
        if (!priv->phy_iface)
                return 0;

        /* try to get PHY address from device tree, use PHY autodetection if
         * no valid address is given
         */
        if (of_property_read_u32(priv->device->of_node, "phy-addr",
                         &priv->phy_addr)) {
                priv->phy_addr = POLL_PHY;
        }

        if (!((priv->phy_addr == POLL_PHY) ||
                  ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
                netdev_err(dev, "invalid phy-addr specified %d\n",
                        priv->phy_addr);
                return -ENODEV;
        }

        /* Create/attach to MDIO bus */
        ret = altera_tse_mdio_create(dev,
                                         atomic_add_return(1, &instance_count));

        if (ret)
                return -ENODEV;

        return 0;
}

/* Initialize driver's PHY state, and attach to the PHY
 */
static int init_phy(struct net_device *dev)
{
        struct altera_tse_private *priv = netdev_priv(dev);
        struct phy_device *phydev;
        struct device_node *phynode;
        bool fixed_link = false;
        int rc = 0;

        /* Avoid initializing the PHY if none is present */
        if (!priv->phy_iface)
                return 0;

        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;

        phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);

        if (!phynode) {
                /* check if a fixed-link is defined in device-tree */
                if (of_phy_is_fixed_link(priv->device->of_node)) {
                        rc = of_phy_register_fixed_link(priv->device->of_node);
                        if (rc < 0) {
                                netdev_err(dev, "cannot register fixed PHY\n");
                                return rc;
                        }

                        /* In the case of a fixed PHY, the DT node associated
                         * to the PHY is the Ethernet MAC DT node.
                         */
                        phynode = of_node_get(priv->device->of_node);
                        fixed_link = true;

                        netdev_dbg(dev, "fixed-link detected\n");
                        phydev = of_phy_connect(dev, phynode,
                                                &altera_tse_adjust_link,
                                                0, priv->phy_iface);
                } else {
                        netdev_dbg(dev, "no phy-handle found\n");
                        if (!priv->mdio) {
                                netdev_err(dev, "No phy-handle nor local mdio specified\n");
                                return -ENODEV;
                        }
                        phydev = connect_local_phy(dev);
                }
        } else {
                netdev_dbg(dev, "phy-handle found\n");
                phydev = of_phy_connect(dev, phynode,
                        &altera_tse_adjust_link, 0, priv->phy_iface);
        }
        of_node_put(phynode);

        if (!phydev) {
                netdev_err(dev, "Could not find the PHY\n");
                if (fixed_link)
                        of_phy_deregister_fixed_link(priv->device->of_node);
                return -ENODEV;
        }

        /* Stop advertising 1000BASE capability if interface is not GMII
         */
        if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
            (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
                phy_set_max_speed(phydev, SPEED_100);

        /* Broken HW is sometimes missing the pull-up resistor on the
         * MDIO line, which results in reads to non-existent devices returning
         * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
         * device as well. If a fixed-link is used the phy_id is always 0.
         * Note: phydev->phy_id is the result of reading the UID PHY registers.
         */
        if ((phydev->phy_id == 0) && !fixed_link) {
                netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
                phy_disconnect(phydev);
                return -ENODEV;
        }

        netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
                   phydev->mdio.addr, phydev->phy_id, phydev->link);

        return 0;
}

static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
{
        u32 msb;
        u32 lsb;

        msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
        lsb = ((addr[5] << 8) | addr[4]) & 0xffff;

        /* Set primary MAC address */
        csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0));
        csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1));
}
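
/* Worked example (illustrative): for MAC address 52:54:00:12:34:56 the
 * packing above yields msb = 0x12005452 (addr[3..0], address bytes in
 * ascending order from the least significant byte) and lsb = 0x00005634
 * (addr[5..4]), matching the TSE mac_addr_0/mac_addr_1 register layout.
 */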

/* MAC software reset.
 * When reset is triggered, the MAC function completes the current
 * transmission or reception, and subsequently disables the transmit and
 * receive logic, flushes the receive FIFO buffer, and resets the statistics
 * counters.
 */
static int reset_mac(struct altera_tse_private *priv)
{
        int counter;
        u32 dat;

        dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
        dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
        dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
        csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));

        counter = 0;
        while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
                if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config),
                                     MAC_CMDCFG_SW_RESET))
                        break;
                udelay(1);
        }

        if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
                dat = csrrd32(priv->mac_dev, tse_csroffs(command_config));
                dat &= ~MAC_CMDCFG_SW_RESET;
                csrwr32(dat, priv->mac_dev, tse_csroffs(command_config));
                return -1;
        }
        return 0;
}
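
/* Timing note (illustrative): the loop above busy-waits in 1 us steps, so
 * assuming ALTERA_TSE_SW_RESET_WATCHDOG_CNTR keeps its altera_tse.h value
 * of 10000, the MAC gets roughly 10 ms to clear MAC_CMDCFG_SW_RESET before
 * the reset is abandoned. A timeout is expected (and harmless) when the PHY
 * has gated the MAC clocks, e.g. in isolation or power-down mode; see the
 * callers in tse_open() and tse_shutdown().
 */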

/* Initialize MAC core registers
 */
static int init_mac(struct altera_tse_private *priv)
{
        unsigned int cmd = 0;
        u32 frm_length;

        /* Setup Rx FIFO */
        csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
                priv->mac_dev, tse_csroffs(rx_section_empty));

        csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev,
                tse_csroffs(rx_section_full));

        csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev,
                tse_csroffs(rx_almost_empty));

        csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev,
                tse_csroffs(rx_almost_full));

        /* Setup Tx FIFO */
        csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
                priv->mac_dev, tse_csroffs(tx_section_empty));

        csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev,
                tse_csroffs(tx_section_full));

        csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev,
                tse_csroffs(tx_almost_empty));

        csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev,
                tse_csroffs(tx_almost_full));

        /* MAC Address Configuration */
        tse_update_mac_addr(priv, priv->dev->dev_addr);

        /* MAC Function Configuration */
        frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
        csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length));

        csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev,
                tse_csroffs(tx_ipg_length));

        /* Enable RX shift 16 for alignment of all received frames on a
         * 16-bit start address, and disable TX shift 16 and CRC omission
         */
        tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat),
                    ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);

        tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat),
                      ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
                      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);

        /* Set the MAC options */
        cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config));
        cmd &= ~MAC_CMDCFG_PAD_EN;      /* No padding Removal on Receive */
        cmd &= ~MAC_CMDCFG_CRC_FWD;     /* CRC Removal */
        cmd |= MAC_CMDCFG_RX_ERR_DISC;  /* Automatically discard frames
                                         * with CRC errors
                                         */
        cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
        cmd &= ~MAC_CMDCFG_TX_ENA;
        cmd &= ~MAC_CMDCFG_RX_ENA;

        /* Default speed and duplex setting, full/100 */
        cmd &= ~MAC_CMDCFG_HD_ENA;
        cmd &= ~MAC_CMDCFG_ETH_SPEED;
        cmd &= ~MAC_CMDCFG_ENA_10;

        csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config));

        csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev,
                tse_csroffs(pause_quanta));

        if (netif_msg_hw(priv))
                dev_dbg(priv->device,
                        "MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);

        return 0;
}
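
/* Worked example (illustrative): with the default MTU of 1500, frm_length
 * above is ETH_HLEN (14) + 1500 + ETH_FCS_LEN (4) = 1518, i.e. the maximum
 * untagged Ethernet frame size the MAC will accept.
 */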

/* Start/stop MAC transmission logic
 */
static void tse_set_mac(struct altera_tse_private *priv, bool enable)
{
        u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config));

        if (enable)
                value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
        else
                value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);

        csrwr32(value, priv->mac_dev, tse_csroffs(command_config));
}

/* Change the MTU
 */
static int tse_change_mtu(struct net_device *dev, int new_mtu)
{
        if (netif_running(dev)) {
                netdev_err(dev, "must be stopped to change its MTU\n");
                return -EBUSY;
        }

        dev->mtu = new_mtu;
        netdev_update_features(dev);

        return 0;
}

static void altera_tse_set_mcfilter(struct net_device *dev)
{
        struct altera_tse_private *priv = netdev_priv(dev);
        int i;
        struct netdev_hw_addr *ha;

        /* clear the hash filter */
        for (i = 0; i < 64; i++)
                csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4);

        netdev_for_each_mc_addr(ha, dev) {
                unsigned int hash = 0;
                int mac_octet;

                for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
                        unsigned char xor_bit = 0;
                        unsigned char octet = ha->addr[mac_octet];
                        unsigned int bitshift;

                        for (bitshift = 0; bitshift < 8; bitshift++)
                                xor_bit ^= ((octet >> bitshift) & 0x01);

                        hash = (hash << 1) | xor_bit;
                }
                csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4);
        }
}
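
/* Worked example of the hash above (illustrative): each of the 6 octets is
 * XOR-reduced to a single parity bit, and the bits are assembled octet 5
 * first, giving a 6-bit index into the 64-entry hash table. For the
 * all-hosts multicast address 01:00:5e:00:00:01:
 *
 *   parity(addr[5] = 0x01) = 1, parity(addr[4] = 0x00) = 0,
 *   parity(addr[3] = 0x00) = 0, parity(addr[2] = 0x5e) = 1,
 *   parity(addr[1] = 0x00) = 0, parity(addr[0] = 0x01) = 1
 *
 * so hash = 0b100101 = 37, and entry 37 of the hash table is set.
 */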

static void altera_tse_set_mcfilterall(struct net_device *dev)
{
        struct altera_tse_private *priv = netdev_priv(dev);
        int i;

        /* set the hash filter */
        for (i = 0; i < 64; i++)
                csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4);
}

/* Set or clear the multicast filter for this adaptor
 */
static void tse_set_rx_mode_hashfilter(struct net_device *dev)
{
        struct altera_tse_private *priv = netdev_priv(dev);

        spin_lock(&priv->mac_cfg_lock);

        if (dev->flags & IFF_PROMISC)
                tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
                            MAC_CMDCFG_PROMIS_EN);

        if (dev->flags & IFF_ALLMULTI)
                altera_tse_set_mcfilterall(dev);
        else
                altera_tse_set_mcfilter(dev);

        spin_unlock(&priv->mac_cfg_lock);
}

/* Set or clear the multicast filter for this adaptor
 */
static void tse_set_rx_mode(struct net_device *dev)
{
        struct altera_tse_private *priv = netdev_priv(dev);

        spin_lock(&priv->mac_cfg_lock);

        if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
            !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
                tse_set_bit(priv->mac_dev, tse_csroffs(command_config),
                            MAC_CMDCFG_PROMIS_EN);
        else
                tse_clear_bit(priv->mac_dev, tse_csroffs(command_config),
                              MAC_CMDCFG_PROMIS_EN);

        spin_unlock(&priv->mac_cfg_lock);
}

/* Initialise (if necessary) the SGMII PCS component
 */
static int init_sgmii_pcs(struct net_device *dev)
{
        struct altera_tse_private *priv = netdev_priv(dev);
        int n;
        unsigned int tmp_reg = 0;

        if (priv->phy_iface != PHY_INTERFACE_MODE_SGMII)
                return 0; /* Nothing to do, not in SGMII mode */

        /* The TSE SGMII PCS block looks a little like a PHY, it is
         * mapped into the zeroth MDIO space of the MAC and it has
         * ID registers like a PHY would.  Sadly this is often
         * configured to zeroes, so don't be surprised if it does
         * show 0x00000000.
         */

        if (sgmii_pcs_scratch_test(priv, 0x0000) &&
                sgmii_pcs_scratch_test(priv, 0xffff) &&
                sgmii_pcs_scratch_test(priv, 0xa5a5) &&
                sgmii_pcs_scratch_test(priv, 0x5a5a)) {
                netdev_info(dev, "PCS PHY ID: 0x%04x%04x\n",
                                sgmii_pcs_read(priv, MII_PHYSID1),
                                sgmii_pcs_read(priv, MII_PHYSID2));
        } else {
                netdev_err(dev, "SGMII PCS Scratch memory test failed.\n");
                return -ENOMEM;
        }

        /* Starting on page 5-29 of the MegaCore Function User Guide
         * Set SGMII Link timer to 1.6ms
         */
        sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_0, 0x0D40);
        sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_1, 0x03);

        /* Enable SGMII Interface and Enable SGMII Auto Negotiation */
        sgmii_pcs_write(priv, SGMII_PCS_IF_MODE, 0x3);

        /* Enable Autonegotiation */
        tmp_reg = sgmii_pcs_read(priv, MII_BMCR);
        tmp_reg |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
        sgmii_pcs_write(priv, MII_BMCR, tmp_reg);

        /* Reset PCS block */
        tmp_reg |= BMCR_RESET;
        sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
        for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) {
                if (!(sgmii_pcs_read(priv, MII_BMCR) & BMCR_RESET)) {
                        netdev_info(dev, "SGMII PCS block initialised OK\n");
                        return 0;
                }
                udelay(1);
        }

        /* We failed to reset the block, return a timeout */
        netdev_err(dev, "SGMII PCS block reset failed.\n");
        return -ETIMEDOUT;
}
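
/* Timing note (illustrative): assuming the PCS runs at the usual 125 MHz
 * SGMII clock (8 ns per tick), the link timer written above spans the two
 * registers as {SGMII_PCS_LINK_TIMER_1, SGMII_PCS_LINK_TIMER_0} =
 * {0x03, 0x0D40}, i.e. 0x30D40 = 200000 ticks, and
 * 200000 * 8 ns = 1.6 ms, the auto-negotiation link timer value the SGMII
 * specification expects.
 */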

/* Open and initialize the interface
 */
static int tse_open(struct net_device *dev)
{
        struct altera_tse_private *priv = netdev_priv(dev);
        int ret = 0;
        int i;
        unsigned long flags;

        /* Reset and configure TSE MAC and probe associated PHY */
        ret = priv->dmaops->init_dma(priv);
        if (ret != 0) {
                netdev_err(dev, "Cannot initialize DMA\n");
                goto phy_error;
        }

        if (netif_msg_ifup(priv))
                netdev_warn(dev, "device MAC address %pM\n",
                            dev->dev_addr);

        if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
                netdev_warn(dev, "TSE revision %x\n", priv->revision);

        spin_lock(&priv->mac_cfg_lock);
        /* no-op if MAC not operating in SGMII mode */
        ret = init_sgmii_pcs(dev);
        if (ret) {
                netdev_err(dev,
                           "Cannot init the SGMII PCS (error: %d)\n", ret);
                spin_unlock(&priv->mac_cfg_lock);
                goto phy_error;
        }

        ret = reset_mac(priv);
        /* Note that reset_mac will fail if the clocks are gated by the PHY
         * due to the PHY being put into isolation or power down mode.
         * This is not an error if reset fails due to no clock.
         */
        if (ret)
                netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);

        ret = init_mac(priv);
        spin_unlock(&priv->mac_cfg_lock);
        if (ret) {
                netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
                goto alloc_skbuf_error;
        }

        priv->dmaops->reset_dma(priv);

        /* Create and initialize the TX/RX descriptors chains. */
        priv->rx_ring_size = dma_rx_num;
        priv->tx_ring_size = dma_tx_num;
        ret = alloc_init_skbufs(priv);
        if (ret) {
                netdev_err(dev, "DMA descriptors initialization failed\n");
                goto alloc_skbuf_error;
        }

        /* Register RX interrupt */
        ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
                          dev->name, dev);
        if (ret) {
                netdev_err(dev, "Unable to register RX interrupt %d\n",
                           priv->rx_irq);
                goto init_error;
        }

        /* Register TX interrupt */
        ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
                          dev->name, dev);
        if (ret) {
                netdev_err(dev, "Unable to register TX interrupt %d\n",
                           priv->tx_irq);
                goto tx_request_irq_error;
        }

        /* Enable DMA interrupts */
        spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
        priv->dmaops->enable_rxirq(priv);
        priv->dmaops->enable_txirq(priv);

        /* Setup RX descriptor chain */
        for (i = 0; i < priv->rx_ring_size; i++)
                priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);

        spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

        if (dev->phydev)
                phy_start(dev->phydev);

        napi_enable(&priv->napi);
        netif_start_queue(dev);

        priv->dmaops->start_rxdma(priv);

        /* Start MAC Rx/Tx */
        spin_lock(&priv->mac_cfg_lock);
        tse_set_mac(priv, true);
        spin_unlock(&priv->mac_cfg_lock);

        return 0;

tx_request_irq_error:
        free_irq(priv->rx_irq, dev);
init_error:
        free_skbufs(dev);
alloc_skbuf_error:
phy_error:
        return ret;
}

/* Stop TSE MAC interface and put the device in an inactive state
 */
static int tse_shutdown(struct net_device *dev)
{
        struct altera_tse_private *priv = netdev_priv(dev);
        int ret;
        unsigned long flags;

        /* Stop the PHY */
        if (dev->phydev)
                phy_stop(dev->phydev);

        netif_stop_queue(dev);
        napi_disable(&priv->napi);

        /* Disable DMA interrupts */
        spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
        priv->dmaops->disable_rxirq(priv);
        priv->dmaops->disable_txirq(priv);
        spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

        /* Free the IRQ lines */
        free_irq(priv->rx_irq, dev);
        free_irq(priv->tx_irq, dev);

        /* disable and reset the MAC, empties fifo */
        spin_lock(&priv->mac_cfg_lock);
        spin_lock(&priv->tx_lock);

        ret = reset_mac(priv);
        /* Note that reset_mac will fail if the clocks are gated by the PHY
         * due to the PHY being put into isolation or power down mode.
         * This is not an error if reset fails due to no clock.
         */
        if (ret)
                netdev_dbg(dev, "Cannot reset MAC core (error: %d)\n", ret);
        priv->dmaops->reset_dma(priv);
        free_skbufs(dev);

        spin_unlock(&priv->tx_lock);
        spin_unlock(&priv->mac_cfg_lock);

        priv->dmaops->uninit_dma(priv);

        return 0;
}

static struct net_device_ops altera_tse_netdev_ops = {
        .ndo_open               = tse_open,
        .ndo_stop               = tse_shutdown,
        .ndo_start_xmit         = tse_start_xmit,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_set_rx_mode        = tse_set_rx_mode,
        .ndo_change_mtu         = tse_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
};

static int request_and_map(struct platform_device *pdev, const char *name,
                           struct resource **res, void __iomem **ptr)
{
        struct resource *region;
        struct device *device = &pdev->dev;

        *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        if (*res == NULL) {
                dev_err(device, "resource %s not defined\n", name);
                return -ENODEV;
        }

        region = devm_request_mem_region(device, (*res)->start,
                                         resource_size(*res), dev_name(device));
        if (region == NULL) {
                dev_err(device, "unable to request %s\n", name);
                return -EBUSY;
        }

        *ptr = devm_ioremap_nocache(device, region->start,
                                    resource_size(region));
        if (*ptr == NULL) {
                dev_err(device, "ioremap_nocache of %s failed!", name);
                return -ENOMEM;
        }

        return 0;
}
1353
1354/* Probe Altera TSE MAC device
1355 */
1356static int altera_tse_probe(struct platform_device *pdev)
1357{
1358        struct net_device *ndev;
1359        int ret = -ENODEV;
1360        struct resource *control_port;
1361        struct resource *dma_res;
1362        struct altera_tse_private *priv;
1363        const unsigned char *macaddr;
1364        void __iomem *descmap;
1365        const struct of_device_id *of_id = NULL;
1366
1367        ndev = alloc_etherdev(sizeof(struct altera_tse_private));
1368        if (!ndev) {
1369                dev_err(&pdev->dev, "Could not allocate network device\n");
1370                return -ENODEV;
1371        }
1372
1373        SET_NETDEV_DEV(ndev, &pdev->dev);
1374
1375        priv = netdev_priv(ndev);
1376        priv->device = &pdev->dev;
1377        priv->dev = ndev;
1378        priv->msg_enable = netif_msg_init(debug, default_msg_level);
1379
1380        of_id = of_match_device(altera_tse_ids, &pdev->dev);
1381
1382        if (of_id)
1383                priv->dmaops = (struct altera_dmaops *)of_id->data;
1384
1385
1386        if (priv->dmaops &&
1387            priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
1388                /* Get the mapped address to the SGDMA descriptor memory */
1389                ret = request_and_map(pdev, "s1", &dma_res, &descmap);
1390                if (ret)
1391                        goto err_free_netdev;
1392
1393                /* Start of that memory is for transmit descriptors */
1394                priv->tx_dma_desc = descmap;
1395
1396                /* First half is for tx descriptors, other half for tx */
1397                priv->txdescmem = resource_size(dma_res)/2;
1398
1399                priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;
1400
1401                priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
1402                                                     priv->txdescmem));
1403                priv->rxdescmem = resource_size(dma_res)/2;
1404                priv->rxdescmem_busaddr = dma_res->start;
1405                priv->rxdescmem_busaddr += priv->txdescmem;
1406
1407                if (upper_32_bits(priv->rxdescmem_busaddr)) {
1408                        dev_dbg(priv->device,
1409                                "SGDMA bus addresses greater than 32-bits\n");
1410                        ret = -EINVAL;
1411                        goto err_free_netdev;
1412                }
1413                if (upper_32_bits(priv->txdescmem_busaddr)) {
1414                        dev_dbg(priv->device,
1415                                "SGDMA bus addresses greater than 32-bits\n");
1416                        ret = -EINVAL;
1417                        goto err_free_netdev;
1418                }
1419        } else if (priv->dmaops &&
1420                   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
1421                ret = request_and_map(pdev, "rx_resp", &dma_res,
1422                                      &priv->rx_dma_resp);
1423                if (ret)
1424                        goto err_free_netdev;
1425
1426                ret = request_and_map(pdev, "tx_desc", &dma_res,
1427                                      &priv->tx_dma_desc);
1428                if (ret)
1429                        goto err_free_netdev;
1430
1431                priv->txdescmem = resource_size(dma_res);
1432                priv->txdescmem_busaddr = dma_res->start;
1433
1434                ret = request_and_map(pdev, "rx_desc", &dma_res,
1435                                      &priv->rx_dma_desc);
1436                if (ret)
1437                        goto err_free_netdev;
1438
1439                priv->rxdescmem = resource_size(dma_res);
1440                priv->rxdescmem_busaddr = dma_res->start;
1441
1442        } else {
1443                goto err_free_netdev;
1444        }
1445
1446        if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
1447                dma_set_coherent_mask(priv->device,
1448                                      DMA_BIT_MASK(priv->dmaops->dmamask));
1449        else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
1450                dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
1451        else
1452                goto err_free_netdev;
1453
1454        /* MAC address space */
1455        ret = request_and_map(pdev, "control_port", &control_port,
1456                              (void __iomem **)&priv->mac_dev);
1457        if (ret)
1458                goto err_free_netdev;
1459
1460        /* xSGDMA Rx Dispatcher address space */
1461        ret = request_and_map(pdev, "rx_csr", &dma_res,
1462                              &priv->rx_dma_csr);
1463        if (ret)
1464                goto err_free_netdev;
1465
1466
1467        /* xSGDMA Tx Dispatcher address space */
1468        ret = request_and_map(pdev, "tx_csr", &dma_res,
1469                              &priv->tx_dma_csr);
1470        if (ret)
1471                goto err_free_netdev;
1472
1473
        /* Rx IRQ */
        priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
        if (priv->rx_irq == -ENXIO) {
                dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
                ret = -ENXIO;
                goto err_free_netdev;
        }

        /* Tx IRQ */
        priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
        if (priv->tx_irq == -ENXIO) {
                dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
                ret = -ENXIO;
                goto err_free_netdev;
        }

        /* get FIFO depths from device tree */
        if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
                                 &priv->rx_fifo_depth)) {
                dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
                ret = -ENXIO;
                goto err_free_netdev;
        }

        if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
                                 &priv->tx_fifo_depth)) {
                dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
                ret = -ENXIO;
                goto err_free_netdev;
        }
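        /* The FIFO depths are fixed at synthesis time, so they must be
         * supplied by the device tree; the driver uses them later when
         * programming the MAC's FIFO threshold registers.
         */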

        /* get hash filter settings for this instance */
        priv->hash_filter =
                of_property_read_bool(pdev->dev.of_node,
                                      "altr,has-hash-multicast-filter");

        /* Force the hash filter off for now, until the multicast filter
         * receive issue is debugged
         */
        priv->hash_filter = 0;

        /* get supplemental address settings for this instance */
        priv->added_unicast =
                of_property_read_bool(pdev->dev.of_node,
                                      "altr,has-supplementary-unicast");

        priv->dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
        /* Max MTU is 1500, ETH_DATA_LEN */
        priv->dev->max_mtu = ETH_DATA_LEN;

        /* Get the max mtu from the device tree. Note that the
         * "max-frame-size" parameter is actually the max mtu: its
         * definition in the ePAPR v1.1 spec and its common usage differ,
         * so go with usage.
         */
        of_property_read_u32(pdev->dev.of_node, "max-frame-size",
                             &priv->dev->max_mtu);
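        /* The property is optional: of_property_read_u32() leaves max_mtu
         * at the ETH_DATA_LEN default set above when it is absent.
         */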

        /* The DMA buffer size already accounts for an alignment bias
         * to avoid unaligned access exceptions for the Nios II processor.
         */
        priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;

        /* get default MAC address from device tree */
        macaddr = of_get_mac_address(pdev->dev.of_node);
        if (macaddr)
                ether_addr_copy(ndev->dev_addr, macaddr);
        else
                eth_hw_addr_random(ndev);
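        /* eth_hw_addr_random() falls back to a random, locally administered
         * address and marks it NET_ADDR_RANDOM so user space can tell it
         * was not hardware-provided.
         */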

        /* get phy addr and create mdio */
        ret = altera_tse_phy_get_addr_mdio_create(ndev);
        if (ret)
                goto err_free_netdev;

        /* initialize netdev */
        ndev->mem_start = control_port->start;
        ndev->mem_end = control_port->end;
        ndev->netdev_ops = &altera_tse_netdev_ops;
        altera_tse_set_ethtool_ops(ndev);

        altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;

        if (priv->hash_filter)
                altera_tse_netdev_ops.ndo_set_rx_mode =
                        tse_set_rx_mode_hashfilter;
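        /* Note: altera_tse_netdev_ops is a shared, writable structure, so
         * patching ndo_set_rx_mode here affects every instance this module
         * has probed; with mixed hash-filter settings, the last probe wins.
         */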

        /* Scatter/gather I/O is not supported,
         * so it is turned off
         */
        ndev->hw_features &= ~NETIF_F_SG;
        ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;

        /* VLAN offloading of tagging, stripping and filtering is not
         * supported by hardware, but the driver will accommodate the
         * extra 4-byte VLAN tag for processing by upper layers
         */
        ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

        /* setup NAPI interface */
        netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);

        spin_lock_init(&priv->mac_cfg_lock);
        spin_lock_init(&priv->tx_lock);
        spin_lock_init(&priv->rxdma_irq_lock);
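        /* mac_cfg_lock serializes MAC register reconfiguration, tx_lock
         * protects the transmit path, and rxdma_irq_lock protects Rx DMA
         * interrupt enable/clear against the ISR.
         */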

        /* Report no carrier until the PHY brings the link up. */
        netif_carrier_off(ndev);
        ret = register_netdev(ndev);
        if (ret) {
                dev_err(&pdev->dev, "failed to register TSE net device\n");
                goto err_register_netdev;
        }

        platform_set_drvdata(pdev, ndev);

        priv->revision = ioread32(&priv->mac_dev->megacore_revision);

        if (netif_msg_probe(priv))
                dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
                         (priv->revision >> 8) & 0xff,
                         priv->revision & 0xff,
                         (unsigned long) control_port->start, priv->rx_irq,
                         priv->tx_irq);

        ret = init_phy(ndev);
        if (ret != 0) {
                netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
                goto err_init_phy;
        }
        return 0;

err_init_phy:
        unregister_netdev(ndev);
err_register_netdev:
        netif_napi_del(&priv->napi);
        altera_tse_mdio_destroy(ndev);
err_free_netdev:
        free_netdev(ndev);
        return ret;
}

/* Remove Altera TSE MAC device */
static int altera_tse_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct altera_tse_private *priv = netdev_priv(ndev);

        if (ndev->phydev) {
                phy_disconnect(ndev->phydev);

                if (of_phy_is_fixed_link(priv->device->of_node))
                        of_phy_deregister_fixed_link(priv->device->of_node);
        }

        platform_set_drvdata(pdev, NULL);
        altera_tse_mdio_destroy(ndev);
        unregister_netdev(ndev);
        free_netdev(ndev);

        return 0;
}
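
/* DMA backend operation tables; probe selects one of these through the
 * .data pointer of the OF match table below.
 */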

static const struct altera_dmaops altera_dtype_sgdma = {
        .altera_dtype = ALTERA_DTYPE_SGDMA,
        .dmamask = 32,
        .reset_dma = sgdma_reset,
        .enable_txirq = sgdma_enable_txirq,
        .enable_rxirq = sgdma_enable_rxirq,
        .disable_txirq = sgdma_disable_txirq,
        .disable_rxirq = sgdma_disable_rxirq,
        .clear_txirq = sgdma_clear_txirq,
        .clear_rxirq = sgdma_clear_rxirq,
        .tx_buffer = sgdma_tx_buffer,
        .tx_completions = sgdma_tx_completions,
        .add_rx_desc = sgdma_add_rx_desc,
        .get_rx_status = sgdma_rx_status,
        .init_dma = sgdma_initialize,
        .uninit_dma = sgdma_uninitialize,
        .start_rxdma = sgdma_start_rxdma,
};

static const struct altera_dmaops altera_dtype_msgdma = {
        .altera_dtype = ALTERA_DTYPE_MSGDMA,
        .dmamask = 64,
        .reset_dma = msgdma_reset,
        .enable_txirq = msgdma_enable_txirq,
        .enable_rxirq = msgdma_enable_rxirq,
        .disable_txirq = msgdma_disable_txirq,
        .disable_rxirq = msgdma_disable_rxirq,
        .clear_txirq = msgdma_clear_txirq,
        .clear_rxirq = msgdma_clear_rxirq,
        .tx_buffer = msgdma_tx_buffer,
        .tx_completions = msgdma_tx_completions,
        .add_rx_desc = msgdma_add_rx_desc,
        .get_rx_status = msgdma_rx_status,
        .init_dma = msgdma_initialize,
        .uninit_dma = msgdma_uninitialize,
        .start_rxdma = msgdma_start_rxdma,
};

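/* The compatible string selects the DMA backend: mSGDMA designs bind
 * altera_dtype_msgdma, while legacy SGDMA designs (including the
 * deprecated upper-case "ALTR" vendor prefix) bind altera_dtype_sgdma.
 */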
static const struct of_device_id altera_tse_ids[] = {
        { .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
        { .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
        { .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
        {},
};
MODULE_DEVICE_TABLE(of, altera_tse_ids);

static struct platform_driver altera_tse_driver = {
        .probe          = altera_tse_probe,
        .remove         = altera_tse_remove,
        .driver         = {
                .name   = ALTERA_TSE_RESOURCE_NAME,
                .of_match_table = altera_tse_ids,
        },
};

module_platform_driver(altera_tse_driver);

MODULE_AUTHOR("Altera Corporation");
MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
MODULE_LICENSE("GPL v2");