linux/drivers/net/ethernet/faraday/ftgmac100.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Faraday FTGMAC100 Gigabit Ethernet
   4 *
   5 * (C) Copyright 2009-2011 Faraday Technology
   6 * Po-Yu Chuang <ratbert@faraday-tech.com>
   7 */
   8
   9#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt
  10
  11#include <linux/clk.h>
  12#include <linux/dma-mapping.h>
  13#include <linux/etherdevice.h>
  14#include <linux/ethtool.h>
  15#include <linux/interrupt.h>
  16#include <linux/io.h>
  17#include <linux/module.h>
  18#include <linux/netdevice.h>
  19#include <linux/of.h>
  20#include <linux/of_mdio.h>
  21#include <linux/phy.h>
  22#include <linux/platform_device.h>
  23#include <linux/property.h>
  24#include <linux/crc32.h>
  25#include <linux/if_vlan.h>
  26#include <linux/of_net.h>
  27#include <net/ip.h>
  28#include <net/ncsi.h>
  29
  30#include "ftgmac100.h"
  31
  32#define DRV_NAME        "ftgmac100"
  33
  34/* Arbitrary values, I am not sure the HW has limits */
  35#define MAX_RX_QUEUE_ENTRIES    1024
  36#define MAX_TX_QUEUE_ENTRIES    1024
  37#define MIN_RX_QUEUE_ENTRIES    32
  38#define MIN_TX_QUEUE_ENTRIES    32
  39
  40/* Defaults */
  41#define DEF_RX_QUEUE_ENTRIES    128
  42#define DEF_TX_QUEUE_ENTRIES    128
  43
  44#define MAX_PKT_SIZE            1536
  45#define RX_BUF_SIZE             MAX_PKT_SIZE    /* must be smaller than 0x3fff */
  46
  47/* Min number of tx ring entries before stopping queue */
  48#define TX_THRESHOLD            (MAX_SKB_FRAGS + 1)
  49
  50#define FTGMAC_100MHZ           100000000
  51#define FTGMAC_25MHZ            25000000
  52
  53struct ftgmac100 {
  54        /* Registers */
  55        struct resource *res;
  56        void __iomem *base;
  57
  58        /* Rx ring */
  59        unsigned int rx_q_entries;
  60        struct ftgmac100_rxdes *rxdes;
  61        dma_addr_t rxdes_dma;
  62        struct sk_buff **rx_skbs;
  63        unsigned int rx_pointer;
  64        u32 rxdes0_edorr_mask;
  65
  66        /* Tx ring */
  67        unsigned int tx_q_entries;
  68        struct ftgmac100_txdes *txdes;
  69        dma_addr_t txdes_dma;
  70        struct sk_buff **tx_skbs;
  71        unsigned int tx_clean_pointer;
  72        unsigned int tx_pointer;
  73        u32 txdes0_edotr_mask;
  74
  75        /* Used to signal the reset task of ring change request */
  76        unsigned int new_rx_q_entries;
  77        unsigned int new_tx_q_entries;
  78
  79        /* Scratch page to use when rx skb alloc fails */
  80        void *rx_scratch;
  81        dma_addr_t rx_scratch_dma;
  82
  83        /* Component structures */
  84        struct net_device *netdev;
  85        struct device *dev;
  86        struct ncsi_dev *ndev;
  87        struct napi_struct napi;
  88        struct work_struct reset_task;
  89        struct mii_bus *mii_bus;
  90        struct clk *clk;
  91
  92        /* AST2500/AST2600 RMII ref clock gate */
  93        struct clk *rclk;
  94
  95        /* Link management */
  96        int cur_speed;
  97        int cur_duplex;
  98        bool use_ncsi;
  99
 100        /* Multicast filter settings */
 101        u32 maht0;
 102        u32 maht1;
 103
 104        /* Flow control settings */
 105        bool tx_pause;
 106        bool rx_pause;
 107        bool aneg_pause;
 108
 109        /* Misc */
 110        bool need_mac_restart;
 111        bool is_aspeed;
 112};
 113
 114static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
 115{
 116        struct net_device *netdev = priv->netdev;
 117        int i;
 118
 119        /* NOTE: reset clears all registers */
 120        iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
 121        iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
 122                  priv->base + FTGMAC100_OFFSET_MACCR);
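        /* Poll for the SW_RST bit to self-clear, for up to ~200us */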
 123        for (i = 0; i < 200; i++) {
 124                unsigned int maccr;
 125
 126                maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
 127                if (!(maccr & FTGMAC100_MACCR_SW_RST))
 128                        return 0;
 129
 130                udelay(1);
 131        }
 132
 133        netdev_err(netdev, "Hardware reset failed\n");
 134        return -EIO;
 135}
 136
 137static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
 138{
 139        u32 maccr = 0;
 140
 141        switch (priv->cur_speed) {
 142        case SPEED_10:
 143        case 0: /* no link */
 144                break;
 145
 146        case SPEED_100:
 147                maccr |= FTGMAC100_MACCR_FAST_MODE;
 148                break;
 149
 150        case SPEED_1000:
 151                maccr |= FTGMAC100_MACCR_GIGA_MODE;
 152                break;
 153        default:
 154                netdev_err(priv->netdev, "Unknown speed %d !\n",
 155                           priv->cur_speed);
 156                break;
 157        }
 158
 159        /* (Re)initialize the queue pointers */
 160        priv->rx_pointer = 0;
 161        priv->tx_clean_pointer = 0;
 162        priv->tx_pointer = 0;
 163
 164        /* The doc says reset twice with 10us interval */
 165        if (ftgmac100_reset_mac(priv, maccr))
 166                return -EIO;
 167        usleep_range(10, 1000);
 168        return ftgmac100_reset_mac(priv, maccr);
 169}
 170
 171static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
 172{
 173        unsigned int maddr = mac[0] << 8 | mac[1];
 174        unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
 175
 176        iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR);
 177        iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
 178}
 179
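/* Pick the initial MAC address: prefer an address supplied via the
 * device tree / firmware, then fall back to whatever is already
 * programmed into the chip (e.g. left there by the bootloader), and
 * finally generate a random address.
 */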
 180static void ftgmac100_initial_mac(struct ftgmac100 *priv)
 181{
 182        u8 mac[ETH_ALEN];
 183        unsigned int m;
 184        unsigned int l;
 185        void *addr;
 186
 187        addr = device_get_mac_address(priv->dev, mac, ETH_ALEN);
 188        if (addr) {
 189                ether_addr_copy(priv->netdev->dev_addr, mac);
 190                dev_info(priv->dev, "Read MAC address %pM from device tree\n",
 191                         mac);
 192                return;
 193        }
 194
 195        m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
 196        l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR);
 197
 198        mac[0] = (m >> 8) & 0xff;
 199        mac[1] = m & 0xff;
 200        mac[2] = (l >> 24) & 0xff;
 201        mac[3] = (l >> 16) & 0xff;
 202        mac[4] = (l >> 8) & 0xff;
 203        mac[5] = l & 0xff;
 204
 205        if (is_valid_ether_addr(mac)) {
 206                ether_addr_copy(priv->netdev->dev_addr, mac);
 207                dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
 208        } else {
 209                eth_hw_addr_random(priv->netdev);
 210                dev_info(priv->dev, "Generated random MAC address %pM\n",
 211                         priv->netdev->dev_addr);
 212        }
 213}
 214
 215static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
 216{
 217        int ret;
 218
 219        ret = eth_prepare_mac_addr_change(dev, p);
 220        if (ret < 0)
 221                return ret;
 222
 223        eth_commit_mac_addr_change(dev, p);
 224        ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr);
 225
 226        return 0;
 227}
 228
 229static void ftgmac100_config_pause(struct ftgmac100 *priv)
 230{
 231        u32 fcr = FTGMAC100_FCR_PAUSE_TIME(16);
 232
 233        /* Throttle tx queue when receiving pause frames */
 234        if (priv->rx_pause)
 235                fcr |= FTGMAC100_FCR_FC_EN;
 236
 237        /* Enables sending pause frames when the RX queue is past a
 238         * certain threshold.
 239         */
 240        if (priv->tx_pause)
 241                fcr |= FTGMAC100_FCR_FCTHR_EN;
 242
 243        iowrite32(fcr, priv->base + FTGMAC100_OFFSET_FCR);
 244}
 245
 246static void ftgmac100_init_hw(struct ftgmac100 *priv)
 247{
 248        u32 reg, rfifo_sz, tfifo_sz;
 249
 250        /* Clear stale interrupts */
 251        reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
 252        iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR);
 253
 254        /* Setup RX ring buffer base */
 255        iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR);
 256
 257        /* Setup TX ring buffer base */
 258        iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
 259
 260        /* Configure RX buffer size */
 261        iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE),
 262                  priv->base + FTGMAC100_OFFSET_RBSR);
 263
 264        /* Set RX descriptor autopoll */
 265        iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1),
 266                  priv->base + FTGMAC100_OFFSET_APTC);
 267
 268        /* Write MAC address */
 269        ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr);
 270
 271        /* Write multicast filter */
 272        iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
 273        iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);
 274
 275        /* Configure descriptor sizes and increase burst sizes according
 276         * to values in Aspeed SDK. The FIFO arbitration is enabled and
 277         * the thresholds set based on the recommended values in the
 278         * AST2400 specification.
 279         */
 280        iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) |   /* 2*8 bytes RX descs */
 281                  FTGMAC100_DBLAC_TXDES_SIZE(2) |   /* 2*8 bytes TX descs */
 282                  FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */
 283                  FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */
 284                  FTGMAC100_DBLAC_RX_THR_EN |       /* Enable fifo threshold arb */
 285                  FTGMAC100_DBLAC_RXFIFO_HTHR(6) |  /* 6/8 of FIFO high threshold */
 286                  FTGMAC100_DBLAC_RXFIFO_LTHR(2),   /* 2/8 of FIFO low threshold */
 287                  priv->base + FTGMAC100_OFFSET_DBLAC);
 288
 289        /* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt
 290         * mitigation doesn't seem to provide any benefit with NAPI so leave
 291         * it at that.
 292         */
 293        iowrite32(FTGMAC100_ITC_RXINT_THR(1) |
 294                  FTGMAC100_ITC_TXINT_THR(1),
 295                  priv->base + FTGMAC100_OFFSET_ITC);
 296
 297        /* Configure FIFO sizes in the TPAFCR register */
 298        reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR);
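        /* FEAR appears to report the RX FIFO size in bits 2:0 and the
         * TX FIFO size in bits 5:3; mirror them into TPAFCR bits 26:24
         * and 29:27 respectively.
         */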
 299        rfifo_sz = reg & 0x00000007;
 300        tfifo_sz = (reg >> 3) & 0x00000007;
 301        reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR);
 302        reg &= ~0x3f000000;
 303        reg |= (tfifo_sz << 27);
 304        reg |= (rfifo_sz << 24);
 305        iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR);
 306}
 307
 308static void ftgmac100_start_hw(struct ftgmac100 *priv)
 309{
 310        u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
 311
 312        /* Keep the original GMAC and FAST bits */
 313        maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE);
 314
 315        /* Add all the main enable bits */
 316        maccr |= FTGMAC100_MACCR_TXDMA_EN       |
 317                 FTGMAC100_MACCR_RXDMA_EN       |
 318                 FTGMAC100_MACCR_TXMAC_EN       |
 319                 FTGMAC100_MACCR_RXMAC_EN       |
 320                 FTGMAC100_MACCR_CRC_APD        |
 321                 FTGMAC100_MACCR_PHY_LINK_LEVEL |
 322                 FTGMAC100_MACCR_RX_RUNT        |
 323                 FTGMAC100_MACCR_RX_BROADPKT;
 324
 325        /* Add other bits as needed */
 326        if (priv->cur_duplex == DUPLEX_FULL)
 327                maccr |= FTGMAC100_MACCR_FULLDUP;
 328        if (priv->netdev->flags & IFF_PROMISC)
 329                maccr |= FTGMAC100_MACCR_RX_ALL;
 330        if (priv->netdev->flags & IFF_ALLMULTI)
 331                maccr |= FTGMAC100_MACCR_RX_MULTIPKT;
 332        else if (netdev_mc_count(priv->netdev))
 333                maccr |= FTGMAC100_MACCR_HT_MULTI_EN;
 334
 335        /* Vlan filtering enabled */
 336        if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
 337                maccr |= FTGMAC100_MACCR_RM_VLAN;
 338
 339        /* Hit the HW */
 340        iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
 341}
 342
 343static void ftgmac100_stop_hw(struct ftgmac100 *priv)
 344{
 345        iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
 346}
 347
 348static void ftgmac100_calc_mc_hash(struct ftgmac100 *priv)
 349{
 350        struct netdev_hw_addr *ha;
 351
 352        priv->maht1 = 0;
 353        priv->maht0 = 0;
 354        netdev_for_each_mc_addr(ha, priv->netdev) {
 355                u32 crc_val = ether_crc_le(ETH_ALEN, ha->addr);
 356
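                /* The 6-bit hash index is the complement of bits 7:2 of
                 * the little-endian CRC32 of the address; it selects one
                 * bit in the 64-bit MAHT1:MAHT0 filter.
                 */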
 357                crc_val = (~(crc_val >> 2)) & 0x3f;
 358                if (crc_val >= 32)
 359                        priv->maht1 |= 1ul << (crc_val - 32);
 360                else
 361                        priv->maht0 |= 1ul << (crc_val);
 362        }
 363}
 364
 365static void ftgmac100_set_rx_mode(struct net_device *netdev)
 366{
 367        struct ftgmac100 *priv = netdev_priv(netdev);
 368
 369        /* Setup the hash filter */
 370        ftgmac100_calc_mc_hash(priv);
 371
 372        /* Interface down ? that's all there is to do */
 373        if (!netif_running(netdev))
 374                return;
 375
 376        /* Update the HW */
 377        iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
 378        iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);
 379
 380        /* Reconfigure MACCR */
 381        ftgmac100_start_hw(priv);
 382}
 383
 384static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
 385                                  struct ftgmac100_rxdes *rxdes, gfp_t gfp)
 386{
 387        struct net_device *netdev = priv->netdev;
 388        struct sk_buff *skb;
 389        dma_addr_t map;
 390        int err = 0;
 391
 392        skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
 393        if (unlikely(!skb)) {
 394                if (net_ratelimit())
 395                        netdev_warn(netdev, "failed to allocate rx skb\n");
 396                err = -ENOMEM;
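                /* Point the descriptor at the scratch page so the HW
                 * still has a valid buffer to DMA into; no skb is
                 * stored for this entry, so the packet will be dropped.
                 */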
 397                map = priv->rx_scratch_dma;
 398        } else {
 399                map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
 400                                     DMA_FROM_DEVICE);
 401                if (unlikely(dma_mapping_error(priv->dev, map))) {
 402                        if (net_ratelimit())
 403                                netdev_err(netdev, "failed to map rx page\n");
 404                        dev_kfree_skb_any(skb);
 405                        map = priv->rx_scratch_dma;
 406                        skb = NULL;
 407                        err = -ENOMEM;
 408                }
 409        }
 410
 411        /* Store skb */
 412        priv->rx_skbs[entry] = skb;
 413
 414        /* Store DMA address into RX desc */
 415        rxdes->rxdes3 = cpu_to_le32(map);
 416
 417        /* Ensure the above is ordered vs clearing the OWN bit */
 418        dma_wmb();
 419
 420        /* Clean status (which resets own bit) */
 421        if (entry == (priv->rx_q_entries - 1))
 422                rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
 423        else
 424                rxdes->rxdes0 = 0;
 425
 426        return err;
 427}
 428
 429static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
 430                                              unsigned int pointer)
 431{
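        /* Ring sizes are always powers of two (enforced in
         * ftgmac100_set_ringparam), so wrapping is a simple mask.
         */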
 432        return (pointer + 1) & (priv->rx_q_entries - 1);
 433}
 434
 435static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
 436{
 437        struct net_device *netdev = priv->netdev;
 438
 439        if (status & FTGMAC100_RXDES0_RX_ERR)
 440                netdev->stats.rx_errors++;
 441
 442        if (status & FTGMAC100_RXDES0_CRC_ERR)
 443                netdev->stats.rx_crc_errors++;
 444
 445        if (status & (FTGMAC100_RXDES0_FTL |
 446                      FTGMAC100_RXDES0_RUNT |
 447                      FTGMAC100_RXDES0_RX_ODD_NB))
 448                netdev->stats.rx_length_errors++;
 449}
 450
 451static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
 452{
 453        struct net_device *netdev = priv->netdev;
 454        struct ftgmac100_rxdes *rxdes;
 455        struct sk_buff *skb;
 456        unsigned int pointer, size;
 457        u32 status, csum_vlan;
 458        dma_addr_t map;
 459
 460        /* Grab next RX descriptor */
 461        pointer = priv->rx_pointer;
 462        rxdes = &priv->rxdes[pointer];
 463
 464        /* Grab descriptor status */
 465        status = le32_to_cpu(rxdes->rxdes0);
 466
 467        /* Do we have a packet ? */
 468        if (!(status & FTGMAC100_RXDES0_RXPKT_RDY))
 469                return false;
 470
 471        /* Order subsequent reads with the test for the ready bit */
 472        dma_rmb();
 473
 474        /* We don't cope with fragmented RX packets */
 475        if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
 476                     !(status & FTGMAC100_RXDES0_LRS)))
 477                goto drop;
 478
 479        /* Grab received size and csum vlan field in the descriptor */
 480        size = status & FTGMAC100_RXDES0_VDBC;
 481        csum_vlan = le32_to_cpu(rxdes->rxdes1);
 482
 483        /* Any error (other than csum offload) flagged ? */
 484        if (unlikely(status & RXDES0_ANY_ERROR)) {
 485                /* Correct for incorrect flagging of runt packets
 486                 * with vlan tags... Just accept a runt packet that
 487                 * has been flagged as vlan and whose size is at
 488                 * least 60 bytes.
 489                 */
 490                if ((status & FTGMAC100_RXDES0_RUNT) &&
 491                    (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) &&
 492                    (size >= 60))
 493                        status &= ~FTGMAC100_RXDES0_RUNT;
 494
 495                /* Any error still in there ? */
 496                if (status & RXDES0_ANY_ERROR) {
 497                        ftgmac100_rx_packet_error(priv, status);
 498                        goto drop;
 499                }
 500        }
 501
 502        /* If the packet had no skb (failed to allocate earlier)
 503         * then try to allocate one and skip
 504         */
 505        skb = priv->rx_skbs[pointer];
  506        if (unlikely(!skb)) {
 507                ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
 508                goto drop;
 509        }
 510
 511        if (unlikely(status & FTGMAC100_RXDES0_MULTICAST))
 512                netdev->stats.multicast++;
 513
 514        /* If the HW found checksum errors, bounce it to software.
 515         *
 516         * If we didn't, we need to see if the packet was recognized
 517         * by HW as one of the supported checksummed protocols before
 518         * we accept the HW test results.
 519         */
 520        if (netdev->features & NETIF_F_RXCSUM) {
 521                u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
 522                        FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
 523                        FTGMAC100_RXDES1_IP_CHKSUM_ERR;
 524                if ((csum_vlan & err_bits) ||
 525                    !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK))
 526                        skb->ip_summed = CHECKSUM_NONE;
 527                else
 528                        skb->ip_summed = CHECKSUM_UNNECESSARY;
 529        }
 530
 531        /* Transfer received size to skb */
 532        skb_put(skb, size);
 533
 534        /* Extract vlan tag */
 535        if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
 536            (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL))
 537                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
 538                                       csum_vlan & 0xffff);
 539
 540        /* Tear down DMA mapping, do necessary cache management */
 541        map = le32_to_cpu(rxdes->rxdes3);
 542
 543#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
 544        /* When we don't have an iommu, we can save cycles by not
 545         * invalidating the cache for the part of the packet that
 546         * wasn't received.
 547         */
 548        dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE);
 549#else
 550        dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
 551#endif
 552
 553
  554        /* Replenish rx ring */
 555        ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
 556        priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
 557
 558        skb->protocol = eth_type_trans(skb, netdev);
 559
 560        netdev->stats.rx_packets++;
 561        netdev->stats.rx_bytes += size;
 562
 563        /* push packet to protocol stack */
 564        if (skb->ip_summed == CHECKSUM_NONE)
 565                netif_receive_skb(skb);
 566        else
 567                napi_gro_receive(&priv->napi, skb);
 568
 569        (*processed)++;
 570        return true;
 571
 572 drop:
 573        /* Clean rxdes0 (which resets own bit) */
 574        rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
 575        priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
 576        netdev->stats.rx_dropped++;
 577        return true;
 578}
 579
 580static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv,
 581                                     unsigned int index)
 582{
 583        if (index == (priv->tx_q_entries - 1))
 584                return priv->txdes0_edotr_mask;
 585        else
 586                return 0;
 587}
 588
 589static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv,
 590                                              unsigned int pointer)
 591{
 592        return (pointer + 1) & (priv->tx_q_entries - 1);
 593}
 594
 595static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv)
 596{
 597        /* Returns the number of available slots in the TX queue
 598         *
 599         * This always leaves one free slot so we don't have to
 600         * worry about empty vs. full, and this simplifies the
 601         * test for ftgmac100_tx_buf_cleanable() below
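         *
         * e.g. with 128 entries, tx_clean_pointer == 10 and
         * tx_pointer == 20: (10 - 20 - 1) & 127 = 117 free slots.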
 602         */
 603        return (priv->tx_clean_pointer - priv->tx_pointer - 1) &
 604                (priv->tx_q_entries - 1);
 605}
 606
 607static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv)
 608{
 609        return priv->tx_pointer != priv->tx_clean_pointer;
 610}
 611
 612static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
 613                                     unsigned int pointer,
 614                                     struct sk_buff *skb,
 615                                     struct ftgmac100_txdes *txdes,
 616                                     u32 ctl_stat)
 617{
 618        dma_addr_t map = le32_to_cpu(txdes->txdes3);
 619        size_t len;
 620
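        /* The first segment (FTS) carries the skb head mapped with
         * dma_map_single(); subsequent segments are page fragments
         * mapped with skb_frag_dma_map().
         */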
 621        if (ctl_stat & FTGMAC100_TXDES0_FTS) {
 622                len = skb_headlen(skb);
 623                dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE);
 624        } else {
 625                len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat);
 626                dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE);
 627        }
 628
 629        /* Free SKB on last segment */
 630        if (ctl_stat & FTGMAC100_TXDES0_LTS)
 631                dev_kfree_skb(skb);
 632        priv->tx_skbs[pointer] = NULL;
 633}
 634
 635static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
 636{
 637        struct net_device *netdev = priv->netdev;
 638        struct ftgmac100_txdes *txdes;
 639        struct sk_buff *skb;
 640        unsigned int pointer;
 641        u32 ctl_stat;
 642
 643        pointer = priv->tx_clean_pointer;
 644        txdes = &priv->txdes[pointer];
 645
 646        ctl_stat = le32_to_cpu(txdes->txdes0);
 647        if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN)
 648                return false;
 649
 650        skb = priv->tx_skbs[pointer];
 651        netdev->stats.tx_packets++;
 652        netdev->stats.tx_bytes += skb->len;
 653        ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
 654        txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
 655
 656        priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
 657
 658        return true;
 659}
 660
 661static void ftgmac100_tx_complete(struct ftgmac100 *priv)
 662{
 663        struct net_device *netdev = priv->netdev;
 664
 665        /* Process all completed packets */
 666        while (ftgmac100_tx_buf_cleanable(priv) &&
 667               ftgmac100_tx_complete_packet(priv))
 668                ;
 669
 670        /* Restart queue if needed */
 671        smp_mb();
 672        if (unlikely(netif_queue_stopped(netdev) &&
 673                     ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) {
 674                struct netdev_queue *txq;
 675
 676                txq = netdev_get_tx_queue(netdev, 0);
 677                __netif_tx_lock(txq, smp_processor_id());
 678                if (netif_queue_stopped(netdev) &&
 679                    ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
 680                        netif_wake_queue(netdev);
 681                __netif_tx_unlock(txq);
 682        }
 683}
 684
 685static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
 686{
 687        if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
 688                u8 ip_proto = ip_hdr(skb)->protocol;
 689
 690                *csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM;
  691                switch (ip_proto) {
 692                case IPPROTO_TCP:
 693                        *csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM;
 694                        return true;
 695                case IPPROTO_UDP:
 696                        *csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM;
 697                        return true;
 698                case IPPROTO_IP:
 699                        return true;
 700                }
 701        }
 702        return skb_checksum_help(skb) == 0;
 703}
 704
 705static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
 706                                             struct net_device *netdev)
 707{
 708        struct ftgmac100 *priv = netdev_priv(netdev);
 709        struct ftgmac100_txdes *txdes, *first;
 710        unsigned int pointer, nfrags, len, i, j;
 711        u32 f_ctl_stat, ctl_stat, csum_vlan;
 712        dma_addr_t map;
 713
 714        /* The HW doesn't pad small frames */
 715        if (eth_skb_pad(skb)) {
 716                netdev->stats.tx_dropped++;
 717                return NETDEV_TX_OK;
 718        }
 719
 720        /* Reject oversize packets */
 721        if (unlikely(skb->len > MAX_PKT_SIZE)) {
 722                if (net_ratelimit())
 723                        netdev_dbg(netdev, "tx packet too big\n");
 724                goto drop;
 725        }
 726
  727        /* Do we have a limit on #fragments ? I have yet to get a reply
  728         * from Aspeed. If there's one, I haven't hit it.
 729         */
 730        nfrags = skb_shinfo(skb)->nr_frags;
 731
 732        /* Setup HW checksumming */
 733        csum_vlan = 0;
 734        if (skb->ip_summed == CHECKSUM_PARTIAL &&
 735            !ftgmac100_prep_tx_csum(skb, &csum_vlan))
 736                goto drop;
 737
 738        /* Add VLAN tag */
 739        if (skb_vlan_tag_present(skb)) {
 740                csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
 741                csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
 742        }
 743
 744        /* Get header len */
 745        len = skb_headlen(skb);
 746
 747        /* Map the packet head */
 748        map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
 749        if (dma_mapping_error(priv->dev, map)) {
 750                if (net_ratelimit())
 751                        netdev_err(netdev, "map tx packet head failed\n");
 752                goto drop;
 753        }
 754
 755        /* Grab the next free tx descriptor */
 756        pointer = priv->tx_pointer;
 757        txdes = first = &priv->txdes[pointer];
 758
 759        /* Setup it up with the packet head. Don't write the head to the
 760         * ring just yet
 761         */
 762        priv->tx_skbs[pointer] = skb;
 763        f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
 764        f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
 765        f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
 766        f_ctl_stat |= FTGMAC100_TXDES0_FTS;
 767        if (nfrags == 0)
 768                f_ctl_stat |= FTGMAC100_TXDES0_LTS;
 769        txdes->txdes3 = cpu_to_le32(map);
 770        txdes->txdes1 = cpu_to_le32(csum_vlan);
 771
 772        /* Next descriptor */
 773        pointer = ftgmac100_next_tx_pointer(priv, pointer);
 774
 775        /* Add the fragments */
 776        for (i = 0; i < nfrags; i++) {
 777                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 778
 779                len = skb_frag_size(frag);
 780
 781                /* Map it */
 782                map = skb_frag_dma_map(priv->dev, frag, 0, len,
 783                                       DMA_TO_DEVICE);
 784                if (dma_mapping_error(priv->dev, map))
 785                        goto dma_err;
 786
 787                /* Setup descriptor */
 788                priv->tx_skbs[pointer] = skb;
 789                txdes = &priv->txdes[pointer];
 790                ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
 791                ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
 792                ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
 793                if (i == (nfrags - 1))
 794                        ctl_stat |= FTGMAC100_TXDES0_LTS;
 795                txdes->txdes0 = cpu_to_le32(ctl_stat);
 796                txdes->txdes1 = 0;
 797                txdes->txdes3 = cpu_to_le32(map);
 798
 799                /* Next one */
 800                pointer = ftgmac100_next_tx_pointer(priv, pointer);
 801        }
 802
  803        /* Order the previous packet and descriptor updates
 804         * before setting the OWN bit on the first descriptor.
 805         */
 806        dma_wmb();
 807        first->txdes0 = cpu_to_le32(f_ctl_stat);
 808
 809        /* Update next TX pointer */
 810        priv->tx_pointer = pointer;
 811
 812        /* If there isn't enough room for all the fragments of a new packet
 813         * in the TX ring, stop the queue. The sequence below is race free
 814         * vs. a concurrent restart in ftgmac100_poll()
 815         */
 816        if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
 817                netif_stop_queue(netdev);
 818                /* Order the queue stop with the test below */
 819                smp_mb();
 820                if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
 821                        netif_wake_queue(netdev);
 822        }
 823
 824        /* Poke transmitter to read the updated TX descriptors */
 825        iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
 826
 827        return NETDEV_TX_OK;
 828
 829 dma_err:
 830        if (net_ratelimit())
 831                netdev_err(netdev, "map tx fragment failed\n");
 832
 833        /* Free head */
 834        pointer = priv->tx_pointer;
 835        ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat);
 836        first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask);
 837
 838        /* Then all fragments */
 839        for (j = 0; j < i; j++) {
 840                pointer = ftgmac100_next_tx_pointer(priv, pointer);
 841                txdes = &priv->txdes[pointer];
 842                ctl_stat = le32_to_cpu(txdes->txdes0);
 843                ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
 844                txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
 845        }
 846
 847        /* This cannot be reached if we successfully mapped the
 848         * last fragment, so we know ftgmac100_free_tx_packet()
 849         * hasn't freed the skb yet.
 850         */
 851 drop:
 852        /* Drop the packet */
 853        dev_kfree_skb_any(skb);
 854        netdev->stats.tx_dropped++;
 855
 856        return NETDEV_TX_OK;
 857}
 858
 859static void ftgmac100_free_buffers(struct ftgmac100 *priv)
 860{
 861        int i;
 862
 863        /* Free all RX buffers */
 864        for (i = 0; i < priv->rx_q_entries; i++) {
 865                struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
 866                struct sk_buff *skb = priv->rx_skbs[i];
 867                dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
 868
 869                if (!skb)
 870                        continue;
 871
 872                priv->rx_skbs[i] = NULL;
 873                dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
 874                dev_kfree_skb_any(skb);
 875        }
 876
 877        /* Free all TX buffers */
 878        for (i = 0; i < priv->tx_q_entries; i++) {
 879                struct ftgmac100_txdes *txdes = &priv->txdes[i];
 880                struct sk_buff *skb = priv->tx_skbs[i];
 881
 882                if (!skb)
 883                        continue;
 884                ftgmac100_free_tx_packet(priv, i, skb, txdes,
 885                                         le32_to_cpu(txdes->txdes0));
 886        }
 887}
 888
 889static void ftgmac100_free_rings(struct ftgmac100 *priv)
 890{
 891        /* Free skb arrays */
 892        kfree(priv->rx_skbs);
 893        kfree(priv->tx_skbs);
 894
 895        /* Free descriptors */
 896        if (priv->rxdes)
 897                dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES *
 898                                  sizeof(struct ftgmac100_rxdes),
 899                                  priv->rxdes, priv->rxdes_dma);
 900        priv->rxdes = NULL;
 901
 902        if (priv->txdes)
 903                dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES *
 904                                  sizeof(struct ftgmac100_txdes),
 905                                  priv->txdes, priv->txdes_dma);
 906        priv->txdes = NULL;
 907
 908        /* Free scratch packet buffer */
 909        if (priv->rx_scratch)
 910                dma_free_coherent(priv->dev, RX_BUF_SIZE,
 911                                  priv->rx_scratch, priv->rx_scratch_dma);
 912}
 913
 914static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
 915{
 916        /* Allocate skb arrays */
 917        priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *),
 918                                GFP_KERNEL);
 919        if (!priv->rx_skbs)
 920                return -ENOMEM;
 921        priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *),
 922                                GFP_KERNEL);
 923        if (!priv->tx_skbs)
 924                return -ENOMEM;
 925
 926        /* Allocate descriptors */
 927        priv->rxdes = dma_alloc_coherent(priv->dev,
 928                                         MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes),
 929                                         &priv->rxdes_dma, GFP_KERNEL);
 930        if (!priv->rxdes)
 931                return -ENOMEM;
 932        priv->txdes = dma_alloc_coherent(priv->dev,
 933                                         MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes),
 934                                         &priv->txdes_dma, GFP_KERNEL);
 935        if (!priv->txdes)
 936                return -ENOMEM;
 937
 938        /* Allocate scratch packet buffer */
 939        priv->rx_scratch = dma_alloc_coherent(priv->dev,
 940                                              RX_BUF_SIZE,
 941                                              &priv->rx_scratch_dma,
 942                                              GFP_KERNEL);
 943        if (!priv->rx_scratch)
 944                return -ENOMEM;
 945
 946        return 0;
 947}
 948
 949static void ftgmac100_init_rings(struct ftgmac100 *priv)
 950{
 951        struct ftgmac100_rxdes *rxdes = NULL;
 952        struct ftgmac100_txdes *txdes = NULL;
 953        int i;
 954
 955        /* Update entries counts */
 956        priv->rx_q_entries = priv->new_rx_q_entries;
 957        priv->tx_q_entries = priv->new_tx_q_entries;
 958
 959        if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES))
 960                return;
 961
 962        /* Initialize RX ring */
 963        for (i = 0; i < priv->rx_q_entries; i++) {
 964                rxdes = &priv->rxdes[i];
 965                rxdes->rxdes0 = 0;
 966                rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
 967        }
 968        /* Mark the end of the ring */
 969        rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
 970
  971        if (WARN_ON(priv->tx_q_entries < MIN_TX_QUEUE_ENTRIES))
 972                return;
 973
 974        /* Initialize TX ring */
 975        for (i = 0; i < priv->tx_q_entries; i++) {
 976                txdes = &priv->txdes[i];
 977                txdes->txdes0 = 0;
 978        }
 979        txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
 980}
 981
 982static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
 983{
 984        int i;
 985
 986        for (i = 0; i < priv->rx_q_entries; i++) {
 987                struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
 988
 989                if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL))
 990                        return -ENOMEM;
 991        }
 992        return 0;
 993}
 994
 995static void ftgmac100_adjust_link(struct net_device *netdev)
 996{
 997        struct ftgmac100 *priv = netdev_priv(netdev);
 998        struct phy_device *phydev = netdev->phydev;
 999        bool tx_pause, rx_pause;
1000        int new_speed;
1001
1002        /* We store "no link" as speed 0 */
1003        if (!phydev->link)
1004                new_speed = 0;
1005        else
1006                new_speed = phydev->speed;
1007
1008        /* Grab pause settings from PHY if configured to do so */
1009        if (priv->aneg_pause) {
1010                rx_pause = tx_pause = phydev->pause;
1011                if (phydev->asym_pause)
1012                        tx_pause = !rx_pause;
1013        } else {
1014                rx_pause = priv->rx_pause;
1015                tx_pause = priv->tx_pause;
1016        }
1017
1018        /* Link hasn't changed, do nothing */
1019        if (phydev->speed == priv->cur_speed &&
1020            phydev->duplex == priv->cur_duplex &&
1021            rx_pause == priv->rx_pause &&
1022            tx_pause == priv->tx_pause)
1023                return;
1024
1025        /* Print status if we have a link or we had one and just lost it,
1026         * don't print otherwise.
1027         */
1028        if (new_speed || priv->cur_speed)
1029                phy_print_status(phydev);
1030
1031        priv->cur_speed = new_speed;
1032        priv->cur_duplex = phydev->duplex;
1033        priv->rx_pause = rx_pause;
1034        priv->tx_pause = tx_pause;
1035
1036        /* Link is down, do nothing else */
1037        if (!new_speed)
1038                return;
1039
1040        /* Disable all interrupts */
1041        iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
1042
1043        /* Reset the adapter asynchronously */
1044        schedule_work(&priv->reset_task);
1045}
1046
1047static int ftgmac100_mii_probe(struct net_device *netdev)
1048{
1049        struct ftgmac100 *priv = netdev_priv(netdev);
1050        struct platform_device *pdev = to_platform_device(priv->dev);
1051        struct device_node *np = pdev->dev.of_node;
1052        struct phy_device *phydev;
1053        phy_interface_t phy_intf;
1054        int err;
1055
1056        /* Default to RGMII. It's a gigabit part after all */
1057        err = of_get_phy_mode(np, &phy_intf);
1058        if (err)
1059                phy_intf = PHY_INTERFACE_MODE_RGMII;
1060
1061        /* Aspeed only supports these. I don't know about other IP
1062         * block vendors so I'm going to just let them through for
 1063         * now. Note that this is only a warning, in case for some
 1064         * obscure reason the DT really does mean to lie about it,
 1065         * or it's a newer part we don't know about.
1066         *
1067         * On the Aspeed SoC there are additionally straps and SCU
1068         * control bits that could tell us what the interface is
1069         * (or allow us to configure it while the IP block is held
1070         * in reset). For now I chose to keep this driver away from
1071         * those SoC specific bits and assume the device-tree is
1072         * right and the SCU has been configured properly by pinmux
1073         * or the firmware.
1074         */
1075        if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
1076                netdev_warn(netdev,
1077                            "Unsupported PHY mode %s !\n",
1078                            phy_modes(phy_intf));
1079        }
1080
1081        phydev = phy_find_first(priv->mii_bus);
1082        if (!phydev) {
1083                netdev_info(netdev, "%s: no PHY found\n", netdev->name);
1084                return -ENODEV;
1085        }
1086
1087        phydev = phy_connect(netdev, phydev_name(phydev),
1088                             &ftgmac100_adjust_link, phy_intf);
1089
1090        if (IS_ERR(phydev)) {
1091                netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
1092                return PTR_ERR(phydev);
1093        }
1094
1095        /* Indicate that we support PAUSE frames (see comment in
1096         * Documentation/networking/phy.rst)
1097         */
1098        phy_support_asym_pause(phydev);
1099
1100        /* Display what we found */
1101        phy_attached_info(phydev);
1102
1103        return 0;
1104}
1105
1106static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
1107{
1108        struct net_device *netdev = bus->priv;
1109        struct ftgmac100 *priv = netdev_priv(netdev);
1110        unsigned int phycr;
1111        int i;
1112
1113        phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
1114
1115        /* preserve MDC cycle threshold */
1116        phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
1117
1118        phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
1119                 FTGMAC100_PHYCR_REGAD(regnum) |
1120                 FTGMAC100_PHYCR_MIIRD;
1121
1122        iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
1123
1124        for (i = 0; i < 10; i++) {
1125                phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
1126
1127                if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
1128                        int data;
1129
1130                        data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
1131                        return FTGMAC100_PHYDATA_MIIRDATA(data);
1132                }
1133
1134                udelay(100);
1135        }
1136
1137        netdev_err(netdev, "mdio read timed out\n");
1138        return -EIO;
1139}
1140
1141static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
1142                                   int regnum, u16 value)
1143{
1144        struct net_device *netdev = bus->priv;
1145        struct ftgmac100 *priv = netdev_priv(netdev);
1146        unsigned int phycr;
1147        int data;
1148        int i;
1149
1150        phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
1151
1152        /* preserve MDC cycle threshold */
1153        phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
1154
1155        phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
1156                 FTGMAC100_PHYCR_REGAD(regnum) |
1157                 FTGMAC100_PHYCR_MIIWR;
1158
1159        data = FTGMAC100_PHYDATA_MIIWDATA(value);
1160
1161        iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
1162        iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
1163
1164        for (i = 0; i < 10; i++) {
1165                phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
1166
1167                if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
1168                        return 0;
1169
1170                udelay(100);
1171        }
1172
1173        netdev_err(netdev, "mdio write timed out\n");
1174        return -EIO;
1175}
1176
1177static void ftgmac100_get_drvinfo(struct net_device *netdev,
1178                                  struct ethtool_drvinfo *info)
1179{
1180        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1181        strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
1182}
1183
1184static void ftgmac100_get_ringparam(struct net_device *netdev,
1185                                    struct ethtool_ringparam *ering)
1186{
1187        struct ftgmac100 *priv = netdev_priv(netdev);
1188
1189        memset(ering, 0, sizeof(*ering));
1190        ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES;
1191        ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES;
1192        ering->rx_pending = priv->rx_q_entries;
1193        ering->tx_pending = priv->tx_q_entries;
1194}
1195
1196static int ftgmac100_set_ringparam(struct net_device *netdev,
1197                                   struct ethtool_ringparam *ering)
1198{
1199        struct ftgmac100 *priv = netdev_priv(netdev);
1200
1201        if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES ||
1202            ering->tx_pending > MAX_TX_QUEUE_ENTRIES ||
1203            ering->rx_pending < MIN_RX_QUEUE_ENTRIES ||
1204            ering->tx_pending < MIN_TX_QUEUE_ENTRIES ||
1205            !is_power_of_2(ering->rx_pending) ||
1206            !is_power_of_2(ering->tx_pending))
1207                return -EINVAL;
1208
1209        priv->new_rx_q_entries = ering->rx_pending;
1210        priv->new_tx_q_entries = ering->tx_pending;
1211        if (netif_running(netdev))
1212                schedule_work(&priv->reset_task);
1213
1214        return 0;
1215}
1216
1217static void ftgmac100_get_pauseparam(struct net_device *netdev,
1218                                     struct ethtool_pauseparam *pause)
1219{
1220        struct ftgmac100 *priv = netdev_priv(netdev);
1221
1222        pause->autoneg = priv->aneg_pause;
1223        pause->tx_pause = priv->tx_pause;
1224        pause->rx_pause = priv->rx_pause;
1225}
1226
1227static int ftgmac100_set_pauseparam(struct net_device *netdev,
1228                                    struct ethtool_pauseparam *pause)
1229{
1230        struct ftgmac100 *priv = netdev_priv(netdev);
1231        struct phy_device *phydev = netdev->phydev;
1232
1233        priv->aneg_pause = pause->autoneg;
1234        priv->tx_pause = pause->tx_pause;
1235        priv->rx_pause = pause->rx_pause;
1236
1237        if (phydev)
1238                phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
1239
1240        if (netif_running(netdev)) {
1241                if (!(phydev && priv->aneg_pause))
1242                        ftgmac100_config_pause(priv);
1243        }
1244
1245        return 0;
1246}
1247
1248static const struct ethtool_ops ftgmac100_ethtool_ops = {
1249        .get_drvinfo            = ftgmac100_get_drvinfo,
1250        .get_link               = ethtool_op_get_link,
1251        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
1252        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
1253        .nway_reset             = phy_ethtool_nway_reset,
1254        .get_ringparam          = ftgmac100_get_ringparam,
1255        .set_ringparam          = ftgmac100_set_ringparam,
1256        .get_pauseparam         = ftgmac100_get_pauseparam,
1257        .set_pauseparam         = ftgmac100_set_pauseparam,
1258};
1259
1260static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
1261{
1262        struct net_device *netdev = dev_id;
1263        struct ftgmac100 *priv = netdev_priv(netdev);
1264        unsigned int status, new_mask = FTGMAC100_INT_BAD;
1265
1266        /* Fetch and clear interrupt bits, process abnormal ones */
1267        status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
1268        iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
1269        if (unlikely(status & FTGMAC100_INT_BAD)) {
1270
1271                /* RX buffer unavailable */
1272                if (status & FTGMAC100_INT_NO_RXBUF)
1273                        netdev->stats.rx_over_errors++;
1274
1275                /* received packet lost due to RX FIFO full */
1276                if (status & FTGMAC100_INT_RPKT_LOST)
1277                        netdev->stats.rx_fifo_errors++;
1278
1279                /* sent packet lost due to excessive TX collision */
1280                if (status & FTGMAC100_INT_XPKT_LOST)
1281                        netdev->stats.tx_fifo_errors++;
1282
1283                /* AHB error -> Reset the chip */
1284                if (status & FTGMAC100_INT_AHB_ERR) {
1285                        if (net_ratelimit())
1286                                netdev_warn(netdev,
1287                                           "AHB bus error ! Resetting chip.\n");
1288                        iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
1289                        schedule_work(&priv->reset_task);
1290                        return IRQ_HANDLED;
1291                }
1292
1293                /* We may need to restart the MAC after such errors, delay
1294                 * this until after we have freed some Rx buffers though
1295                 */
1296                priv->need_mac_restart = true;
1297
1298                /* Disable those errors until we restart */
1299                new_mask &= ~status;
1300        }
1301
1302        /* Only enable "bad" interrupts while NAPI is on */
1303        iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER);
1304
1305        /* Schedule NAPI bh */
1306        napi_schedule_irqoff(&priv->napi);
1307
1308        return IRQ_HANDLED;
1309}
1310
1311static bool ftgmac100_check_rx(struct ftgmac100 *priv)
1312{
1313        struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer];
1314
1315        /* Do we have a packet ? */
1316        return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
1317}
1318
1319static int ftgmac100_poll(struct napi_struct *napi, int budget)
1320{
1321        struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
1322        int work_done = 0;
1323        bool more;
1324
1325        /* Handle TX completions */
1326        if (ftgmac100_tx_buf_cleanable(priv))
1327                ftgmac100_tx_complete(priv);
1328
1329        /* Handle RX packets */
1330        do {
1331                more = ftgmac100_rx_packet(priv, &work_done);
1332        } while (more && work_done < budget);
1333
1334
1335        /* The interrupt is telling us to kick the MAC back to life
1336         * after an RX overflow
1337         */
1338        if (unlikely(priv->need_mac_restart)) {
1339                ftgmac100_start_hw(priv);
1340                priv->need_mac_restart = false;
1341
1342                /* Re-enable "bad" interrupts */
1343                iowrite32(FTGMAC100_INT_BAD,
1344                          priv->base + FTGMAC100_OFFSET_IER);
1345        }
1346
1347        /* As long as we are waiting for transmit packets to be
1348         * completed we keep NAPI going
1349         */
1350        if (ftgmac100_tx_buf_cleanable(priv))
1351                work_done = budget;
1352
1353        if (work_done < budget) {
1354                /* We are about to re-enable all interrupts. However
1355                 * the HW has been latching RX/TX packet interrupts while
1356                 * they were masked. So we clear them first, then we need
1357                 * to re-check if there's something to process
1358                 */
1359                iowrite32(FTGMAC100_INT_RXTX,
1360                          priv->base + FTGMAC100_OFFSET_ISR);
1361
 1362                /* Push the above out (and provide a barrier vs. subsequent
1363                 * reads of the descriptor).
1364                 */
1365                ioread32(priv->base + FTGMAC100_OFFSET_ISR);
1366
1367                /* Check RX and TX descriptors for more work to do */
1368                if (ftgmac100_check_rx(priv) ||
1369                    ftgmac100_tx_buf_cleanable(priv))
1370                        return budget;
1371
1372                /* deschedule NAPI */
1373                napi_complete(napi);
1374
1375                /* enable all interrupts */
1376                iowrite32(FTGMAC100_INT_ALL,
1377                          priv->base + FTGMAC100_OFFSET_IER);
1378        }
1379
1380        return work_done;
1381}
1382
1383static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
1384{
1385        int err = 0;
1386
1387        /* Re-init descriptors (adjust queue sizes) */
1388        ftgmac100_init_rings(priv);
1389
1390        /* Realloc rx descriptors */
1391        err = ftgmac100_alloc_rx_buffers(priv);
1392        if (err && !ignore_alloc_err)
1393                return err;
1394
1395        /* Reinit and restart HW */
1396        ftgmac100_init_hw(priv);
1397        ftgmac100_config_pause(priv);
1398        ftgmac100_start_hw(priv);
1399
1400        /* Re-enable the device */
1401        napi_enable(&priv->napi);
1402        netif_start_queue(priv->netdev);
1403
1404        /* Enable all interrupts */
1405        iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);
1406
1407        return err;
1408}
1409
1410static void ftgmac100_reset_task(struct work_struct *work)
1411{
1412        struct ftgmac100 *priv = container_of(work, struct ftgmac100,
1413                                              reset_task);
1414        struct net_device *netdev = priv->netdev;
1415        int err;
1416
1417        netdev_dbg(netdev, "Resetting NIC...\n");
1418
1419        /* Lock the world */
1420        rtnl_lock();
1421        if (netdev->phydev)
1422                mutex_lock(&netdev->phydev->lock);
1423        if (priv->mii_bus)
1424                mutex_lock(&priv->mii_bus->mdio_lock);
1425
1426
1427        /* Check if the interface is still up */
1428        if (!netif_running(netdev))
1429                goto bail;
1430
1431        /* Stop the network stack */
1432        netif_trans_update(netdev);
1433        napi_disable(&priv->napi);
1434        netif_tx_disable(netdev);
1435
1436        /* Stop and reset the MAC */
1437        ftgmac100_stop_hw(priv);
1438        err = ftgmac100_reset_and_config_mac(priv);
1439        if (err) {
1440                /* Not much we can do ... it might come back... */
1441                netdev_err(netdev, "attempting to continue...\n");
1442        }
1443
1444        /* Free all rx and tx buffers */
1445        ftgmac100_free_buffers(priv);
1446
1447        /* Setup everything again and restart chip */
1448        ftgmac100_init_all(priv, true);
1449
1450        netdev_dbg(netdev, "Reset done !\n");
1451 bail:
1452        if (priv->mii_bus)
1453                mutex_unlock(&priv->mii_bus->mdio_lock);
1454        if (netdev->phydev)
1455                mutex_unlock(&netdev->phydev->lock);
1456        rtnl_unlock();
1457}
1458
1459static int ftgmac100_open(struct net_device *netdev)
1460{
1461        struct ftgmac100 *priv = netdev_priv(netdev);
1462        int err;
1463
1464        /* Allocate ring buffers  */
1465        err = ftgmac100_alloc_rings(priv);
1466        if (err) {
1467                netdev_err(netdev, "Failed to allocate descriptors\n");
1468                return err;
1469        }
1470
 1471        /* When using NC-SI we force the speed to 100Mbit/s full duplex.
 1472         *
 1473         * Otherwise we leave it set to 0 (no link); the link
1474         * message from the PHY layer will handle setting it up to
1475         * something else if needed.
1476         */
1477        if (priv->use_ncsi) {
1478                priv->cur_duplex = DUPLEX_FULL;
1479                priv->cur_speed = SPEED_100;
1480        } else {
1481                priv->cur_duplex = 0;
1482                priv->cur_speed = 0;
1483        }
1484
1485        /* Reset the hardware */
1486        err = ftgmac100_reset_and_config_mac(priv);
1487        if (err)
1488                goto err_hw;
1489
1490        /* Initialize NAPI */
1491        netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
1492
1493        /* Grab our interrupt */
1494        err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
1495        if (err) {
1496                netdev_err(netdev, "failed to request irq %d\n", netdev->irq);
1497                goto err_irq;
1498        }
1499
1500        /* Start things up */
1501        err = ftgmac100_init_all(priv, false);
1502        if (err) {
1503                netdev_err(netdev, "Failed to allocate packet buffers\n");
1504                goto err_alloc;
1505        }
1506
1507        if (netdev->phydev) {
1508                /* If we have a PHY, start polling */
1509                phy_start(netdev->phydev);
1510        } else if (priv->use_ncsi) {
1511                /* If using NC-SI, set our carrier on and start the stack */
1512                netif_carrier_on(netdev);
1513
1514                /* Start the NCSI device */
1515                err = ncsi_start_dev(priv->ndev);
1516                if (err)
1517                        goto err_ncsi;
1518        }
1519
1520        return 0;
1521
1522 err_ncsi:
1523        napi_disable(&priv->napi);
1524        netif_stop_queue(netdev);
1525 err_alloc:
1526        ftgmac100_free_buffers(priv);
1527        free_irq(netdev->irq, netdev);
1528 err_irq:
1529        netif_napi_del(&priv->napi);
1530 err_hw:
1531        iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
1532        ftgmac100_free_rings(priv);
1533        return err;
1534}
1535
1536static int ftgmac100_stop(struct net_device *netdev)
1537{
1538        struct ftgmac100 *priv = netdev_priv(netdev);
1539
1540        /* Note about the reset task: We are called with the rtnl lock
1541         * held, so we are synchronized against the core of the reset
1542         * task. We must not try to synchronously cancel it, otherwise
1543         * we can deadlock. But since it will test for netif_running(),
1544         * which has already been cleared by the net core, we don't have
1545         * anything special to do (the work is cancelled in remove()).
1546         */
1547
1548        /* disable all interrupts */
1549        iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
1550
1551        netif_stop_queue(netdev);
1552        napi_disable(&priv->napi);
1553        netif_napi_del(&priv->napi);
1554        if (netdev->phydev)
1555                phy_stop(netdev->phydev);
1556        else if (priv->use_ncsi)
1557                ncsi_stop_dev(priv->ndev);
1558
1559        ftgmac100_stop_hw(priv);
1560        free_irq(netdev->irq, netdev);
1561        ftgmac100_free_buffers(priv);
1562        ftgmac100_free_rings(priv);
1563
1564        return 0;
1565}
1566
1567static void ftgmac100_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1568{
1569        struct ftgmac100 *priv = netdev_priv(netdev);
1570
1571        /* Disable all interrupts */
1572        iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
1573
1574        /* Do the reset outside of interrupt context */
1575        schedule_work(&priv->reset_task);
1576}
1577
1578static int ftgmac100_set_features(struct net_device *netdev,
1579                                  netdev_features_t features)
1580{
1581        struct ftgmac100 *priv = netdev_priv(netdev);
1582        netdev_features_t changed = netdev->features ^ features;
1583
1584        if (!netif_running(netdev))
1585                return 0;
1586
1587        /* Update the vlan filtering bit */
1588        if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
1589                u32 maccr;
1590
1591                maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
1592                if (features & NETIF_F_HW_VLAN_CTAG_RX)
1593                        maccr |= FTGMAC100_MACCR_RM_VLAN;
1594                else
1595                        maccr &= ~FTGMAC100_MACCR_RM_VLAN;
1596                iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
1597        }
1598
1599        return 0;
1600}
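
/* For illustration, one common way this callback is reached from userspace
 * is ethtool's feature control, e.g.:
 *
 *      ethtool -K <iface> rxvlan off
 *
 * which clears NETIF_F_HW_VLAN_CTAG_RX and lands here via ndo_set_features
 * while the interface is running.
 */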
1601
1602#ifdef CONFIG_NET_POLL_CONTROLLER
1603static void ftgmac100_poll_controller(struct net_device *netdev)
1604{
1605        unsigned long flags;
1606
1607        local_irq_save(flags);
1608        ftgmac100_interrupt(netdev->irq, netdev);
1609        local_irq_restore(flags);
1610}
1611#endif
1612
1613static const struct net_device_ops ftgmac100_netdev_ops = {
1614        .ndo_open               = ftgmac100_open,
1615        .ndo_stop               = ftgmac100_stop,
1616        .ndo_start_xmit         = ftgmac100_hard_start_xmit,
1617        .ndo_set_mac_address    = ftgmac100_set_mac_addr,
1618        .ndo_validate_addr      = eth_validate_addr,
1619        .ndo_do_ioctl           = phy_do_ioctl,
1620        .ndo_tx_timeout         = ftgmac100_tx_timeout,
1621        .ndo_set_rx_mode        = ftgmac100_set_rx_mode,
1622        .ndo_set_features       = ftgmac100_set_features,
1623#ifdef CONFIG_NET_POLL_CONTROLLER
1624        .ndo_poll_controller    = ftgmac100_poll_controller,
1625#endif
1626        .ndo_vlan_rx_add_vid    = ncsi_vlan_rx_add_vid,
1627        .ndo_vlan_rx_kill_vid   = ncsi_vlan_rx_kill_vid,
1628};
1629
1630static int ftgmac100_setup_mdio(struct net_device *netdev)
1631{
1632        struct ftgmac100 *priv = netdev_priv(netdev);
1633        struct platform_device *pdev = to_platform_device(priv->dev);
1634        struct device_node *np = pdev->dev.of_node;
1635        struct device_node *mdio_np;
1636        int i, err = 0;
1637        u32 reg;
1638
1639        /* initialize mdio bus */
1640        priv->mii_bus = mdiobus_alloc();
1641        if (!priv->mii_bus)
1642                return -EIO;
1643
1644        if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
1645            of_device_is_compatible(np, "aspeed,ast2500-mac")) {
1646                /* The AST2600 has a separate MDIO controller, so it is
1647                 * not handled here. For the AST2400 and AST2500 this
1648                 * driver only supports the old MDIO interface.
1649                 */
1651                reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
1652                reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
1653                iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
1654        }
1655
1656        priv->mii_bus->name = "ftgmac100_mdio";
1657        snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
1658                 pdev->name, pdev->id);
1659        priv->mii_bus->parent = priv->dev;
1660        priv->mii_bus->priv = priv->netdev;
1661        priv->mii_bus->read = ftgmac100_mdiobus_read;
1662        priv->mii_bus->write = ftgmac100_mdiobus_write;
1663
1664        for (i = 0; i < PHY_MAX_ADDR; i++)
1665                priv->mii_bus->irq[i] = PHY_POLL;
1666
1667        mdio_np = of_get_child_by_name(np, "mdio");
1668
1669        err = of_mdiobus_register(priv->mii_bus, mdio_np);
1670        if (err) {
1671                dev_err(priv->dev, "Cannot register MDIO bus!\n");
1672                goto err_register_mdiobus;
1673        }
1674
1675        of_node_put(mdio_np);
1676
1677        return 0;
1678
1679err_register_mdiobus:
        of_node_put(mdio_np);
1680        mdiobus_free(priv->mii_bus);
1681        return err;
1682}
1683
1684static void ftgmac100_phy_disconnect(struct net_device *netdev)
1685{
1686        if (!netdev->phydev)
1687                return;
1688
1689        phy_disconnect(netdev->phydev);
1690}
1691
1692static void ftgmac100_destroy_mdio(struct net_device *netdev)
1693{
1694        struct ftgmac100 *priv = netdev_priv(netdev);
1695
1696        if (!priv->mii_bus)
1697                return;
1698
1699        mdiobus_unregister(priv->mii_bus);
1700        mdiobus_free(priv->mii_bus);
1701}
1702
1703static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
1704{
1705        if (unlikely(nd->state != ncsi_dev_state_functional))
1706                return;
1707
1708        netdev_dbg(nd->dev, "NCSI interface %s\n",
1709                   nd->link_up ? "up" : "down");
1710}
1711
1712static int ftgmac100_setup_clk(struct ftgmac100 *priv)
1713{
1714        struct clk *clk;
1715        int rc;
1716
1717        clk = devm_clk_get(priv->dev, NULL /* MACCLK */);
1718        if (IS_ERR(clk))
1719                return PTR_ERR(clk);
1720        priv->clk = clk;
1721        rc = clk_prepare_enable(priv->clk);
1722        if (rc)
1723                return rc;
1724
1725        /* Aspeed specifies that a 100MHz clock is required for up to
1726         * 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz
1727         * is sufficient.
1728         */
1729        rc = clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
1730                          FTGMAC_100MHZ);
1731        if (rc)
1732                goto cleanup_clk;
1733
1734        /* RCLK is for RMII, typically used for NCSI. Optional because it's not
1735         * necessary if it's the AST2400 MAC, or the MAC is configured for
1736         * RGMII, or the controller is not an ASPEED-based controller.
1737         */
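
        /* For illustration only: boards that use the RMII reference clock
         * are expected to describe it under the "RCLK" name requested
         * below, roughly along these lines (the phandle and first clock
         * name are placeholders, not taken from a real devicetree):
         *
         *      clocks = <&syscon MAC_CLK_GATE>, <&syscon MAC_RCLK_GATE>;
         *      clock-names = "MACCLK", "RCLK";
         *
         * When the entry is absent, devm_clk_get_optional() returns NULL
         * and clk_prepare_enable(NULL) is a no-op, so the code below still
         * succeeds.
         */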
1738        priv->rclk = devm_clk_get_optional(priv->dev, "RCLK");
        /* Real lookup errors (e.g. -EPROBE_DEFER); a missing clock is NULL */
        if (IS_ERR(priv->rclk)) {
                rc = PTR_ERR(priv->rclk);
                goto cleanup_clk;
        }

1739        rc = clk_prepare_enable(priv->rclk);
1740        if (!rc)
1741                return 0;
1742
1743cleanup_clk:
1744        clk_disable_unprepare(priv->clk);
1745
1746        return rc;
1747}
1748
1749static int ftgmac100_probe(struct platform_device *pdev)
1750{
1751        struct resource *res;
1752        int irq;
1753        struct net_device *netdev;
1754        struct ftgmac100 *priv;
1755        struct device_node *np;
1756        int err = 0;
1757
1758        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1759        if (!res)
1760                return -ENXIO;
1761
1762        irq = platform_get_irq(pdev, 0);
1763        if (irq < 0)
1764                return irq;
1765
1766        /* setup net_device */
1767        netdev = alloc_etherdev(sizeof(*priv));
1768        if (!netdev) {
1769                err = -ENOMEM;
1770                goto err_alloc_etherdev;
1771        }
1772
1773        SET_NETDEV_DEV(netdev, &pdev->dev);
1774
1775        netdev->ethtool_ops = &ftgmac100_ethtool_ops;
1776        netdev->netdev_ops = &ftgmac100_netdev_ops;
1777        netdev->watchdog_timeo = 5 * HZ;
1778
1779        platform_set_drvdata(pdev, netdev);
1780
1781        /* setup private data */
1782        priv = netdev_priv(netdev);
1783        priv->netdev = netdev;
1784        priv->dev = &pdev->dev;
1785        INIT_WORK(&priv->reset_task, ftgmac100_reset_task);
1786
1787        /* map io memory */
1788        priv->res = request_mem_region(res->start, resource_size(res),
1789                                       dev_name(&pdev->dev));
1790        if (!priv->res) {
1791                dev_err(&pdev->dev, "Could not reserve memory region\n");
1792                err = -ENOMEM;
1793                goto err_req_mem;
1794        }
1795
1796        priv->base = ioremap(res->start, resource_size(res));
1797        if (!priv->base) {
1798                dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
1799                err = -EIO;
1800                goto err_ioremap;
1801        }
1802
1803        netdev->irq = irq;
1804
1805        /* Enable pause */
1806        priv->tx_pause = true;
1807        priv->rx_pause = true;
1808        priv->aneg_pause = true;
1809
1810        /* MAC address from chip or random one */
1811        ftgmac100_initial_mac(priv);
1812
1813        np = pdev->dev.of_node;
1814        if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
1815                   of_device_is_compatible(np, "aspeed,ast2500-mac") ||
1816                   of_device_is_compatible(np, "aspeed,ast2600-mac"))) {
1817                priv->rxdes0_edorr_mask = BIT(30);
1818                priv->txdes0_edotr_mask = BIT(30);
1819                priv->is_aspeed = true;
1820                /* Disable ast2600 problematic HW arbitration */
1821                if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
1822                        iowrite32(FTGMAC100_TM_DEFAULT,
1823                                  priv->base + FTGMAC100_OFFSET_TM);
1824                }
1825        } else {
1826                priv->rxdes0_edorr_mask = BIT(15);
1827                priv->txdes0_edotr_mask = BIT(15);
1828        }
1829
1830        if (np && of_get_property(np, "use-ncsi", NULL)) {
1831                if (!IS_ENABLED(CONFIG_NET_NCSI)) {
1832                        dev_err(&pdev->dev, "NCSI stack not enabled\n");
1833                        err = -EINVAL;
1834                        goto err_phy_connect;
1835                }
1836
1837                dev_info(&pdev->dev, "Using NCSI interface\n");
1838                priv->use_ncsi = true;
1839                priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
1840                if (!priv->ndev) {
1841                        err = -EINVAL;
1842                        goto err_phy_connect;
1843                }
1844        } else if (np && of_get_property(np, "phy-handle", NULL)) {
1845                struct phy_device *phy;
1846
1847                /* Support "mdio"/"phy" child nodes for ast2400/2500 with
1848                 * an embedded MDIO controller. Automatically scan the DTS for
1849                 * available PHYs and register them.
1850                 */
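
                /* Sketch of the kind of devicetree this branch expects;
                 * the node and label names below are made up for
                 * illustration:
                 *
                 *      phy-handle = <&phy0>;
                 *      mdio {
                 *              phy0: ethernet-phy@0 {
                 *                      reg = <0>;
                 *              };
                 *      };
                 *
                 * The "phy-handle" property selects this branch, and on the
                 * AST2400/AST2500 the "mdio" child node is what
                 * ftgmac100_setup_mdio() hands to of_mdiobus_register().
                 */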
1851                if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
1852                    of_device_is_compatible(np, "aspeed,ast2500-mac")) {
1853                        err = ftgmac100_setup_mdio(netdev);
1854                        if (err)
1855                                goto err_setup_mdio;
1856                }
1857
1858                phy = of_phy_get_and_connect(priv->netdev, np,
1859                                             &ftgmac100_adjust_link);
1860                if (!phy) {
1861                        dev_err(&pdev->dev, "Failed to connect to phy\n");
1862                        err = -EINVAL;
1863                        goto err_phy_connect;
1864                }
1865
1866                /* Indicate that we support PAUSE frames (see comment in
1867                 * Documentation/networking/phy.rst)
1868                 */
1869                phy_support_asym_pause(phy);
1870
1871                /* Display what we found */
1872                phy_attached_info(phy);
1873        } else if (np && !of_get_child_by_name(np, "mdio")) {
1874                /* Support legacy ASPEED devicetree descriptions that describe a
1875                 * MAC with an embedded MDIO controller but have no "mdio"
1876                 * child node. Automatically scan the MDIO bus for available
1877                 * PHYs.
1878                 */
1879                priv->use_ncsi = false;
1880                err = ftgmac100_setup_mdio(netdev);
1881                if (err)
1882                        goto err_setup_mdio;
1883
1884                err = ftgmac100_mii_probe(netdev);
1885                if (err) {
1886                        dev_err(priv->dev, "MII probe failed!\n");
1887                        goto err_ncsi_dev;
1888                }
1889
1890        }
1891
1892        if (priv->is_aspeed) {
1893                err = ftgmac100_setup_clk(priv);
1894                if (err)
1895                        goto err_phy_connect;
1896        }
1897
1898        /* Default ring sizes */
1899        priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
1900        priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES;
1901
1902        /* Base feature set */
1903        netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
1904                NETIF_F_GRO | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX |
1905                NETIF_F_HW_VLAN_CTAG_TX;
1906
1907        if (priv->use_ncsi)
1908                netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1909
1910        /* AST2400 doesn't have working HW checksum generation */
1911        if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac")))
1912                netdev->hw_features &= ~NETIF_F_HW_CSUM;
1913        if (np && of_get_property(np, "no-hw-checksum", NULL))
1914                netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
1915        netdev->features |= netdev->hw_features;
1916
1917        /* register network device */
1918        err = register_netdev(netdev);
1919        if (err) {
1920                dev_err(&pdev->dev, "Failed to register netdev\n");
1921                goto err_register_netdev;
1922        }
1923
1924        netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);
1925
1926        return 0;
1927
1928err_register_netdev:
1929        clk_disable_unprepare(priv->rclk);
1930        clk_disable_unprepare(priv->clk);
1931err_phy_connect:
1932        ftgmac100_phy_disconnect(netdev);
1933err_ncsi_dev:
1934        if (priv->ndev)
1935                ncsi_unregister_dev(priv->ndev);
1936        ftgmac100_destroy_mdio(netdev);
1937err_setup_mdio:
1938        iounmap(priv->base);
1939err_ioremap:
1940        release_resource(priv->res);
1941err_req_mem:
1942        free_netdev(netdev);
1943err_alloc_etherdev:
1944        return err;
1945}
1946
1947static int ftgmac100_remove(struct platform_device *pdev)
1948{
1949        struct net_device *netdev;
1950        struct ftgmac100 *priv;
1951
1952        netdev = platform_get_drvdata(pdev);
1953        priv = netdev_priv(netdev);
1954
1955        if (priv->ndev)
1956                ncsi_unregister_dev(priv->ndev);
1957        unregister_netdev(netdev);
1958
1959        clk_disable_unprepare(priv->rclk);
1960        clk_disable_unprepare(priv->clk);
1961
1962        /* There's a small chance the reset task will have been re-queued
1963         * during stop, so make sure it's gone before we free the structure.
1964         */
1965        cancel_work_sync(&priv->reset_task);
1966
1967        ftgmac100_phy_disconnect(netdev);
1968        ftgmac100_destroy_mdio(netdev);
1969
1970        iounmap(priv->base);
1971        release_resource(priv->res);
1972
1973        netif_napi_del(&priv->napi);
1974        free_netdev(netdev);
1975        return 0;
1976}
1977
1978static const struct of_device_id ftgmac100_of_match[] = {
1979        { .compatible = "faraday,ftgmac100" },
1980        { }
1981};
1982MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
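
/* For reference, a sketch of how this table is typically matched from a
 * devicetree; the compatible strings come from the checks in
 * ftgmac100_probe(), everything else is illustrative:
 *
 *      compatible = "aspeed,ast2500-mac", "faraday,ftgmac100";
 *      use-ncsi;
 *
 * i.e. an SoC-specific compatible alongside the generic "faraday,ftgmac100"
 * entry above, plus either "use-ncsi" or a "phy-handle" property.
 */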
1983
1984static struct platform_driver ftgmac100_driver = {
1985        .probe  = ftgmac100_probe,
1986        .remove = ftgmac100_remove,
1987        .driver = {
1988                .name           = DRV_NAME,
1989                .of_match_table = ftgmac100_of_match,
1990        },
1991};
1992module_platform_driver(ftgmac100_driver);
1993
1994MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
1995MODULE_DESCRIPTION("FTGMAC100 driver");
1996MODULE_LICENSE("GPL");
1997