/* linux/drivers/net/ethernet/hisilicon/hip04_eth.c */
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2
   3/* Copyright (c) 2014 Linaro Ltd.
   4 * Copyright (c) 2014 Hisilicon Limited.
   5 */
   6
   7#include <linux/module.h>
   8#include <linux/etherdevice.h>
   9#include <linux/platform_device.h>
  10#include <linux/interrupt.h>
  11#include <linux/ktime.h>
  12#include <linux/of_address.h>
  13#include <linux/phy.h>
  14#include <linux/of_mdio.h>
  15#include <linux/of_net.h>
  16#include <linux/mfd/syscon.h>
  17#include <linux/regmap.h>
  18
  19#define PPE_CFG_RX_ADDR                 0x100
  20#define PPE_CFG_POOL_GRP                0x300
  21#define PPE_CFG_RX_BUF_SIZE             0x400
  22#define PPE_CFG_RX_FIFO_SIZE            0x500
  23#define PPE_CURR_BUF_CNT                0xa200
  24
  25#define GE_DUPLEX_TYPE                  0x08
  26#define GE_MAX_FRM_SIZE_REG             0x3c
  27#define GE_PORT_MODE                    0x40
  28#define GE_PORT_EN                      0x44
  29#define GE_SHORT_RUNTS_THR_REG          0x50
  30#define GE_TX_LOCAL_PAGE_REG            0x5c
  31#define GE_TRANSMIT_CONTROL_REG         0x60
  32#define GE_CF_CRC_STRIP_REG             0x1b0
  33#define GE_MODE_CHANGE_REG              0x1b4
  34#define GE_RECV_CONTROL_REG             0x1e0
  35#define GE_STATION_MAC_ADDRESS          0x210
  36#define PPE_CFG_CPU_ADD_ADDR            0x580
  37#define PPE_CFG_MAX_FRAME_LEN_REG       0x408
  38#define PPE_CFG_BUS_CTRL_REG            0x424
  39#define PPE_CFG_RX_CTRL_REG             0x428
  40#define PPE_CFG_RX_PKT_MODE_REG         0x438
  41#define PPE_CFG_QOS_VMID_GEN            0x500
  42#define PPE_CFG_RX_PKT_INT              0x538
  43#define PPE_INTEN                       0x600
  44#define PPE_INTSTS                      0x608
  45#define PPE_RINT                        0x604
  46#define PPE_CFG_STS_MODE                0x700
  47#define PPE_HIS_RX_PKT_CNT              0x804
  48
  49/* REG_INTERRUPT */
  50#define RCV_INT                         BIT(10)
  51#define RCV_NOBUF                       BIT(8)
  52#define RCV_DROP                        BIT(7)
  53#define TX_DROP                         BIT(6)
  54#define DEF_INT_ERR                     (RCV_NOBUF | RCV_DROP | TX_DROP)
  55#define DEF_INT_MASK                    (RCV_INT | DEF_INT_ERR)
  56
  57/* TX descriptor config */
  58#define TX_FREE_MEM                     BIT(0)
  59#define TX_READ_ALLOC_L3                BIT(1)
  60#define TX_FINISH_CACHE_INV             BIT(2)
  61#define TX_CLEAR_WB                     BIT(4)
  62#define TX_L3_CHECKSUM                  BIT(5)
  63#define TX_LOOP_BACK                    BIT(11)
  64
  65/* RX error */
  66#define RX_PKT_DROP                     BIT(0)
  67#define RX_L2_ERR                       BIT(1)
  68#define RX_PKT_ERR                      (RX_PKT_DROP | RX_L2_ERR)
  69
  70#define SGMII_SPEED_1000                0x08
  71#define SGMII_SPEED_100                 0x07
  72#define SGMII_SPEED_10                  0x06
  73#define MII_SPEED_100                   0x01
  74#define MII_SPEED_10                    0x00
  75
  76#define GE_DUPLEX_FULL                  BIT(0)
  77#define GE_DUPLEX_HALF                  0x00
  78#define GE_MODE_CHANGE_EN               BIT(0)
  79
  80#define GE_TX_AUTO_NEG                  BIT(5)
  81#define GE_TX_ADD_CRC                   BIT(6)
  82#define GE_TX_SHORT_PAD_THROUGH         BIT(7)
  83
  84#define GE_RX_STRIP_CRC                 BIT(0)
  85#define GE_RX_STRIP_PAD                 BIT(3)
  86#define GE_RX_PAD_EN                    BIT(4)
  87
  88#define GE_AUTO_NEG_CTL                 BIT(0)
  89
  90#define GE_RX_INT_THRESHOLD             BIT(6)
  91#define GE_RX_TIMEOUT                   0x04
  92
  93#define GE_RX_PORT_EN                   BIT(1)
  94#define GE_TX_PORT_EN                   BIT(2)
  95
  96#define PPE_CFG_STS_RX_PKT_CNT_RC       BIT(12)
  97
  98#define PPE_CFG_RX_PKT_ALIGN            BIT(18)
  99#define PPE_CFG_QOS_VMID_MODE           BIT(14)
 100#define PPE_CFG_QOS_VMID_GRP_SHIFT      8
 101
 102#define PPE_CFG_RX_FIFO_FSFU            BIT(11)
 103#define PPE_CFG_RX_DEPTH_SHIFT          16
 104#define PPE_CFG_RX_START_SHIFT          0
 105#define PPE_CFG_RX_CTRL_ALIGN_SHIFT     11
 106
 107#define PPE_CFG_BUS_LOCAL_REL           BIT(14)
 108#define PPE_CFG_BUS_BIG_ENDIEN          BIT(0)
 109
 110#define RX_DESC_NUM                     128
 111#define TX_DESC_NUM                     256
 112#define TX_NEXT(N)                      (((N) + 1) & (TX_DESC_NUM-1))
 113#define RX_NEXT(N)                      (((N) + 1) & (RX_DESC_NUM-1))
 114
 115#define GMAC_PPE_RX_PKT_MAX_LEN         379
 116#define GMAC_MAX_PKT_LEN                1516
 117#define GMAC_MIN_PKT_LEN                31
 118#define RX_BUF_SIZE                     1600
 119#define RESET_TIMEOUT                   1000
 120#define TX_TIMEOUT                      (6 * HZ)
 121
 122#define DRV_NAME                        "hip04-ether"
 123#define DRV_VERSION                     "v1.0"
 124
 125#define HIP04_MAX_TX_COALESCE_USECS     200
 126#define HIP04_MIN_TX_COALESCE_USECS     100
 127#define HIP04_MAX_TX_COALESCE_FRAMES    200
 128#define HIP04_MIN_TX_COALESCE_FRAMES    100
 129
/* TX descriptor as consumed by the hardware.  Fields are stored
 * big-endian (filled with cpu_to_be32() in hip04_mac_start_xmit).
 * The 64-byte alignment presumably matches the device's descriptor
 * fetch granularity — TODO confirm against the PPE datasheet.
 */
struct tx_desc {
	u32 send_addr;	/* DMA address of the packet payload */
	u32 send_size;	/* payload length in bytes */
	u32 next_addr;	/* not written by this driver */
	u32 cfg;	/* TX_* config bits (TX_CLEAR_WB | TX_FINISH_CACHE_INV) */
	u32 wb_addr;	/* write-back target: this descriptor's own DMA address */
} __aligned(64);
 137
/* RX status header the hardware places at the start of each received
 * buffer; parsed in hip04_rx_poll.  Multi-byte fields are big-endian.
 */
struct rx_desc {
	u16 reserved_16;
	u16 pkt_len;	/* frame length; 0 marks end of a receive burst */
	u32 reserve1[3];
	u32 pkt_err;	/* RX_PKT_ERR bits set on bad frames */
	u32 reserve2[4];
};
 145
/* Per-netdevice driver state. */
struct hip04_priv {
	void __iomem *base;	/* per-port PPE/GE register window */
	int phy_mode;		/* PHY_INTERFACE_MODE_* from device tree */
	int chan;		/* RX start index: group * RX_DESC_NUM (see probe) */
	unsigned int port;	/* port index within the shared PPE syscon */
	unsigned int speed;	/* last speed programmed into the MAC */
	unsigned int duplex;	/* last duplex programmed into the MAC */
	unsigned int reg_inten;	/* software shadow of PPE_INTEN */

	struct napi_struct napi;
	struct net_device *ndev;

	struct tx_desc *tx_desc;	/* coherent TX descriptor ring */
	dma_addr_t tx_desc_dma;		/* DMA base of tx_desc */
	struct sk_buff *tx_skb[TX_DESC_NUM];	/* in-flight skbs, by slot */
	dma_addr_t tx_phys[TX_DESC_NUM];	/* payload mappings, by slot */
	unsigned int tx_head;	/* producer index, written only by xmit */

	int tx_coalesce_frames;	/* schedule cleanup once this many queued */
	int tx_coalesce_usecs;	/* fallback cleanup timer period */
	struct hrtimer tx_coalesce_timer;

	unsigned char *rx_buf[RX_DESC_NUM];	/* page-frag RX buffers */
	dma_addr_t rx_phys[RX_DESC_NUM];	/* RX buffer mappings */
	unsigned int rx_head;	/* next RX slot to process */
	unsigned int rx_buf_size;	/* frag size incl. skb_shared_info room */

	struct device_node *phy_node;
	struct phy_device *phy;
	struct regmap *map;	/* shared PPE syscon regmap */
	struct work_struct tx_timeout_task;	/* deferred timeout recovery */

	/* written only by tx cleanup */
	unsigned int tx_tail ____cacheline_aligned_in_smp;
};
 181
 182static inline unsigned int tx_count(unsigned int head, unsigned int tail)
 183{
 184        return (head - tail) % (TX_DESC_NUM - 1);
 185}
 186
 187static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
 188{
 189        struct hip04_priv *priv = netdev_priv(ndev);
 190        u32 val;
 191
 192        priv->speed = speed;
 193        priv->duplex = duplex;
 194
 195        switch (priv->phy_mode) {
 196        case PHY_INTERFACE_MODE_SGMII:
 197                if (speed == SPEED_1000)
 198                        val = SGMII_SPEED_1000;
 199                else if (speed == SPEED_100)
 200                        val = SGMII_SPEED_100;
 201                else
 202                        val = SGMII_SPEED_10;
 203                break;
 204        case PHY_INTERFACE_MODE_MII:
 205                if (speed == SPEED_100)
 206                        val = MII_SPEED_100;
 207                else
 208                        val = MII_SPEED_10;
 209                break;
 210        default:
 211                netdev_warn(ndev, "not supported mode\n");
 212                val = MII_SPEED_10;
 213                break;
 214        }
 215        writel_relaxed(val, priv->base + GE_PORT_MODE);
 216
 217        val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
 218        writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);
 219
 220        val = GE_MODE_CHANGE_EN;
 221        writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
 222}
 223
 224static void hip04_reset_ppe(struct hip04_priv *priv)
 225{
 226        u32 val, tmp, timeout = 0;
 227
 228        do {
 229                regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
 230                regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
 231                if (timeout++ > RESET_TIMEOUT)
 232                        break;
 233        } while (val & 0xfff);
 234}
 235
/* One-time datapath setup: program the PPE (reached both through
 * priv->base and the shared syscon regmap) and the GE MAC with
 * buffer-pool, FIFO, frame-size and CRC/padding configuration.
 * Pure fixed configuration; no runtime state involved.
 */
static void hip04_config_fifo(struct hip04_priv *priv)
{
	u32 val;

	/* "RC" presumably makes the RX packet counter read-clear — it is
	 * consumed repeatedly in hip04_recv_cnt(); confirm with datasheet
	 */
	val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
	val |= PPE_CFG_STS_RX_PKT_CNT_RC;
	writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);

	/* map this port onto its own buffer-pool group */
	val = BIT(priv->port);
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);

	val = priv->port << PPE_CFG_QOS_VMID_GRP_SHIFT;
	val |= PPE_CFG_QOS_VMID_MODE;
	writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);

	/* buffer size the hardware may DMA into (matches the mapping
	 * length used throughout the driver)
	 */
	val = RX_BUF_SIZE;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);

	/* RX pool depth, FSFU bit, and this port's start index (chan) */
	val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
	val |= PPE_CFG_RX_FIFO_FSFU;
	val |= priv->chan << PPE_CFG_RX_START_SHIFT;
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);

	/* RX alignment — pairs with the NET_IP_ALIGN skb_reserve in
	 * hip04_rx_poll
	 */
	val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
	writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);

	val = PPE_CFG_RX_PKT_ALIGN;
	writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);

	val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
	writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);

	/* frame length limits: PPE max, MAC max, and runt threshold */
	val = GMAC_PPE_RX_PKT_MAX_LEN;
	writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);

	val = GMAC_MAX_PKT_LEN;
	writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);

	val = GMAC_MIN_PKT_LEN;
	writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);

	/* TX path: autoneg, append CRC, pad short frames */
	val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
	val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
	writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);

	/* RX path: strip CRC and padding before handing frames up */
	val = GE_RX_STRIP_CRC;
	writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);

	val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
	val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
	writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);

	val = GE_AUTO_NEG_CTL;
	writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
}
 291
 292static void hip04_mac_enable(struct net_device *ndev)
 293{
 294        struct hip04_priv *priv = netdev_priv(ndev);
 295        u32 val;
 296
 297        /* enable tx & rx */
 298        val = readl_relaxed(priv->base + GE_PORT_EN);
 299        val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
 300        writel_relaxed(val, priv->base + GE_PORT_EN);
 301
 302        /* clear rx int */
 303        val = RCV_INT;
 304        writel_relaxed(val, priv->base + PPE_RINT);
 305
 306        /* config recv int */
 307        val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
 308        writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);
 309
 310        /* enable interrupt */
 311        priv->reg_inten = DEF_INT_MASK;
 312        writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
 313}
 314
 315static void hip04_mac_disable(struct net_device *ndev)
 316{
 317        struct hip04_priv *priv = netdev_priv(ndev);
 318        u32 val;
 319
 320        /* disable int */
 321        priv->reg_inten &= ~(DEF_INT_MASK);
 322        writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
 323
 324        /* disable tx & rx */
 325        val = readl_relaxed(priv->base + GE_PORT_EN);
 326        val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
 327        writel_relaxed(val, priv->base + GE_PORT_EN);
 328}
 329
/* Hand a TX descriptor's DMA address to the transmit engine.  Uses a
 * non-relaxed writel so the preceding descriptor stores are ordered
 * before the doorbell.
 */
static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	writel(phys, priv->base + PPE_CFG_CPU_ADD_ADDR);
}
 334
/* Push an RX buffer's DMA address into this port's receive pool via
 * the shared PPE syscon.
 */
static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
{
	regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, phys);
}
 339
/* Read the RX packet counter.  Presumably read-clear — the RC bit is
 * set in hip04_config_fifo and hip04_rx_poll re-reads this to pick up
 * newly arrived frames.
 */
static u32 hip04_recv_cnt(struct hip04_priv *priv)
{
	return readl(priv->base + PPE_HIS_RX_PKT_CNT);
}
 344
 345static void hip04_update_mac_address(struct net_device *ndev)
 346{
 347        struct hip04_priv *priv = netdev_priv(ndev);
 348
 349        writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
 350                       priv->base + GE_STATION_MAC_ADDRESS);
 351        writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
 352                        (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
 353                       priv->base + GE_STATION_MAC_ADDRESS + 4);
 354}
 355
 356static int hip04_set_mac_address(struct net_device *ndev, void *addr)
 357{
 358        eth_mac_addr(ndev, addr);
 359        hip04_update_mac_address(ndev);
 360        return 0;
 361}
 362
/* Free skbs and DMA mappings for completed TX descriptors.  Called
 * from NAPI poll with force == false (stop at the first descriptor the
 * hardware still owns) or from hip04_mac_stop with force == true
 * (reclaim everything unconditionally).  Returns the number of
 * descriptors still outstanding.
 */
static int hip04_tx_reclaim(struct net_device *ndev, bool force)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	unsigned tx_tail = priv->tx_tail;
	struct tx_desc *desc;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int count;

	/* pairs with the smp_wmb() in hip04_mac_start_xmit */
	smp_rmb();
	count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
	if (count == 0)
		goto out;

	while (count) {
		desc = &priv->tx_desc[tx_tail];
		/* non-zero send_addr: descriptor presumably not yet
		 * written back by the hardware (TX_CLEAR_WB), i.e. still
		 * in flight — TODO confirm write-back semantics
		 */
		if (desc->send_addr != 0) {
			if (force)
				desc->send_addr = 0;
			else
				break;
		}

		if (priv->tx_phys[tx_tail]) {
			dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
					 priv->tx_skb[tx_tail]->len,
					 DMA_TO_DEVICE);
			priv->tx_phys[tx_tail] = 0;
		}
		pkts_compl++;
		bytes_compl += priv->tx_skb[tx_tail]->len;
		dev_kfree_skb(priv->tx_skb[tx_tail]);
		priv->tx_skb[tx_tail] = NULL;
		tx_tail = TX_NEXT(tx_tail);
		count--;
	}

	priv->tx_tail = tx_tail;
	smp_wmb(); /* Ensure tx_tail visible to xmit */

out:
	/* report completions to BQL */
	if (pkts_compl || bytes_compl)
		netdev_completed_queue(ndev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
		netif_wake_queue(ndev);

	return count;
}
 411
 412static void hip04_start_tx_timer(struct hip04_priv *priv)
 413{
 414        unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;
 415
 416        /* allow timer to fire after half the time at the earliest */
 417        hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
 418                               ns, HRTIMER_MODE_REL);
 419}
 420
/* ndo_start_xmit: place one skb on the TX ring and ring the doorbell.
 * There is no TX-complete interrupt; reclaim is driven by the coalesce
 * hrtimer, or by scheduling NAPI directly once tx_coalesce_frames
 * descriptors are queued.
 */
static netdev_tx_t
hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int tx_head = priv->tx_head, count;
	struct tx_desc *desc = &priv->tx_desc[tx_head];
	dma_addr_t phys;

	/* pairs with the smp_wmb() in hip04_tx_reclaim */
	smp_rmb();
	count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
	if (count == (TX_DESC_NUM - 1)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, phys)) {
		/* NOTE(review): drop is not accounted (no tx_dropped++) —
		 * consider adding
		 */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	priv->tx_skb[tx_head] = skb;
	priv->tx_phys[tx_head] = phys;
	/* descriptor fields are big-endian from the hardware's view */
	desc->send_addr = cpu_to_be32(phys);
	desc->send_size = cpu_to_be32(skb->len);
	desc->cfg = cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
	/* write-back target is the descriptor's own DMA address */
	phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
	desc->wb_addr = cpu_to_be32(phys);
	skb_tx_timestamp(skb);

	hip04_set_xmit_desc(priv, phys);
	priv->tx_head = TX_NEXT(tx_head);
	count++;
	/* BQL accounting; completed in hip04_tx_reclaim */
	netdev_sent_queue(ndev, skb->len);

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	/* Ensure tx_head update visible to tx reclaim */
	smp_wmb();

	/* queue is getting full, better start cleaning up now */
	if (count >= priv->tx_coalesce_frames) {
		if (napi_schedule_prep(&priv->napi)) {
			/* disable rx interrupt and timer */
			priv->reg_inten &= ~(RCV_INT);
			writel_relaxed(DEF_INT_MASK & ~RCV_INT,
				       priv->base + PPE_INTEN);
			hrtimer_cancel(&priv->tx_coalesce_timer);
			__napi_schedule(&priv->napi);
		}
	} else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
		/* cleanup not pending yet, start a new timer */
		hip04_start_tx_timer(priv);
	}

	return NETDEV_TX_OK;
}
 480
/* NAPI poll: harvest up to @budget frames from this port's RX pool and
 * piggy-back TX reclaim on the same pass (there is no TX-complete
 * interrupt).  Each buffer begins with a struct rx_desc status header
 * written by the hardware; pkt_len == 0 marks the end of the burst.
 */
static int hip04_rx_poll(struct napi_struct *napi, int budget)
{
	struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	unsigned int cnt = hip04_recv_cnt(priv);
	struct rx_desc *desc;
	struct sk_buff *skb;
	unsigned char *buf;
	bool last = false;
	dma_addr_t phys;
	int rx = 0;
	int tx_remaining;
	u16 len;
	u32 err;

	while (cnt && !last) {
		buf = priv->rx_buf[priv->rx_head];
		skb = build_skb(buf, priv->rx_buf_size);
		if (unlikely(!skb)) {
			net_dbg_ratelimited("build_skb failed\n");
			/* NOTE(review): this path refills the slot while
			 * the old buffer is still DMA-mapped (rx_phys is
			 * not unmapped) — looks like a mapping/frag leak;
			 * verify against mainline fixes.
			 */
			goto refill;
		}

		dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
				 RX_BUF_SIZE, DMA_FROM_DEVICE);
		priv->rx_phys[priv->rx_head] = 0;

		/* hardware status header at the start of the buffer */
		desc = (struct rx_desc *)skb->data;
		len = be16_to_cpu(desc->pkt_len);
		err = be32_to_cpu(desc->pkt_err);

		if (0 == len) {
			/* zero length: no more frames in this burst */
			dev_kfree_skb_any(skb);
			last = true;
		} else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
			dev_kfree_skb_any(skb);
			stats->rx_dropped++;
			stats->rx_errors++;
		} else {
			/* payload presumably starts NET_SKB_PAD +
			 * NET_IP_ALIGN into the buffer, matching the
			 * alignment programmed in hip04_config_fifo —
			 * TODO confirm buffer layout
			 */
			skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
			skb_put(skb, len);
			skb->protocol = eth_type_trans(skb, ndev);
			napi_gro_receive(&priv->napi, skb);
			stats->rx_packets++;
			stats->rx_bytes += len;
			rx++;
		}

refill:
		/* hand a fresh buffer back to the hardware for this slot */
		buf = netdev_alloc_frag(priv->rx_buf_size);
		if (!buf)
			goto done;
		phys = dma_map_single(&ndev->dev, buf,
				      RX_BUF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, phys))
			goto done;
		priv->rx_buf[priv->rx_head] = buf;
		priv->rx_phys[priv->rx_head] = phys;
		hip04_set_recv_desc(priv, phys);

		priv->rx_head = RX_NEXT(priv->rx_head);
		if (rx >= budget)
			goto done;	/* budget spent: skip napi_complete */

		if (--cnt == 0)
			cnt = hip04_recv_cnt(priv);	/* poll for new arrivals */
	}

	if (!(priv->reg_inten & RCV_INT)) {
		/* enable rx interrupt */
		priv->reg_inten |= RCV_INT;
		writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
	}
	napi_complete_done(napi, rx);
done:
	/* clean up tx descriptors and start a new timer if necessary */
	tx_remaining = hip04_tx_reclaim(ndev, false);
	if (rx < budget && tx_remaining)
		hip04_start_tx_timer(priv);

	return rx;
}
 564
/* Top-half interrupt handler: acknowledge all handled sources, account
 * error interrupts, and hand RX work to NAPI with the RX interrupt
 * masked.
 */
static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct hip04_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 ists = readl_relaxed(priv->base + PPE_INTSTS);

	if (!ists)
		return IRQ_NONE;	/* not ours / spurious */

	/* acknowledge every source we handle */
	writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);

	if (unlikely(ists & DEF_INT_ERR)) {
		if (ists & (RCV_NOBUF | RCV_DROP)) {
			stats->rx_errors++;
			stats->rx_dropped++;
			netdev_err(ndev, "rx drop\n");
		}
		if (ists & TX_DROP) {
			stats->tx_dropped++;
			netdev_err(ndev, "tx drop\n");
		}
	}

	if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
		/* disable rx interrupt */
		priv->reg_inten &= ~(RCV_INT);
		writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
		/* poll reclaims tx too, so the coalesce timer is moot */
		hrtimer_cancel(&priv->tx_coalesce_timer);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}
 599
 600static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
 601{
 602        struct hip04_priv *priv;
 603
 604        priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);
 605
 606        if (napi_schedule_prep(&priv->napi)) {
 607                /* disable rx interrupt */
 608                priv->reg_inten &= ~(RCV_INT);
 609                writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
 610                __napi_schedule(&priv->napi);
 611        }
 612
 613        return HRTIMER_NORESTART;
 614}
 615
 616static void hip04_adjust_link(struct net_device *ndev)
 617{
 618        struct hip04_priv *priv = netdev_priv(ndev);
 619        struct phy_device *phy = priv->phy;
 620
 621        if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
 622                hip04_config_port(ndev, phy->speed, phy->duplex);
 623                phy_print_status(phy);
 624        }
 625}
 626
 627static int hip04_mac_open(struct net_device *ndev)
 628{
 629        struct hip04_priv *priv = netdev_priv(ndev);
 630        int i;
 631
 632        priv->rx_head = 0;
 633        priv->tx_head = 0;
 634        priv->tx_tail = 0;
 635        hip04_reset_ppe(priv);
 636
 637        for (i = 0; i < RX_DESC_NUM; i++) {
 638                dma_addr_t phys;
 639
 640                phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
 641                                      RX_BUF_SIZE, DMA_FROM_DEVICE);
 642                if (dma_mapping_error(&ndev->dev, phys))
 643                        return -EIO;
 644
 645                priv->rx_phys[i] = phys;
 646                hip04_set_recv_desc(priv, phys);
 647        }
 648
 649        if (priv->phy)
 650                phy_start(priv->phy);
 651
 652        netdev_reset_queue(ndev);
 653        netif_start_queue(ndev);
 654        hip04_mac_enable(ndev);
 655        napi_enable(&priv->napi);
 656
 657        return 0;
 658}
 659
 660static int hip04_mac_stop(struct net_device *ndev)
 661{
 662        struct hip04_priv *priv = netdev_priv(ndev);
 663        int i;
 664
 665        napi_disable(&priv->napi);
 666        netif_stop_queue(ndev);
 667        hip04_mac_disable(ndev);
 668        hip04_tx_reclaim(ndev, true);
 669        hip04_reset_ppe(priv);
 670
 671        if (priv->phy)
 672                phy_stop(priv->phy);
 673
 674        for (i = 0; i < RX_DESC_NUM; i++) {
 675                if (priv->rx_phys[i]) {
 676                        dma_unmap_single(&ndev->dev, priv->rx_phys[i],
 677                                         RX_BUF_SIZE, DMA_FROM_DEVICE);
 678                        priv->rx_phys[i] = 0;
 679                }
 680        }
 681
 682        return 0;
 683}
 684
/* ndo_tx_timeout: defer recovery to process context — the stop/open
 * cycle in hip04_tx_timeout_task uses APIs that cannot run here.
 */
static void hip04_timeout(struct net_device *ndev)
{
	struct hip04_priv *priv = netdev_priv(ndev);

	schedule_work(&priv->tx_timeout_task);
}
 691
 692static void hip04_tx_timeout_task(struct work_struct *work)
 693{
 694        struct hip04_priv *priv;
 695
 696        priv = container_of(work, struct hip04_priv, tx_timeout_task);
 697        hip04_mac_stop(priv->ndev);
 698        hip04_mac_open(priv->ndev);
 699}
 700
 701static int hip04_get_coalesce(struct net_device *netdev,
 702                              struct ethtool_coalesce *ec)
 703{
 704        struct hip04_priv *priv = netdev_priv(netdev);
 705
 706        ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
 707        ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;
 708
 709        return 0;
 710}
 711
/* ethtool set_coalesce: only tx_coalesce_usecs and
 * tx_max_coalesced_frames are tunable.  Every other coalesce field
 * must be zero (-EOPNOTSUPP otherwise), and the two supported values
 * must fall within the HIP04_MIN/MAX_TX_COALESCE_* bounds.
 */
static int hip04_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct hip04_priv *priv = netdev_priv(netdev);

	/* Check not supported parameters  */
	if ((ec->rx_max_coalesced_frames) || (ec->rx_coalesce_usecs_irq) ||
	    (ec->rx_max_coalesced_frames_irq) || (ec->tx_coalesce_usecs_irq) ||
	    (ec->use_adaptive_rx_coalesce) || (ec->use_adaptive_tx_coalesce) ||
	    (ec->pkt_rate_low) || (ec->rx_coalesce_usecs_low) ||
	    (ec->rx_max_coalesced_frames_low) || (ec->tx_coalesce_usecs_high) ||
	    (ec->tx_max_coalesced_frames_low) || (ec->pkt_rate_high) ||
	    (ec->tx_coalesce_usecs_low) || (ec->rx_coalesce_usecs_high) ||
	    (ec->rx_max_coalesced_frames_high) || (ec->rx_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_irq) ||
	    (ec->stats_block_coalesce_usecs) ||
	    (ec->tx_max_coalesced_frames_high) || (ec->rate_sample_interval))
		return -EOPNOTSUPP;

	if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
	     ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
	    (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
	     ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
		return -EINVAL;

	priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
	priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;

	return 0;
}
 742
/* ethtool get_drvinfo: report the static driver name and version. */
static void hip04_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
 749
/* Only TX interrupt coalescing is tunable; see hip04_set_coalesce. */
static const struct ethtool_ops hip04_ethtool_ops = {
	.get_coalesce		= hip04_get_coalesce,
	.set_coalesce		= hip04_set_coalesce,
	.get_drvinfo		= hip04_get_drvinfo,
};
 755
/* netdev callbacks; TX timeout recovery is deferred via hip04_timeout. */
static const struct net_device_ops hip04_netdev_ops = {
	.ndo_open		= hip04_mac_open,
	.ndo_stop		= hip04_mac_stop,
	.ndo_start_xmit		= hip04_mac_start_xmit,
	.ndo_set_mac_address	= hip04_set_mac_address,
	.ndo_tx_timeout		= hip04_timeout,
	.ndo_validate_addr	= eth_validate_addr,
};
 764
 765static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
 766{
 767        struct hip04_priv *priv = netdev_priv(ndev);
 768        int i;
 769
 770        priv->tx_desc = dma_alloc_coherent(d,
 771                                           TX_DESC_NUM * sizeof(struct tx_desc),
 772                                           &priv->tx_desc_dma, GFP_KERNEL);
 773        if (!priv->tx_desc)
 774                return -ENOMEM;
 775
 776        priv->rx_buf_size = RX_BUF_SIZE +
 777                            SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 778        for (i = 0; i < RX_DESC_NUM; i++) {
 779                priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
 780                if (!priv->rx_buf[i])
 781                        return -ENOMEM;
 782        }
 783
 784        return 0;
 785}
 786
 787static void hip04_free_ring(struct net_device *ndev, struct device *d)
 788{
 789        struct hip04_priv *priv = netdev_priv(ndev);
 790        int i;
 791
 792        for (i = 0; i < RX_DESC_NUM; i++)
 793                if (priv->rx_buf[i])
 794                        skb_free_frag(priv->rx_buf[i]);
 795
 796        for (i = 0; i < TX_DESC_NUM; i++)
 797                if (priv->tx_skb[i])
 798                        dev_kfree_skb_any(priv->tx_skb[i]);
 799
 800        dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
 801                          priv->tx_desc, priv->tx_desc_dma);
 802}
 803
 804static int hip04_mac_probe(struct platform_device *pdev)
 805{
 806        struct device *d = &pdev->dev;
 807        struct device_node *node = d->of_node;
 808        struct of_phandle_args arg;
 809        struct net_device *ndev;
 810        struct hip04_priv *priv;
 811        struct resource *res;
 812        int irq;
 813        int ret;
 814
 815        ndev = alloc_etherdev(sizeof(struct hip04_priv));
 816        if (!ndev)
 817                return -ENOMEM;
 818
 819        priv = netdev_priv(ndev);
 820        priv->ndev = ndev;
 821        platform_set_drvdata(pdev, ndev);
 822        SET_NETDEV_DEV(ndev, &pdev->dev);
 823
 824        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 825        priv->base = devm_ioremap_resource(d, res);
 826        if (IS_ERR(priv->base)) {
 827                ret = PTR_ERR(priv->base);
 828                goto init_fail;
 829        }
 830
 831        ret = of_parse_phandle_with_fixed_args(node, "port-handle", 2, 0, &arg);
 832        if (ret < 0) {
 833                dev_warn(d, "no port-handle\n");
 834                goto init_fail;
 835        }
 836
 837        priv->port = arg.args[0];
 838        priv->chan = arg.args[1] * RX_DESC_NUM;
 839
 840        hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 841
 842        /* BQL will try to keep the TX queue as short as possible, but it can't
 843         * be faster than tx_coalesce_usecs, so we need a fast timeout here,
 844         * but also long enough to gather up enough frames to ensure we don't
 845         * get more interrupts than necessary.
 846         * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
 847         */
 848        priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
 849        priv->tx_coalesce_usecs = 200;
 850        priv->tx_coalesce_timer.function = tx_done;
 851
 852        priv->map = syscon_node_to_regmap(arg.np);
 853        if (IS_ERR(priv->map)) {
 854                dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
 855                ret = PTR_ERR(priv->map);
 856                goto init_fail;
 857        }
 858
 859        priv->phy_mode = of_get_phy_mode(node);
 860        if (priv->phy_mode < 0) {
 861                dev_warn(d, "not find phy-mode\n");
 862                ret = -EINVAL;
 863                goto init_fail;
 864        }
 865
 866        irq = platform_get_irq(pdev, 0);
 867        if (irq <= 0) {
 868                ret = -EINVAL;
 869                goto init_fail;
 870        }
 871
 872        ret = devm_request_irq(d, irq, hip04_mac_interrupt,
 873                               0, pdev->name, ndev);
 874        if (ret) {
 875                netdev_err(ndev, "devm_request_irq failed\n");
 876                goto init_fail;
 877        }
 878
 879        priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
 880        if (priv->phy_node) {
 881                priv->phy = of_phy_connect(ndev, priv->phy_node,
 882                                           &hip04_adjust_link,
 883                                           0, priv->phy_mode);
 884                if (!priv->phy) {
 885                        ret = -EPROBE_DEFER;
 886                        goto init_fail;
 887                }
 888        }
 889
 890        INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);
 891
 892        ndev->netdev_ops = &hip04_netdev_ops;
 893        ndev->ethtool_ops = &hip04_ethtool_ops;
 894        ndev->watchdog_timeo = TX_TIMEOUT;
 895        ndev->priv_flags |= IFF_UNICAST_FLT;
 896        ndev->irq = irq;
 897        netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
 898
 899        hip04_reset_ppe(priv);
 900        if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
 901                hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
 902
 903        hip04_config_fifo(priv);
 904        eth_random_addr(ndev->dev_addr);
 905        hip04_update_mac_address(ndev);
 906
 907        ret = hip04_alloc_ring(ndev, d);
 908        if (ret) {
 909                netdev_err(ndev, "alloc ring fail\n");
 910                goto alloc_fail;
 911        }
 912
 913        ret = register_netdev(ndev);
 914        if (ret)
 915                goto alloc_fail;
 916
 917        return 0;
 918
 919alloc_fail:
 920        hip04_free_ring(ndev, d);
 921init_fail:
 922        of_node_put(priv->phy_node);
 923        free_netdev(ndev);
 924        return ret;
 925}
 926
 927static int hip04_remove(struct platform_device *pdev)
 928{
 929        struct net_device *ndev = platform_get_drvdata(pdev);
 930        struct hip04_priv *priv = netdev_priv(ndev);
 931        struct device *d = &pdev->dev;
 932
 933        if (priv->phy)
 934                phy_disconnect(priv->phy);
 935
 936        hip04_free_ring(ndev, d);
 937        unregister_netdev(ndev);
 938        free_irq(ndev->irq, ndev);
 939        of_node_put(priv->phy_node);
 940        cancel_work_sync(&priv->tx_timeout_task);
 941        free_netdev(ndev);
 942
 943        return 0;
 944}
 945
/* Device-tree match table: binds this driver to "hisilicon,hip04-mac"
 * nodes; the empty entry terminates the table.
 */
static const struct of_device_id hip04_mac_match[] = {
        { .compatible = "hisilicon,hip04-mac" },
        { }
};

/* Export the table so module autoloading works via the OF modalias */
MODULE_DEVICE_TABLE(of, hip04_mac_match);
 952
/* Platform driver glue; module_platform_driver() expands to the module
 * init/exit pair that registers and unregisters this driver.
 */
static struct platform_driver hip04_mac_driver = {
        .probe  = hip04_mac_probe,
        .remove = hip04_remove,
        .driver = {
                .name           = DRV_NAME,
                .of_match_table = hip04_mac_match,
        },
};
module_platform_driver(hip04_mac_driver);

MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
MODULE_LICENSE("GPL");
 965