linux/drivers/net/ethernet/socionext/netsec.c
   1// SPDX-License-Identifier: GPL-2.0+
   2
   3#include <linux/types.h>
   4#include <linux/clk.h>
   5#include <linux/platform_device.h>
   6#include <linux/pm_runtime.h>
   7#include <linux/acpi.h>
   8#include <linux/of_mdio.h>
   9#include <linux/etherdevice.h>
  10#include <linux/interrupt.h>
  11#include <linux/io.h>
  12#include <linux/netlink.h>
  13#include <linux/bpf.h>
  14#include <linux/bpf_trace.h>
  15
  16#include <net/tcp.h>
  17#include <net/page_pool.h>
  18#include <net/ip6_checksum.h>
  19
  20#define NETSEC_REG_SOFT_RST                     0x104
  21#define NETSEC_REG_COM_INIT                     0x120
  22
  23#define NETSEC_REG_TOP_STATUS                   0x200
  24#define NETSEC_IRQ_RX                           BIT(1)
  25#define NETSEC_IRQ_TX                           BIT(0)
  26
  27#define NETSEC_REG_TOP_INTEN                    0x204
  28#define NETSEC_REG_INTEN_SET                    0x234
  29#define NETSEC_REG_INTEN_CLR                    0x238
  30
  31#define NETSEC_REG_NRM_TX_STATUS                0x400
  32#define NETSEC_REG_NRM_TX_INTEN                 0x404
  33#define NETSEC_REG_NRM_TX_INTEN_SET             0x428
  34#define NETSEC_REG_NRM_TX_INTEN_CLR             0x42c
  35#define NRM_TX_ST_NTOWNR        BIT(17)
  36#define NRM_TX_ST_TR_ERR        BIT(16)
  37#define NRM_TX_ST_TXDONE        BIT(15)
  38#define NRM_TX_ST_TMREXP        BIT(14)
  39
  40#define NETSEC_REG_NRM_RX_STATUS                0x440
  41#define NETSEC_REG_NRM_RX_INTEN                 0x444
  42#define NETSEC_REG_NRM_RX_INTEN_SET             0x468
  43#define NETSEC_REG_NRM_RX_INTEN_CLR             0x46c
  44#define NRM_RX_ST_RC_ERR        BIT(16)
  45#define NRM_RX_ST_PKTCNT        BIT(15)
  46#define NRM_RX_ST_TMREXP        BIT(14)
  47
  48#define NETSEC_REG_PKT_CMD_BUF                  0xd0
  49
  50#define NETSEC_REG_CLK_EN                       0x100
  51
  52#define NETSEC_REG_PKT_CTRL                     0x140
  53
  54#define NETSEC_REG_DMA_TMR_CTRL                 0x20c
  55#define NETSEC_REG_F_TAIKI_MC_VER               0x22c
  56#define NETSEC_REG_F_TAIKI_VER                  0x230
  57#define NETSEC_REG_DMA_HM_CTRL                  0x214
  58#define NETSEC_REG_DMA_MH_CTRL                  0x220
  59#define NETSEC_REG_ADDR_DIS_CORE                0x218
  60#define NETSEC_REG_DMAC_HM_CMD_BUF              0x210
  61#define NETSEC_REG_DMAC_MH_CMD_BUF              0x21c
  62
  63#define NETSEC_REG_NRM_TX_PKTCNT                0x410
  64
  65#define NETSEC_REG_NRM_TX_DONE_PKTCNT           0x414
  66#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT     0x418
  67
  68#define NETSEC_REG_NRM_TX_TMR                   0x41c
  69
  70#define NETSEC_REG_NRM_RX_PKTCNT                0x454
  71#define NETSEC_REG_NRM_RX_RXINT_PKTCNT          0x458
  72#define NETSEC_REG_NRM_TX_TXINT_TMR             0x420
  73#define NETSEC_REG_NRM_RX_RXINT_TMR             0x460
  74
  75#define NETSEC_REG_NRM_RX_TMR                   0x45c
  76
  77#define NETSEC_REG_NRM_TX_DESC_START_UP         0x434
  78#define NETSEC_REG_NRM_TX_DESC_START_LW         0x408
  79#define NETSEC_REG_NRM_RX_DESC_START_UP         0x474
  80#define NETSEC_REG_NRM_RX_DESC_START_LW         0x448
  81
  82#define NETSEC_REG_NRM_TX_CONFIG                0x430
  83#define NETSEC_REG_NRM_RX_CONFIG                0x470
  84
  85#define MAC_REG_STATUS                          0x1024
  86#define MAC_REG_DATA                            0x11c0
  87#define MAC_REG_CMD                             0x11c4
  88#define MAC_REG_FLOW_TH                         0x11cc
  89#define MAC_REG_INTF_SEL                        0x11d4
  90#define MAC_REG_DESC_INIT                       0x11fc
  91#define MAC_REG_DESC_SOFT_RST                   0x1204
  92#define NETSEC_REG_MODE_TRANS_COMP_STATUS       0x500
  93
  94#define GMAC_REG_MCR                            0x0000
  95#define GMAC_REG_MFFR                           0x0004
  96#define GMAC_REG_GAR                            0x0010
  97#define GMAC_REG_GDR                            0x0014
  98#define GMAC_REG_FCR                            0x0018
  99#define GMAC_REG_BMR                            0x1000
 100#define GMAC_REG_RDLAR                          0x100c
 101#define GMAC_REG_TDLAR                          0x1010
 102#define GMAC_REG_OMR                            0x1018
 103
 104#define MHZ(n)          ((n) * 1000 * 1000)
 105
 106#define NETSEC_TX_SHIFT_OWN_FIELD               31
 107#define NETSEC_TX_SHIFT_LD_FIELD                30
 108#define NETSEC_TX_SHIFT_DRID_FIELD              24
 109#define NETSEC_TX_SHIFT_PT_FIELD                21
 110#define NETSEC_TX_SHIFT_TDRID_FIELD             16
 111#define NETSEC_TX_SHIFT_CC_FIELD                15
 112#define NETSEC_TX_SHIFT_FS_FIELD                9
 113#define NETSEC_TX_LAST                          8
 114#define NETSEC_TX_SHIFT_CO                      7
 115#define NETSEC_TX_SHIFT_SO                      6
 116#define NETSEC_TX_SHIFT_TRS_FIELD               4
 117
 118#define NETSEC_RX_PKT_OWN_FIELD                 31
 119#define NETSEC_RX_PKT_LD_FIELD                  30
 120#define NETSEC_RX_PKT_SDRID_FIELD               24
 121#define NETSEC_RX_PKT_FR_FIELD                  23
 122#define NETSEC_RX_PKT_ER_FIELD                  21
 123#define NETSEC_RX_PKT_ERR_FIELD                 16
 124#define NETSEC_RX_PKT_TDRID_FIELD               12
 125#define NETSEC_RX_PKT_FS_FIELD                  9
 126#define NETSEC_RX_PKT_LS_FIELD                  8
 127#define NETSEC_RX_PKT_CO_FIELD                  6
 128
 129#define NETSEC_RX_PKT_ERR_MASK                  3
 130
 131#define NETSEC_MAX_TX_PKT_LEN                   1518
 132#define NETSEC_MAX_TX_JUMBO_PKT_LEN             9018
 133
 134#define NETSEC_RING_GMAC                        15
 135#define NETSEC_RING_MAX                         2
 136
 137#define NETSEC_TCP_SEG_LEN_MAX                  1460
 138#define NETSEC_TCP_JUMBO_SEG_LEN_MAX            8960
 139
 140#define NETSEC_RX_CKSUM_NOTAVAIL                0
 141#define NETSEC_RX_CKSUM_OK                      1
 142#define NETSEC_RX_CKSUM_NG                      2
 143
 144#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END        BIT(20)
 145#define NETSEC_IRQ_TRANSITION_COMPLETE          BIT(4)
 146
 147#define NETSEC_MODE_TRANS_COMP_IRQ_N2T          BIT(20)
 148#define NETSEC_MODE_TRANS_COMP_IRQ_T2N          BIT(19)
 149
 150#define NETSEC_INT_PKTCNT_MAX                   2047
 151
 152#define NETSEC_FLOW_START_TH_MAX                95
 153#define NETSEC_FLOW_STOP_TH_MAX                 95
 154#define NETSEC_FLOW_PAUSE_TIME_MIN              5
 155
 156#define NETSEC_CLK_EN_REG_DOM_ALL               0x3f
 157
 158#define NETSEC_PKT_CTRL_REG_MODE_NRM            BIT(28)
 159#define NETSEC_PKT_CTRL_REG_EN_JUMBO            BIT(27)
 160#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER       BIT(3)
 161#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE   BIT(2)
 162#define NETSEC_PKT_CTRL_REG_LOG_HD_ER           BIT(1)
 163#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH        BIT(0)
 164
 165#define NETSEC_CLK_EN_REG_DOM_G                 BIT(5)
 166#define NETSEC_CLK_EN_REG_DOM_C                 BIT(1)
 167#define NETSEC_CLK_EN_REG_DOM_D                 BIT(0)
 168
 169#define NETSEC_COM_INIT_REG_DB                  BIT(2)
 170#define NETSEC_COM_INIT_REG_CLS                 BIT(1)
 171#define NETSEC_COM_INIT_REG_ALL                 (NETSEC_COM_INIT_REG_CLS | \
 172                                                 NETSEC_COM_INIT_REG_DB)
 173
 174#define NETSEC_SOFT_RST_REG_RESET               0
 175#define NETSEC_SOFT_RST_REG_RUN                 BIT(31)
 176
 177#define NETSEC_DMA_CTRL_REG_STOP                1
 178#define MH_CTRL__MODE_TRANS                     BIT(20)
 179
 180#define NETSEC_GMAC_CMD_ST_READ                 0
 181#define NETSEC_GMAC_CMD_ST_WRITE                BIT(28)
 182#define NETSEC_GMAC_CMD_ST_BUSY                 BIT(31)
 183
 184#define NETSEC_GMAC_BMR_REG_COMMON              0x00412080
 185#define NETSEC_GMAC_BMR_REG_RESET               0x00020181
 186#define NETSEC_GMAC_BMR_REG_SWR                 0x00000001
 187
 188#define NETSEC_GMAC_OMR_REG_ST                  BIT(13)
 189#define NETSEC_GMAC_OMR_REG_SR                  BIT(1)
 190
 191#define NETSEC_GMAC_MCR_REG_IBN                 BIT(30)
 192#define NETSEC_GMAC_MCR_REG_CST                 BIT(25)
 193#define NETSEC_GMAC_MCR_REG_JE                  BIT(20)
 194#define NETSEC_MCR_PS                           BIT(15)
 195#define NETSEC_GMAC_MCR_REG_FES                 BIT(14)
 196#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON  0x0000280c
 197#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON  0x0001a00c
 198
 199#define NETSEC_FCR_RFE                          BIT(2)
 200#define NETSEC_FCR_TFE                          BIT(1)
 201
 202#define NETSEC_GMAC_GAR_REG_GW                  BIT(1)
 203#define NETSEC_GMAC_GAR_REG_GB                  BIT(0)
 204
 205#define NETSEC_GMAC_GAR_REG_SHIFT_PA            11
 206#define NETSEC_GMAC_GAR_REG_SHIFT_GR            6
 207#define GMAC_REG_SHIFT_CR_GAR                   2
 208
 209#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ        2
 210#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ        3
 211#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ       0
 212#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ      1
 213#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ      4
 214#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ      5
 215
 216#define NETSEC_GMAC_RDLAR_REG_COMMON            0x18000
 217#define NETSEC_GMAC_TDLAR_REG_COMMON            0x1c000
 218
 219#define NETSEC_REG_NETSEC_VER_F_TAIKI           0x50000
 220
 221#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP      BIT(31)
 222#define NETSEC_REG_DESC_RING_CONFIG_CH_RST      BIT(30)
 223#define NETSEC_REG_DESC_TMR_MODE                4
 224#define NETSEC_REG_DESC_ENDIAN                  0
 225
 226#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST       1
 227#define NETSEC_MAC_DESC_INIT_REG_INIT           1
 228
 229#define NETSEC_EEPROM_MAC_ADDRESS               0x00
 230#define NETSEC_EEPROM_HM_ME_ADDRESS_H           0x08
 231#define NETSEC_EEPROM_HM_ME_ADDRESS_L           0x0C
 232#define NETSEC_EEPROM_HM_ME_SIZE                0x10
 233#define NETSEC_EEPROM_MH_ME_ADDRESS_H           0x14
 234#define NETSEC_EEPROM_MH_ME_ADDRESS_L           0x18
 235#define NETSEC_EEPROM_MH_ME_SIZE                0x1C
 236#define NETSEC_EEPROM_PKT_ME_ADDRESS            0x20
 237#define NETSEC_EEPROM_PKT_ME_SIZE               0x24
 238
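/* Number of descriptors in each of the TX and RX rings */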
 239#define DESC_NUM        256
 240
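/* RX buffer layout within a single page_pool page: reserved headroom
 * (XDP headroom or SKB pad, plus NET_IP_ALIGN), then the packet data,
 * with room left at the end of the page for the skb_shared_info.
 */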
 241#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
 242#define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
 243                               NET_IP_ALIGN)
 244#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
 245                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 246
 247#define DESC_SZ sizeof(struct netsec_de)
 248
 249#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x)        ((x) & 0xffff0000)
 250
 251#define NETSEC_XDP_PASS          0
 252#define NETSEC_XDP_CONSUMED      BIT(0)
 253#define NETSEC_XDP_TX            BIT(1)
 254#define NETSEC_XDP_REDIR         BIT(2)
 255#define NETSEC_XDP_RX_OK (NETSEC_XDP_PASS | NETSEC_XDP_TX | NETSEC_XDP_REDIR)
 256
 257enum ring_id {
 258        NETSEC_RING_TX = 0,
 259        NETSEC_RING_RX
 260};
 261
 262enum buf_type {
 263        TYPE_NETSEC_SKB = 0,
 264        TYPE_NETSEC_XDP_TX,
 265        TYPE_NETSEC_XDP_NDO,
 266};
 267
 268struct netsec_desc {
 269        union {
 270                struct sk_buff *skb;
 271                struct xdp_frame *xdpf;
 272        };
 273        dma_addr_t dma_addr;
 274        void *addr;
 275        u16 len;
 276        u8 buf_type;
 277};
 278
 279struct netsec_desc_ring {
 280        dma_addr_t desc_dma;
 281        struct netsec_desc *desc;
 282        void *vaddr;
 283        u16 head, tail;
 284        u16 xdp_xmit; /* netsec_xdp_xmit packets */
 285        struct page_pool *page_pool;
 286        struct xdp_rxq_info xdp_rxq;
 287        spinlock_t lock; /* XDP tx queue locking */
 288};
 289
 290struct netsec_priv {
 291        struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
 292        struct ethtool_coalesce et_coalesce;
 293        struct bpf_prog *xdp_prog;
 294        spinlock_t reglock; /* protect reg access */
 295        struct napi_struct napi;
 296        phy_interface_t phy_interface;
 297        struct net_device *ndev;
 298        struct device_node *phy_np;
 299        struct phy_device *phydev;
 300        struct mii_bus *mii_bus;
 301        void __iomem *ioaddr;
 302        void __iomem *eeprom_base;
 303        struct device *dev;
 304        struct clk *clk;
 305        u32 msg_enable;
 306        u32 freq;
 307        u32 phy_addr;
 308        bool rx_cksum_offload_flag;
 309};
 310
 311struct netsec_de { /* Netsec Descriptor layout */
 312        u32 attr;
 313        u32 data_buf_addr_up;
 314        u32 data_buf_addr_lw;
 315        u32 buf_len_info;
 316};
 317
 318struct netsec_tx_pkt_ctrl {
 319        u16 tcp_seg_len;
 320        bool tcp_seg_offload_flag;
 321        bool cksum_offload_flag;
 322};
 323
 324struct netsec_rx_pkt_info {
 325        int rx_cksum_result;
 326        int err_code;
 327        bool err_flag;
 328};
 329
 330static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
 331{
 332        writel(val, priv->ioaddr + reg_addr);
 333}
 334
 335static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
 336{
 337        return readl(priv->ioaddr + reg_addr);
 338}
 339
 340/************* MDIO BUS OPS FOLLOW *************/
 341
 342#define TIMEOUT_SPINS_MAC               1000
 343#define TIMEOUT_SECONDARY_MS_MAC        100
 344
 345static u32 netsec_clk_type(u32 freq)
 346{
 347        if (freq < MHZ(35))
 348                return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
 349        if (freq < MHZ(60))
 350                return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
 351        if (freq < MHZ(100))
 352                return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
 353        if (freq < MHZ(150))
 354                return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
 355        if (freq < MHZ(250))
 356                return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;
 357
 358        return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
 359}
 360
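/* Poll @addr until the bits in @mask clear: busy-wait for up to
 * TIMEOUT_SPINS_MAC iterations first, then fall back to sleeping polls
 * before timing out.
 */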
 361static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
 362{
 363        u32 timeout = TIMEOUT_SPINS_MAC;
 364
 365        while (--timeout && netsec_read(priv, addr) & mask)
 366                cpu_relax();
 367        if (timeout)
 368                return 0;
 369
 370        timeout = TIMEOUT_SECONDARY_MS_MAC;
 371        while (--timeout && netsec_read(priv, addr) & mask)
 372                usleep_range(1000, 2000);
 373
 374        if (timeout)
 375                return 0;
 376
 377        netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
 378
 379        return -ETIMEDOUT;
 380}
 381
 382static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
 383{
 384        netsec_write(priv, MAC_REG_DATA, value);
 385        netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
 386        return netsec_wait_while_busy(priv,
 387                                      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
 388}
 389
 390static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
 391{
 392        int ret;
 393
 394        netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
 395        ret = netsec_wait_while_busy(priv,
 396                                     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
 397        if (ret)
 398                return ret;
 399
 400        *read = netsec_read(priv, MAC_REG_DATA);
 401
 402        return 0;
 403}
 404
 405static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
 406                                      u32 addr, u32 mask)
 407{
 408        u32 timeout = TIMEOUT_SPINS_MAC;
 409        int ret, data;
 410
 411        do {
 412                ret = netsec_mac_read(priv, addr, &data);
 413                if (ret)
 414                        break;
 415                cpu_relax();
 416        } while (--timeout && (data & mask));
 417
 418        if (timeout)
 419                return 0;
 420
 421        timeout = TIMEOUT_SECONDARY_MS_MAC;
 422        do {
 423                usleep_range(1000, 2000);
 424
 425                ret = netsec_mac_read(priv, addr, &data);
 426                if (ret)
 427                        break;
 428                cpu_relax();
 429        } while (--timeout && (data & mask));
 430
 431        if (timeout && !ret)
 432                return 0;
 433
 434        netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
 435
 436        return -ETIMEDOUT;
 437}
 438
 439static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
 440{
 441        struct phy_device *phydev = priv->ndev->phydev;
 442        u32 value = 0;
 443
 444        value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
 445                                 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;
 446
 447        if (phydev->speed != SPEED_1000)
 448                value |= NETSEC_MCR_PS;
 449
 450        if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
 451            phydev->speed == SPEED_100)
 452                value |= NETSEC_GMAC_MCR_REG_FES;
 453
 454        value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;
 455
 456        if (phy_interface_mode_is_rgmii(priv->phy_interface))
 457                value |= NETSEC_GMAC_MCR_REG_IBN;
 458
 459        if (netsec_mac_write(priv, GMAC_REG_MCR, value))
 460                return -ETIMEDOUT;
 461
 462        return 0;
 463}
 464
 465static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);
 466
 467static int netsec_phy_write(struct mii_bus *bus,
 468                            int phy_addr, int reg, u16 val)
 469{
 470        int status;
 471        struct netsec_priv *priv = bus->priv;
 472
 473        if (netsec_mac_write(priv, GMAC_REG_GDR, val))
 474                return -ETIMEDOUT;
 475        if (netsec_mac_write(priv, GMAC_REG_GAR,
 476                             phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
 477                             reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
 478                             NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
 479                             (netsec_clk_type(priv->freq) <<
 480                              GMAC_REG_SHIFT_CR_GAR)))
 481                return -ETIMEDOUT;
 482
 483        status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
 484                                            NETSEC_GMAC_GAR_REG_GB);
 485
  486        /* The Developerbox uses an RTL8211E PHY, which has a
  487         * compatibility problem with F_GMAC4.
  488         * The RTL8211E expects the MDC clock to keep toggling for several
  489         * clock cycles with MDIO high before entering the IDLE state.
  490         * To meet this requirement, the netsec driver issues a dummy
  491         * read (e.g. of the PHYID1 register at offset 0x2) right after a write.
  492         */
 493        netsec_phy_read(bus, phy_addr, MII_PHYSID1);
 494
 495        return status;
 496}
 497
 498static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
 499{
 500        struct netsec_priv *priv = bus->priv;
 501        u32 data;
 502        int ret;
 503
 504        if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
 505                             phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
 506                             reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
 507                             (netsec_clk_type(priv->freq) <<
 508                              GMAC_REG_SHIFT_CR_GAR)))
 509                return -ETIMEDOUT;
 510
 511        ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
 512                                         NETSEC_GMAC_GAR_REG_GB);
 513        if (ret)
 514                return ret;
 515
 516        ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
 517        if (ret)
 518                return ret;
 519
 520        return data;
 521}
 522
 523/************* ETHTOOL_OPS FOLLOW *************/
 524
 525static void netsec_et_get_drvinfo(struct net_device *net_device,
 526                                  struct ethtool_drvinfo *info)
 527{
 528        strlcpy(info->driver, "netsec", sizeof(info->driver));
 529        strlcpy(info->bus_info, dev_name(net_device->dev.parent),
 530                sizeof(info->bus_info));
 531}
 532
 533static int netsec_et_get_coalesce(struct net_device *net_device,
 534                                  struct ethtool_coalesce *et_coalesce)
 535{
 536        struct netsec_priv *priv = netdev_priv(net_device);
 537
 538        *et_coalesce = priv->et_coalesce;
 539
 540        return 0;
 541}
 542
 543static int netsec_et_set_coalesce(struct net_device *net_device,
 544                                  struct ethtool_coalesce *et_coalesce)
 545{
 546        struct netsec_priv *priv = netdev_priv(net_device);
 547
 548        priv->et_coalesce = *et_coalesce;
 549
 550        if (priv->et_coalesce.tx_coalesce_usecs < 50)
 551                priv->et_coalesce.tx_coalesce_usecs = 50;
 552        if (priv->et_coalesce.tx_max_coalesced_frames < 1)
 553                priv->et_coalesce.tx_max_coalesced_frames = 1;
 554
 555        netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
 556                     priv->et_coalesce.tx_max_coalesced_frames);
 557        netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
 558                     priv->et_coalesce.tx_coalesce_usecs);
 559        netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
 560        netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);
 561
 562        if (priv->et_coalesce.rx_coalesce_usecs < 50)
 563                priv->et_coalesce.rx_coalesce_usecs = 50;
 564        if (priv->et_coalesce.rx_max_coalesced_frames < 1)
 565                priv->et_coalesce.rx_max_coalesced_frames = 1;
 566
 567        netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
 568                     priv->et_coalesce.rx_max_coalesced_frames);
 569        netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
 570                     priv->et_coalesce.rx_coalesce_usecs);
 571        netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
 572        netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);
 573
 574        return 0;
 575}
 576
 577static u32 netsec_et_get_msglevel(struct net_device *dev)
 578{
 579        struct netsec_priv *priv = netdev_priv(dev);
 580
 581        return priv->msg_enable;
 582}
 583
 584static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
 585{
 586        struct netsec_priv *priv = netdev_priv(dev);
 587
 588        priv->msg_enable = datum;
 589}
 590
 591static const struct ethtool_ops netsec_ethtool_ops = {
 592        .get_drvinfo            = netsec_et_get_drvinfo,
 593        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
 594        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
 595        .get_link               = ethtool_op_get_link,
 596        .get_coalesce           = netsec_et_get_coalesce,
 597        .set_coalesce           = netsec_et_set_coalesce,
 598        .get_msglevel           = netsec_et_get_msglevel,
 599        .set_msglevel           = netsec_et_set_msglevel,
 600};
 601
 602/************* NETDEV_OPS FOLLOW *************/
 603
 604
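/* Write one RX descriptor: point it at the buffer described by @desc,
 * hand ownership back to the hardware and mirror the buffer info into
 * dring->desc[idx].
 */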
 605static void netsec_set_rx_de(struct netsec_priv *priv,
 606                             struct netsec_desc_ring *dring, u16 idx,
 607                             const struct netsec_desc *desc)
 608{
 609        struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
 610        u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
 611                   (1 << NETSEC_RX_PKT_FS_FIELD) |
 612                   (1 << NETSEC_RX_PKT_LS_FIELD);
 613
 614        if (idx == DESC_NUM - 1)
 615                attr |= (1 << NETSEC_RX_PKT_LD_FIELD);
 616
 617        de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
 618        de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
 619        de->buf_len_info = desc->len;
 620        de->attr = attr;
 621        dma_wmb();
 622
 623        dring->desc[idx].dma_addr = desc->dma_addr;
 624        dring->desc[idx].addr = desc->addr;
 625        dring->desc[idx].len = desc->len;
 626}
 627
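/* Reclaim TX descriptors the hardware has released (OWN bit cleared):
 * unmap buffers we mapped ourselves, free SKBs or return XDP frames on
 * the last descriptor of a packet, and advance the tail pointer.
 * Returns true if at least one descriptor was cleaned.
 */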
 628static bool netsec_clean_tx_dring(struct netsec_priv *priv)
 629{
 630        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
 631        struct netsec_de *entry;
 632        int tail = dring->tail;
 633        unsigned int bytes;
 634        int cnt = 0;
 635
 636        spin_lock(&dring->lock);
 637
 638        bytes = 0;
 639        entry = dring->vaddr + DESC_SZ * tail;
 640
 641        while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
 642               cnt < DESC_NUM) {
 643                struct netsec_desc *desc;
 644                int eop;
 645
 646                desc = &dring->desc[tail];
 647                eop = (entry->attr >> NETSEC_TX_LAST) & 1;
 648                dma_rmb();
 649
  650                /* If buf_type is either TYPE_NETSEC_SKB or
  651                 * TYPE_NETSEC_XDP_NDO, we mapped it ourselves
  652                 */
 653                if (desc->buf_type != TYPE_NETSEC_XDP_TX)
 654                        dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
 655                                         DMA_TO_DEVICE);
 656
 657                if (!eop)
 658                        goto next;
 659
 660                if (desc->buf_type == TYPE_NETSEC_SKB) {
 661                        bytes += desc->skb->len;
 662                        dev_kfree_skb(desc->skb);
 663                } else {
 664                        xdp_return_frame(desc->xdpf);
 665                }
 666next:
 667                /* clean up so netsec_uninit_pkt_dring() won't free the skb
 668                 * again
 669                 */
 670                *desc = (struct netsec_desc){};
 671
 672                /* entry->attr is not going to be accessed by the NIC until
 673                 * netsec_set_tx_de() is called. No need for a dma_wmb() here
 674                 */
 675                entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
 676                /* move tail ahead */
 677                dring->tail = (tail + 1) % DESC_NUM;
 678
 679                tail = dring->tail;
 680                entry = dring->vaddr + DESC_SZ * tail;
 681                cnt++;
 682        }
 683
 684        spin_unlock(&dring->lock);
 685
 686        if (!cnt)
 687                return false;
 688
 689        /* reading the register clears the irq */
 690        netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
 691
 692        priv->ndev->stats.tx_packets += cnt;
 693        priv->ndev->stats.tx_bytes += bytes;
 694
 695        netdev_completed_queue(priv->ndev, cnt, bytes);
 696
 697        return true;
 698}
 699
 700static void netsec_process_tx(struct netsec_priv *priv)
 701{
 702        struct net_device *ndev = priv->ndev;
 703        bool cleaned;
 704
 705        cleaned = netsec_clean_tx_dring(priv);
 706
 707        if (cleaned && netif_queue_stopped(ndev)) {
  708                /* Make sure we update the value so that anyone stopping
  709                 * the queue after this will read the proper consumer idx
  710                 */
 711                smp_wmb();
 712                netif_wake_queue(ndev);
 713        }
 714}
 715
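/* Allocate a fresh RX buffer from the page_pool. Returns the CPU address
 * of the page, with *dma_handle pointing past the reserved headroom and
 * *desc_len set to the usable payload length.
 */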
 716static void *netsec_alloc_rx_data(struct netsec_priv *priv,
 717                                  dma_addr_t *dma_handle, u16 *desc_len)
 718
 719{
 720
 721        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
 722        enum dma_data_direction dma_dir;
 723        struct page *page;
 724
 725        page = page_pool_dev_alloc_pages(dring->page_pool);
 726        if (!page)
 727                return NULL;
 728
  729        /* We allocate the same buffer length for XDP and non-XDP cases.
  730         * The page_pool API will map the whole page; we skip what's needed
  731         * for network payloads and/or XDP
  732         */
 733        *dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM;
 734        /* Make sure the incoming payload fits in the page for XDP and non-XDP
 735         * cases and reserve enough space for headroom + skb_shared_info
 736         */
 737        *desc_len = PAGE_SIZE - NETSEC_RX_BUF_NON_DATA;
 738        dma_dir = page_pool_get_dma_dir(dring->page_pool);
 739        dma_sync_single_for_device(priv->dev, *dma_handle, *desc_len, dma_dir);
 740
 741        return page_address(page);
 742}
 743
 744static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
 745{
 746        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
 747        u16 idx = from;
 748
 749        while (num) {
 750                netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
 751                idx++;
 752                if (idx >= DESC_NUM)
 753                        idx = 0;
 754                num--;
 755        }
 756}
 757
 758static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts)
 759{
 760        if (likely(pkts))
 761                netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts);
 762}
 763
 764static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
 765                                   u16 pkts)
 766{
 767        if (xdp_res & NETSEC_XDP_REDIR)
 768                xdp_do_flush_map();
 769
 770        if (xdp_res & NETSEC_XDP_TX)
 771                netsec_xdp_ring_tx_db(priv, pkts);
 772}
 773
 774static void netsec_set_tx_de(struct netsec_priv *priv,
 775                             struct netsec_desc_ring *dring,
 776                             const struct netsec_tx_pkt_ctrl *tx_ctrl,
 777                             const struct netsec_desc *desc, void *buf)
 778{
 779        int idx = dring->head;
 780        struct netsec_de *de;
 781        u32 attr;
 782
 783        de = dring->vaddr + (DESC_SZ * idx);
 784
 785        attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
 786               (1 << NETSEC_TX_SHIFT_PT_FIELD) |
 787               (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
 788               (1 << NETSEC_TX_SHIFT_FS_FIELD) |
 789               (1 << NETSEC_TX_LAST) |
 790               (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
 791               (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
 792               (1 << NETSEC_TX_SHIFT_TRS_FIELD);
 793        if (idx == DESC_NUM - 1)
 794                attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
 795
 796        de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
 797        de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
 798        de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
 799        de->attr = attr;
 800
 801        dring->desc[idx] = *desc;
 802        if (desc->buf_type == TYPE_NETSEC_SKB)
 803                dring->desc[idx].skb = buf;
 804        else if (desc->buf_type == TYPE_NETSEC_XDP_TX ||
 805                 desc->buf_type == TYPE_NETSEC_XDP_NDO)
 806                dring->desc[idx].xdpf = buf;
 807
 808        /* move head ahead */
 809        dring->head = (dring->head + 1) % DESC_NUM;
 810}
 811
  812/* The current driver only supports 1 Txq; this should run under spin_lock() */
 813static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
 814                                struct xdp_frame *xdpf, bool is_ndo)
 815
 816{
 817        struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
 818        struct page *page = virt_to_page(xdpf->data);
 819        struct netsec_tx_pkt_ctrl tx_ctrl = {};
 820        struct netsec_desc tx_desc;
 821        dma_addr_t dma_handle;
 822        u16 filled;
 823
 824        if (tx_ring->head >= tx_ring->tail)
 825                filled = tx_ring->head - tx_ring->tail;
 826        else
 827                filled = tx_ring->head + DESC_NUM - tx_ring->tail;
 828
 829        if (DESC_NUM - filled <= 1)
 830                return NETSEC_XDP_CONSUMED;
 831
 832        if (is_ndo) {
  833                /* This is for ndo_xdp_xmit; the buffer needs mapping before
  834                 * sending
  835                 */
 836                dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len,
 837                                            DMA_TO_DEVICE);
 838                if (dma_mapping_error(priv->dev, dma_handle))
 839                        return NETSEC_XDP_CONSUMED;
 840                tx_desc.buf_type = TYPE_NETSEC_XDP_NDO;
 841        } else {
  842                /* This is the device Rx buffer from the page_pool. No need
  843                 * to remap it, just sync and send it
  844                 */
 845                struct netsec_desc_ring *rx_ring =
 846                        &priv->desc_ring[NETSEC_RING_RX];
 847                enum dma_data_direction dma_dir =
 848                        page_pool_get_dma_dir(rx_ring->page_pool);
 849
 850                dma_handle = page_pool_get_dma_addr(page) +
 851                        NETSEC_RXBUF_HEADROOM;
 852                dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
 853                                           dma_dir);
 854                tx_desc.buf_type = TYPE_NETSEC_XDP_TX;
 855        }
 856
 857        tx_desc.dma_addr = dma_handle;
 858        tx_desc.addr = xdpf->data;
 859        tx_desc.len = xdpf->len;
 860
 861        netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);
 862
 863        return NETSEC_XDP_TX;
 864}
 865
 866static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
 867{
 868        struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
 869        struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
 870        u32 ret;
 871
 872        if (unlikely(!xdpf))
 873                return NETSEC_XDP_CONSUMED;
 874
 875        spin_lock(&tx_ring->lock);
 876        ret = netsec_xdp_queue_one(priv, xdpf, false);
 877        spin_unlock(&tx_ring->lock);
 878
 879        return ret;
 880}
 881
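/* Run the XDP program on one received buffer and translate its verdict
 * into the NETSEC_XDP_* result flags; drops and failed XDP_TX/XDP_REDIRECT
 * verdicts return the buffer with xdp_return_buff().
 */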
 882static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
 883                          struct xdp_buff *xdp)
 884{
 885        u32 ret = NETSEC_XDP_PASS;
 886        int err;
 887        u32 act;
 888
 889        act = bpf_prog_run_xdp(prog, xdp);
 890
 891        switch (act) {
 892        case XDP_PASS:
 893                ret = NETSEC_XDP_PASS;
 894                break;
 895        case XDP_TX:
 896                ret = netsec_xdp_xmit_back(priv, xdp);
 897                if (ret != NETSEC_XDP_TX)
 898                        xdp_return_buff(xdp);
 899                break;
 900        case XDP_REDIRECT:
 901                err = xdp_do_redirect(priv->ndev, xdp, prog);
 902                if (!err) {
 903                        ret = NETSEC_XDP_REDIR;
 904                } else {
 905                        ret = NETSEC_XDP_CONSUMED;
 906                        xdp_return_buff(xdp);
 907                }
 908                break;
 909        default:
 910                bpf_warn_invalid_xdp_action(act);
 911                /* fall through */
 912        case XDP_ABORTED:
 913                trace_xdp_exception(priv->ndev, prog, act);
 914                /* fall through -- handle aborts by dropping packet */
 915        case XDP_DROP:
 916                ret = NETSEC_XDP_CONSUMED;
 917                xdp_return_buff(xdp);
 918                break;
 919        }
 920
 921        return ret;
 922}
 923
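/* RX NAPI loop: for each completed descriptor run XDP (if a program is
 * attached), otherwise build an skb around the page_pool buffer, then
 * refill the ring slot with a freshly allocated buffer.
 */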
 924static int netsec_process_rx(struct netsec_priv *priv, int budget)
 925{
 926        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
 927        struct net_device *ndev = priv->ndev;
 928        struct netsec_rx_pkt_info rx_info;
 929        enum dma_data_direction dma_dir;
 930        struct bpf_prog *xdp_prog;
 931        struct sk_buff *skb = NULL;
 932        u16 xdp_xmit = 0;
 933        u32 xdp_act = 0;
 934        int done = 0;
 935
 936        rcu_read_lock();
 937        xdp_prog = READ_ONCE(priv->xdp_prog);
 938        dma_dir = page_pool_get_dma_dir(dring->page_pool);
 939
 940        while (done < budget) {
 941                u16 idx = dring->tail;
 942                struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
 943                struct netsec_desc *desc = &dring->desc[idx];
 944                struct page *page = virt_to_page(desc->addr);
 945                u32 xdp_result = XDP_PASS;
 946                u16 pkt_len, desc_len;
 947                dma_addr_t dma_handle;
 948                struct xdp_buff xdp;
 949                void *buf_addr;
 950
 951                if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
 952                        /* reading the register clears the irq */
 953                        netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
 954                        break;
 955                }
 956
  957                /* This barrier is needed to keep us from reading
 958                 * any other fields out of the netsec_de until we have
 959                 * verified the descriptor has been written back
 960                 */
 961                dma_rmb();
 962                done++;
 963
 964                pkt_len = de->buf_len_info >> 16;
 965                rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
 966                        NETSEC_RX_PKT_ERR_MASK;
 967                rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
 968                if (rx_info.err_flag) {
 969                        netif_err(priv, drv, priv->ndev,
 970                                  "%s: rx fail err(%d)\n", __func__,
 971                                  rx_info.err_code);
 972                        ndev->stats.rx_dropped++;
 973                        dring->tail = (dring->tail + 1) % DESC_NUM;
 974                        /* reuse buffer page frag */
 975                        netsec_rx_fill(priv, idx, 1);
 976                        continue;
 977                }
 978                rx_info.rx_cksum_result =
 979                        (de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
 980
 981                /* allocate a fresh buffer and map it to the hardware.
 982                 * This will eventually replace the old buffer in the hardware
 983                 */
 984                buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
 985
 986                if (unlikely(!buf_addr))
 987                        break;
 988
 989                dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
 990                                        dma_dir);
 991                prefetch(desc->addr);
 992
 993                xdp.data_hard_start = desc->addr;
 994                xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
 995                xdp_set_data_meta_invalid(&xdp);
 996                xdp.data_end = xdp.data + pkt_len;
 997                xdp.rxq = &dring->xdp_rxq;
 998
 999                if (xdp_prog) {
1000                        xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
1001                        if (xdp_result != NETSEC_XDP_PASS) {
1002                                xdp_act |= xdp_result;
1003                                if (xdp_result == NETSEC_XDP_TX)
1004                                        xdp_xmit++;
1005                                goto next;
1006                        }
1007                }
1008                skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);
1009
1010                if (unlikely(!skb)) {
 1011                        /* If building the skb fails, recycle_direct will
 1012                         * either unmap and free the page or refill the cache
 1013                         * depending on the cache state. Since we already paid
 1014                         * the allocation cost, try to put the page back into the cache
 1015                         */
1016                        page_pool_recycle_direct(dring->page_pool, page);
1017                        netif_err(priv, drv, priv->ndev,
1018                                  "rx failed to build skb\n");
1019                        break;
1020                }
1021                page_pool_release_page(dring->page_pool, page);
1022
1023                skb_reserve(skb, xdp.data - xdp.data_hard_start);
1024                skb_put(skb, xdp.data_end - xdp.data);
1025                skb->protocol = eth_type_trans(skb, priv->ndev);
1026
1027                if (priv->rx_cksum_offload_flag &&
1028                    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
1029                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1030
1031next:
1032                if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
1033                    xdp_result & NETSEC_XDP_RX_OK) {
1034                        ndev->stats.rx_packets++;
1035                        ndev->stats.rx_bytes += xdp.data_end - xdp.data;
1036                }
1037
1038                /* Update the descriptor with fresh buffers */
1039                desc->len = desc_len;
1040                desc->dma_addr = dma_handle;
1041                desc->addr = buf_addr;
1042
1043                netsec_rx_fill(priv, idx, 1);
1044                dring->tail = (dring->tail + 1) % DESC_NUM;
1045        }
1046        netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit);
1047
1048        rcu_read_unlock();
1049
1050        return done;
1051}
1052
1053static int netsec_napi_poll(struct napi_struct *napi, int budget)
1054{
1055        struct netsec_priv *priv;
1056        int done;
1057
1058        priv = container_of(napi, struct netsec_priv, napi);
1059
1060        netsec_process_tx(priv);
1061        done = netsec_process_rx(priv, budget);
1062
1063        if (done < budget && napi_complete_done(napi, done)) {
1064                unsigned long flags;
1065
1066                spin_lock_irqsave(&priv->reglock, flags);
1067                netsec_write(priv, NETSEC_REG_INTEN_SET,
1068                             NETSEC_IRQ_RX | NETSEC_IRQ_TX);
1069                spin_unlock_irqrestore(&priv->reglock, flags);
1070        }
1071
1072        return done;
1073}
1074
1075
1076static int netsec_desc_used(struct netsec_desc_ring *dring)
1077{
1078        int used;
1079
1080        if (dring->head >= dring->tail)
1081                used = dring->head - dring->tail;
1082        else
1083                used = dring->head + DESC_NUM - dring->tail;
1084
1085        return used;
1086}
1087
1088static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
1089{
1090        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
1091
1092        /* keep tail from touching the queue */
1093        if (DESC_NUM - used < 2) {
1094                netif_stop_queue(priv->ndev);
1095
1096                /* Make sure we read the updated value in case
1097                 * descriptors got freed
1098                 */
1099                smp_rmb();
1100
1101                used = netsec_desc_used(dring);
1102                if (DESC_NUM - used < 2)
1103                        return NETDEV_TX_BUSY;
1104
1105                netif_wake_queue(priv->ndev);
1106        }
1107
1108        return 0;
1109}
1110
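/* Transmit path: map the linear skb data, fill in the checksum/TSO control
 * bits, queue one descriptor and kick the hardware by writing the packet
 * count register.
 */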
1111static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
1112                                            struct net_device *ndev)
1113{
1114        struct netsec_priv *priv = netdev_priv(ndev);
1115        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
1116        struct netsec_tx_pkt_ctrl tx_ctrl = {};
1117        struct netsec_desc tx_desc;
1118        u16 tso_seg_len = 0;
1119        int filled;
1120
1121        spin_lock_bh(&dring->lock);
1122        filled = netsec_desc_used(dring);
1123        if (netsec_check_stop_tx(priv, filled)) {
1124                spin_unlock_bh(&dring->lock);
1125                net_warn_ratelimited("%s %s Tx queue full\n",
1126                                     dev_name(priv->dev), ndev->name);
1127                return NETDEV_TX_BUSY;
1128        }
1129
1130        if (skb->ip_summed == CHECKSUM_PARTIAL)
1131                tx_ctrl.cksum_offload_flag = true;
1132
1133        if (skb_is_gso(skb))
1134                tso_seg_len = skb_shinfo(skb)->gso_size;
1135
1136        if (tso_seg_len > 0) {
1137                if (skb->protocol == htons(ETH_P_IP)) {
1138                        ip_hdr(skb)->tot_len = 0;
1139                        tcp_hdr(skb)->check =
1140                                ~tcp_v4_check(0, ip_hdr(skb)->saddr,
1141                                              ip_hdr(skb)->daddr, 0);
1142                } else {
1143                        ipv6_hdr(skb)->payload_len = 0;
1144                        tcp_hdr(skb)->check =
1145                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1146                                                 &ipv6_hdr(skb)->daddr,
1147                                                 0, IPPROTO_TCP, 0);
1148                }
1149
1150                tx_ctrl.tcp_seg_offload_flag = true;
1151                tx_ctrl.tcp_seg_len = tso_seg_len;
1152        }
1153
1154        tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
1155                                          skb_headlen(skb), DMA_TO_DEVICE);
1156        if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
1157                spin_unlock_bh(&dring->lock);
1158                netif_err(priv, drv, priv->ndev,
1159                          "%s: DMA mapping failed\n", __func__);
1160                ndev->stats.tx_dropped++;
1161                dev_kfree_skb_any(skb);
1162                return NETDEV_TX_OK;
1163        }
1164        tx_desc.addr = skb->data;
1165        tx_desc.len = skb_headlen(skb);
1166        tx_desc.buf_type = TYPE_NETSEC_SKB;
1167
1168        skb_tx_timestamp(skb);
1169        netdev_sent_queue(priv->ndev, skb->len);
1170
1171        netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
1172        spin_unlock_bh(&dring->lock);
1173        netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
1174
1175        return NETDEV_TX_OK;
1176}
1177
1178static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
1179{
1180        struct netsec_desc_ring *dring = &priv->desc_ring[id];
1181        struct netsec_desc *desc;
1182        u16 idx;
1183
1184        if (!dring->vaddr || !dring->desc)
1185                return;
1186        for (idx = 0; idx < DESC_NUM; idx++) {
1187                desc = &dring->desc[idx];
1188                if (!desc->addr)
1189                        continue;
1190
1191                if (id == NETSEC_RING_RX) {
1192                        struct page *page = virt_to_page(desc->addr);
1193
1194                        page_pool_put_page(dring->page_pool, page, false);
1195                } else if (id == NETSEC_RING_TX) {
1196                        dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
1197                                         DMA_TO_DEVICE);
1198                        dev_kfree_skb(desc->skb);
1199                }
1200        }
1201
1202        /* Rx is currently using page_pool */
1203        if (id == NETSEC_RING_RX) {
1204                if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
1205                        xdp_rxq_info_unreg(&dring->xdp_rxq);
1206                page_pool_destroy(dring->page_pool);
1207        }
1208
1209        memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
1210        memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);
1211
1212        dring->head = 0;
1213        dring->tail = 0;
1214
1215        if (id == NETSEC_RING_TX)
1216                netdev_reset_queue(priv->ndev);
1217}
1218
1219static void netsec_free_dring(struct netsec_priv *priv, int id)
1220{
1221        struct netsec_desc_ring *dring = &priv->desc_ring[id];
1222
1223        if (dring->vaddr) {
1224                dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
1225                                  dring->vaddr, dring->desc_dma);
1226                dring->vaddr = NULL;
1227        }
1228
1229        kfree(dring->desc);
1230        dring->desc = NULL;
1231}
1232
1233static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
1234{
1235        struct netsec_desc_ring *dring = &priv->desc_ring[id];
1236
1237        dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
1238                                          &dring->desc_dma, GFP_KERNEL);
1239        if (!dring->vaddr)
1240                goto err;
1241
1242        dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
1243        if (!dring->desc)
1244                goto err;
1245
1246        return 0;
1247err:
1248        netsec_free_dring(priv, id);
1249
1250        return -ENOMEM;
1251}
1252
1253static void netsec_setup_tx_dring(struct netsec_priv *priv)
1254{
1255        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
1256        int i;
1257
1258        for (i = 0; i < DESC_NUM; i++) {
1259                struct netsec_de *de;
1260
1261                de = dring->vaddr + (DESC_SZ * i);
1262                /* de->attr is not going to be accessed by the NIC
1263                 * until netsec_set_tx_de() is called.
1264                 * No need for a dma_wmb() here
1265                 */
1266                de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
1267        }
1268}
1269
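/* Create the RX page_pool, register the XDP RX queue info against it and
 * pre-fill every RX descriptor with a freshly allocated buffer.
 */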
1270static int netsec_setup_rx_dring(struct netsec_priv *priv)
1271{
1272        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
1273        struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
1274        struct page_pool_params pp_params = { 0 };
1275        int i, err;
1276
1277        pp_params.order = 0;
1278        /* internal DMA mapping in page_pool */
1279        pp_params.flags = PP_FLAG_DMA_MAP;
1280        pp_params.pool_size = DESC_NUM;
1281        pp_params.nid = cpu_to_node(0);
1282        pp_params.dev = priv->dev;
1283        pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
1284
1285        dring->page_pool = page_pool_create(&pp_params);
1286        if (IS_ERR(dring->page_pool)) {
1287                err = PTR_ERR(dring->page_pool);
1288                dring->page_pool = NULL;
1289                goto err_out;
1290        }
1291
1292        err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0);
1293        if (err)
1294                goto err_out;
1295
1296        err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL,
1297                                         dring->page_pool);
1298        if (err)
1299                goto err_out;
1300
1301        for (i = 0; i < DESC_NUM; i++) {
1302                struct netsec_desc *desc = &dring->desc[i];
1303                dma_addr_t dma_handle;
1304                void *buf;
1305                u16 len;
1306
1307                buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
1308
1309                if (!buf) {
1310                        err = -ENOMEM;
1311                        goto err_out;
1312                }
1313                desc->dma_addr = dma_handle;
1314                desc->addr = buf;
1315                desc->len = len;
1316        }
1317
1318        netsec_rx_fill(priv, 0, DESC_NUM);
1319
1320        return 0;
1321
1322err_out:
1323        netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
1324        return err;
1325}
1326
1327static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
1328                                           u32 addr_h, u32 addr_l, u32 size)
1329{
1330        u64 base = (u64)addr_h << 32 | addr_l;
1331        void __iomem *ucode;
1332        u32 i;
1333
1334        ucode = ioremap(base, size * sizeof(u32));
1335        if (!ucode)
1336                return -ENOMEM;
1337
1338        for (i = 0; i < size; i++)
1339                netsec_write(priv, reg, readl(ucode + i * 4));
1340
1341        iounmap(ucode);
1342        return 0;
1343}
1344
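/* Load the HM, MH and packet engine microcode images: the addresses and
 * sizes come from the EEPROM map, and each word is written to the
 * corresponding command buffer register.
 */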
1345static int netsec_netdev_load_microcode(struct netsec_priv *priv)
1346{
1347        u32 addr_h, addr_l, size;
1348        int err;
1349
1350        addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
1351        addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
1352        size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
1353        err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
1354                                              addr_h, addr_l, size);
1355        if (err)
1356                return err;
1357
1358        addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
1359        addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
1360        size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
1361        err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
1362                                              addr_h, addr_l, size);
1363        if (err)
1364                return err;
1365
1366        addr_h = 0;
1367        addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
1368        size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
1369        err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
1370                                              addr_h, addr_l, size);
1371        if (err)
1372                return err;
1373
1374        return 0;
1375}
1376
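/* Full hardware (re)initialisation: stop the DMA engines, soft-reset the
 * block, program the descriptor ring base addresses, optionally reload
 * the microcode and switch the core back to normal mode.
 */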
1377static int netsec_reset_hardware(struct netsec_priv *priv,
1378                                 bool load_ucode)
1379{
1380        u32 value;
1381        int err;
1382
1383        /* stop DMA engines */
1384        if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
1385                netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
1386                             NETSEC_DMA_CTRL_REG_STOP);
1387                netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
1388                             NETSEC_DMA_CTRL_REG_STOP);
1389
1390                while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
1391                       NETSEC_DMA_CTRL_REG_STOP)
1392                        cpu_relax();
1393
1394                while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
1395                       NETSEC_DMA_CTRL_REG_STOP)
1396                        cpu_relax();
1397        }
1398
1399        netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
1400        netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
1401        netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);
1402
1403        while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
1404                cpu_relax();
1405
1406        /* set desc_start addr */
1407        netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
1408                     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
1409        netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
1410                     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
1411
1412        netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
1413                     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
1414        netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
1415                     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
1416
 1417        /* set normal tx/rx descriptor ring config */
1418        netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
1419                     1 << NETSEC_REG_DESC_ENDIAN);
1420        netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
1421                     1 << NETSEC_REG_DESC_ENDIAN);
1422
1423        if (load_ucode) {
1424                err = netsec_netdev_load_microcode(priv);
1425                if (err) {
1426                        netif_err(priv, probe, priv->ndev,
1427                                  "%s: failed to load microcode (%d)\n",
1428                                  __func__, err);
1429                        return err;
1430                }
1431        }
1432
1433        /* start DMA engines */
1434        netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
1435        netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);
1436
1437        usleep_range(1000, 2000);
1438
1439        if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
1440              NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
1441                netif_err(priv, probe, priv->ndev,
1442                          "microengine start failed\n");
1443                return -ENXIO;
1444        }
1445        netsec_write(priv, NETSEC_REG_TOP_STATUS,
1446                     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);
1447
1448        value = NETSEC_PKT_CTRL_REG_MODE_NRM;
1449        if (priv->ndev->mtu > ETH_DATA_LEN)
1450                value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;
1451
1452        /* change to normal mode */
1453        netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
1454        netsec_write(priv, NETSEC_REG_PKT_CTRL, value);
1455
1456        while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
1457                NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
1458                cpu_relax();
1459
1460        /* clear any pending EMPTY/ERR irq status */
1461        netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);
1462
1463        /* Disable TX & RX intr */
1464        netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
1465
1466        return 0;
1467}
1468
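/* Bring up the GMAC once the PHY reports link: program the MAC and bus
 * mode registers, reset and reinitialise the MAC descriptor logic, sync
 * speed/duplex from the PHY state, apply the interrupt coalescing
 * settings and finally set OMR.ST/SR to enable the transmitter and
 * receiver.
 */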
1469static int netsec_start_gmac(struct netsec_priv *priv)
1470{
1471        struct phy_device *phydev = priv->ndev->phydev;
1472        u32 value = 0;
1473        int ret;
1474
1475        if (phydev->speed != SPEED_1000)
1476                value = (NETSEC_GMAC_MCR_REG_CST |
1477                         NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);
1478
1479        if (netsec_mac_write(priv, GMAC_REG_MCR, value))
1480                return -ETIMEDOUT;
1481        if (netsec_mac_write(priv, GMAC_REG_BMR,
1482                             NETSEC_GMAC_BMR_REG_RESET))
1483                return -ETIMEDOUT;
1484
1485        /* Wait for the soft reset to complete */
1486        usleep_range(1000, 5000);
1487
1488        ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
1489        if (ret)
1490                return ret;
1491        if (value & NETSEC_GMAC_BMR_REG_SWR)
1492                return -EAGAIN;
1493
1494        netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
1495        if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
1496                return -ETIMEDOUT;
1497
1498        netsec_write(priv, MAC_REG_DESC_INIT, 1);
1499        if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
1500                return -ETIMEDOUT;
1501
1502        if (netsec_mac_write(priv, GMAC_REG_BMR,
1503                             NETSEC_GMAC_BMR_REG_COMMON))
1504                return -ETIMEDOUT;
1505        if (netsec_mac_write(priv, GMAC_REG_RDLAR,
1506                             NETSEC_GMAC_RDLAR_REG_COMMON))
1507                return -ETIMEDOUT;
1508        if (netsec_mac_write(priv, GMAC_REG_TDLAR,
1509                             NETSEC_GMAC_TDLAR_REG_COMMON))
1510                return -ETIMEDOUT;
1511        if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
1512                return -ETIMEDOUT;
1513
1514        ret = netsec_mac_update_to_phy_state(priv);
1515        if (ret)
1516                return ret;
1517
1518        ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
1519        if (ret)
1520                return ret;
1521
1522        value |= NETSEC_GMAC_OMR_REG_SR;
1523        value |= NETSEC_GMAC_OMR_REG_ST;
1524
1525        netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
1526        netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
1527
1528        netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);
1529
1530        if (netsec_mac_write(priv, GMAC_REG_OMR, value))
1531                return -ETIMEDOUT;
1532
1533        return 0;
1534}
1535
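/* Quiesce the GMAC on link loss: mask the normal-mode TX/RX interrupts
 * and clear the OMR start bits so the MAC stops transmitting and
 * receiving.
 */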
1536static int netsec_stop_gmac(struct netsec_priv *priv)
1537{
1538        u32 value;
1539        int ret;
1540
1541        ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
1542        if (ret)
1543                return ret;
1544        value &= ~NETSEC_GMAC_OMR_REG_SR;
1545        value &= ~NETSEC_GMAC_OMR_REG_ST;
1546
1547        /* disable all interrupts */
1548        netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
1549        netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
1550
1551        return netsec_mac_write(priv, GMAC_REG_OMR, value);
1552}
1553
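/* phylib adjust_link callback: restart the GMAC with the negotiated link
 * parameters, or stop it when the link goes down.
 */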
1554static void netsec_phy_adjust_link(struct net_device *ndev)
1555{
1556        struct netsec_priv *priv = netdev_priv(ndev);
1557
1558        if (ndev->phydev->link)
1559                netsec_start_gmac(priv);
1560        else
1561                netsec_stop_gmac(priv);
1562
1563        phy_print_status(ndev->phydev);
1564}
1565
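/* Top-level interrupt handler: acknowledge the normal-mode TX/RX status
 * registers, mask further TX/RX interrupts and defer the real work to
 * NAPI.
 */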
1566static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
1567{
1568        struct netsec_priv *priv = dev_id;
1569        u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
1570        unsigned long flags;
1571
1572        /* Write back the TX/RX status bits to acknowledge them */
1573        if (status & NETSEC_IRQ_TX) {
1574                val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
1575                netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
1576        }
1577        if (status & NETSEC_IRQ_RX) {
1578                val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
1579                netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
1580        }
1581
1582        spin_lock_irqsave(&priv->reglock, flags);
1583        netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
1584        spin_unlock_irqrestore(&priv->reglock, flags);
1585
1586        napi_schedule(&priv->napi);
1587
1588        return IRQ_HANDLED;
1589}
1590
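/* ndo_open: take a runtime PM reference, set up the TX/RX descriptor
 * rings, request the interrupt and connect the PHY (via the DT phandle,
 * or the directly probed device otherwise), then start the GMAC, NAPI
 * and the TX queue before unmasking TX/RX interrupts.
 */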
1591static int netsec_netdev_open(struct net_device *ndev)
1592{
1593        struct netsec_priv *priv = netdev_priv(ndev);
1594        int ret;
1595
1596        pm_runtime_get_sync(priv->dev);
1597
1598        netsec_setup_tx_dring(priv);
1599        ret = netsec_setup_rx_dring(priv);
1600        if (ret) {
1601                netif_err(priv, probe, priv->ndev,
1602                          "%s: failed to set up RX ring\n", __func__);
1603                goto err1;
1604        }
1605
1606        ret = request_irq(priv->ndev->irq, netsec_irq_handler,
1607                          IRQF_SHARED, "netsec", priv);
1608        if (ret) {
1609                netif_err(priv, drv, priv->ndev, "request_irq failed\n");
1610                goto err2;
1611        }
1612
1613        if (dev_of_node(priv->dev)) {
1614                if (!of_phy_connect(priv->ndev, priv->phy_np,
1615                                    netsec_phy_adjust_link, 0,
1616                                    priv->phy_interface)) {
1617                        netif_err(priv, link, priv->ndev, "missing PHY\n");
1618                        ret = -ENODEV;
1619                        goto err3;
1620                }
1621        } else {
1622                ret = phy_connect_direct(priv->ndev, priv->phydev,
1623                                         netsec_phy_adjust_link,
1624                                         priv->phy_interface);
1625                if (ret) {
1626                        netif_err(priv, link, priv->ndev,
1627                                  "phy_connect_direct() failed (%d)\n", ret);
1628                        goto err3;
1629                }
1630        }
1631
1632        phy_start(ndev->phydev);
1633
1634        netsec_start_gmac(priv);
1635        napi_enable(&priv->napi);
1636        netif_start_queue(ndev);
1637
1638        /* Enable TX+RX intr. */
1639        netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
1640
1641        return 0;
1642err3:
1643        free_irq(priv->ndev->irq, priv);
1644err2:
1645        netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
1646err1:
1647        pm_runtime_put_sync(priv->dev);
1648        return ret;
1649}
1650
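/* ndo_stop: the reverse of open - stop the queue and NAPI, mask
 * interrupts, stop the GMAC, free the IRQ, tear down the packet
 * descriptor rings, disconnect the PHY and reset the hardware without
 * reloading the microcode.
 */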
1651static int netsec_netdev_stop(struct net_device *ndev)
1652{
1653        int ret;
1654        struct netsec_priv *priv = netdev_priv(ndev);
1655
1656        netif_stop_queue(priv->ndev);
1657        dma_wmb();
1658
1659        napi_disable(&priv->napi);
1660
1661        netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
1662        netsec_stop_gmac(priv);
1663
1664        free_irq(priv->ndev->irq, priv);
1665
1666        netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
1667        netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
1668
1669        phy_stop(ndev->phydev);
1670        phy_disconnect(ndev->phydev);
1671
1672        ret = netsec_reset_hardware(priv, false);
1673
1674        pm_runtime_put_sync(priv->dev);
1675
1676        return ret;
1677}
1678
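/* ndo_init: allocate the TX/RX descriptor rings, power down the PHY
 * until the interface is opened, and perform the initial hardware reset
 * including the microcode load.
 */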
1679static int netsec_netdev_init(struct net_device *ndev)
1680{
1681        struct netsec_priv *priv = netdev_priv(ndev);
1682        int ret;
1683        u16 data;
1684
1685        BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);
1686
1687        ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
1688        if (ret)
1689                return ret;
1690
1691        ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
1692        if (ret)
1693                goto err1;
1694
1695        /* set phy power down */
1696        data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
1697                BMCR_PDOWN;
1698        netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
1699
1700        ret = netsec_reset_hardware(priv, true);
1701        if (ret)
1702                goto err2;
1703
1704        spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
1705        spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);
1706
1707        return 0;
1708err2:
1709        netsec_free_dring(priv, NETSEC_RING_RX);
1710err1:
1711        netsec_free_dring(priv, NETSEC_RING_TX);
1712        return ret;
1713}
1714
1715static void netsec_netdev_uninit(struct net_device *ndev)
1716{
1717        struct netsec_priv *priv = netdev_priv(ndev);
1718
1719        netsec_free_dring(priv, NETSEC_RING_RX);
1720        netsec_free_dring(priv, NETSEC_RING_TX);
1721}
1722
1723static int netsec_netdev_set_features(struct net_device *ndev,
1724                                      netdev_features_t features)
1725{
1726        struct netsec_priv *priv = netdev_priv(ndev);
1727
1728        priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);
1729
1730        return 0;
1731}
1732
1733static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
1734                               int cmd)
1735{
1736        return phy_mii_ioctl(ndev->phydev, ifr, cmd);
1737}
1738
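/* ndo_xdp_xmit: queue a batch of XDP frames on the TX ring under the
 * ring lock, freeing any frame that cannot be queued, and ring the TX
 * doorbell once if XDP_XMIT_FLUSH is set.  Returns the number of frames
 * actually queued.
 */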
1739static int netsec_xdp_xmit(struct net_device *ndev, int n,
1740                           struct xdp_frame **frames, u32 flags)
1741{
1742        struct netsec_priv *priv = netdev_priv(ndev);
1743        struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
1744        int drops = 0;
1745        int i;
1746
1747        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1748                return -EINVAL;
1749
1750        spin_lock(&tx_ring->lock);
1751        for (i = 0; i < n; i++) {
1752                struct xdp_frame *xdpf = frames[i];
1753                int err;
1754
1755                err = netsec_xdp_queue_one(priv, xdpf, true);
1756                if (err != NETSEC_XDP_TX) {
1757                        xdp_return_frame_rx_napi(xdpf);
1758                        drops++;
1759                } else {
1760                        tx_ring->xdp_xmit++;
1761                }
1762        }
1763        spin_unlock(&tx_ring->lock);
1764
1765        if (unlikely(flags & XDP_XMIT_FLUSH)) {
1766                netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
1767                tx_ring->xdp_xmit = 0;
1768        }
1769
1770        return n - drops;
1771}
1772
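/* Attach or detach an XDP program.  Jumbo MTUs are rejected, and a
 * running interface is stopped and reopened around the program swap so
 * the rings are rebuilt for the new configuration.
 */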
1773static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
1774                            struct netlink_ext_ack *extack)
1775{
1776        struct net_device *dev = priv->ndev;
1777        struct bpf_prog *old_prog;
1778
1779        /* For now, only support standard MTU-sized frames */
1780        if (prog && dev->mtu > 1500) {
1781                NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
1782                return -EOPNOTSUPP;
1783        }
1784
1785        if (netif_running(dev))
1786                netsec_netdev_stop(dev);
1787
1788        /* Detach old prog, if any */
1789        old_prog = xchg(&priv->xdp_prog, prog);
1790        if (old_prog)
1791                bpf_prog_put(old_prog);
1792
1793        if (netif_running(dev))
1794                netsec_netdev_open(dev);
1795
1796        return 0;
1797}
1798
1799static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
1800{
1801        struct netsec_priv *priv = netdev_priv(ndev);
1802
1803        switch (xdp->command) {
1804        case XDP_SETUP_PROG:
1805                return netsec_xdp_setup(priv, xdp->prog, xdp->extack);
1806        case XDP_QUERY_PROG:
1807                xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
1808                return 0;
1809        default:
1810                return -EINVAL;
1811        }
1812}
1813
1814static const struct net_device_ops netsec_netdev_ops = {
1815        .ndo_init               = netsec_netdev_init,
1816        .ndo_uninit             = netsec_netdev_uninit,
1817        .ndo_open               = netsec_netdev_open,
1818        .ndo_stop               = netsec_netdev_stop,
1819        .ndo_start_xmit         = netsec_netdev_start_xmit,
1820        .ndo_set_features       = netsec_netdev_set_features,
1821        .ndo_set_mac_address    = eth_mac_addr,
1822        .ndo_validate_addr      = eth_validate_addr,
1823        .ndo_do_ioctl           = netsec_netdev_ioctl,
1824        .ndo_xdp_xmit           = netsec_xdp_xmit,
1825        .ndo_bpf                = netsec_xdp,
1826};
1827
1828static int netsec_of_probe(struct platform_device *pdev,
1829                           struct netsec_priv *priv, u32 *phy_addr)
1830{
1831        priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1832        if (!priv->phy_np) {
1833                dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
1834                return -EINVAL;
1835        }
1836
1837        *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);
1838
1839        priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
1840        if (IS_ERR(priv->clk)) {
1841                dev_err(&pdev->dev, "phy_ref_clk not found\n");
1842                return PTR_ERR(priv->clk);
1843        }
1844        priv->freq = clk_get_rate(priv->clk);
1845
1846        return 0;
1847}
1848
1849static int netsec_acpi_probe(struct platform_device *pdev,
1850                             struct netsec_priv *priv, u32 *phy_addr)
1851{
1852        int ret;
1853
1854        if (!IS_ENABLED(CONFIG_ACPI))
1855                return -ENODEV;
1856
1857        ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
1858        if (ret) {
1859                dev_err(&pdev->dev,
1860                        "missing required property 'phy-channel'\n");
1861                return ret;
1862        }
1863
1864        ret = device_property_read_u32(&pdev->dev,
1865                                       "socionext,phy-clock-frequency",
1866                                       &priv->freq);
1867        if (ret)
1868                dev_err(&pdev->dev,
1869                        "missing required property 'socionext,phy-clock-frequency'\n");
1870        return ret;
1871}
1872
1873static void netsec_unregister_mdio(struct netsec_priv *priv)
1874{
1875        struct phy_device *phydev = priv->phydev;
1876
1877        if (!dev_of_node(priv->dev) && phydev) {
1878                phy_device_remove(phydev);
1879                phy_device_free(phydev);
1880        }
1881
1882        mdiobus_unregister(priv->mii_bus);
1883}
1884
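/* Register the MDIO bus.  On DT systems the bus is populated from the
 * "mdio" subnode (or from the device node itself for older firmware);
 * otherwise auto-probing is masked off and the single PHY at @phy_addr
 * is looked up and registered by hand.
 */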
1885static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
1886{
1887        struct mii_bus *bus;
1888        int ret;
1889
1890        bus = devm_mdiobus_alloc(priv->dev);
1891        if (!bus)
1892                return -ENOMEM;
1893
1894        snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
1895        bus->priv = priv;
1896        bus->name = "SNI NETSEC MDIO";
1897        bus->read = netsec_phy_read;
1898        bus->write = netsec_phy_write;
1899        bus->parent = priv->dev;
1900        priv->mii_bus = bus;
1901
1902        if (dev_of_node(priv->dev)) {
1903                struct device_node *mdio_node, *parent = dev_of_node(priv->dev);
1904
1905                mdio_node = of_get_child_by_name(parent, "mdio");
1906                if (mdio_node) {
1907                        parent = mdio_node;
1908                } else {
1909                        /* older firmware doesn't populate the mdio
1910                         * subnode; fall back to the device node until
1911                         * the firmware is upgraded.
1912                        dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
1913                }
1914
1915                ret = of_mdiobus_register(bus, parent);
1916                of_node_put(mdio_node);
1917
1918                if (ret) {
1919                        dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
1920                        return ret;
1921                }
1922        } else {
1923                /* Mask out all PHYs from auto probing. */
1924                bus->phy_mask = ~0;
1925                ret = mdiobus_register(bus);
1926                if (ret) {
1927                        dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
1928                        return ret;
1929                }
1930
1931                priv->phydev = get_phy_device(bus, phy_addr, false);
1932                if (IS_ERR(priv->phydev)) {
1933                        ret = PTR_ERR(priv->phydev);
1934                        dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
1935                        priv->phydev = NULL;
1936                        return -ENODEV;
1937                }
1938
1939                ret = phy_device_register(priv->phydev);
1940                if (ret) {
1941                        mdiobus_unregister(bus);
1942                        dev_err(priv->dev,
1943                                "phy_device_register err(%d)\n", ret);
1944                }
1945        }
1946
1947        return ret;
1948}
1949
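/* Probe: map the MMIO and EEPROM regions, obtain the MAC address (from
 * firmware properties or the EEPROM, falling back to a random one),
 * gather PHY and clock information from DT or ACPI, check the hardware
 * revision and register NAPI, the MDIO bus and finally the net_device.
 */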
1950static int netsec_probe(struct platform_device *pdev)
1951{
1952        struct resource *mmio_res, *eeprom_res, *irq_res;
1953        u8 *mac, macbuf[ETH_ALEN];
1954        struct netsec_priv *priv;
1955        u32 hw_ver, phy_addr = 0;
1956        struct net_device *ndev;
1957        int ret;
1958
1959        mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1960        if (!mmio_res) {
1961                dev_err(&pdev->dev, "No MMIO resource found.\n");
1962                return -ENODEV;
1963        }
1964
1965        eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1966        if (!eeprom_res) {
1967                dev_info(&pdev->dev, "No EEPROM resource found.\n");
1968                return -ENODEV;
1969        }
1970
1971        irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1972        if (!irq_res) {
1973                dev_err(&pdev->dev, "No IRQ resource found.\n");
1974                return -ENODEV;
1975        }
1976
1977        ndev = alloc_etherdev(sizeof(*priv));
1978        if (!ndev)
1979                return -ENOMEM;
1980
1981        priv = netdev_priv(ndev);
1982
1983        spin_lock_init(&priv->reglock);
1984        SET_NETDEV_DEV(ndev, &pdev->dev);
1985        platform_set_drvdata(pdev, priv);
1986        ndev->irq = irq_res->start;
1987        priv->dev = &pdev->dev;
1988        priv->ndev = ndev;
1989
1990        priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
1991                           NETIF_MSG_LINK | NETIF_MSG_PROBE;
1992
1993        priv->phy_interface = device_get_phy_mode(&pdev->dev);
1994        if ((int)priv->phy_interface < 0) {
1995                dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
1996                ret = -ENODEV;
1997                goto free_ndev;
1998        }
1999
2000        priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
2001                                    resource_size(mmio_res));
2002        if (!priv->ioaddr) {
2003                dev_err(&pdev->dev, "devm_ioremap() failed\n");
2004                ret = -ENXIO;
2005                goto free_ndev;
2006        }
2007
2008        priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
2009                                         resource_size(eeprom_res));
2010        if (!priv->eeprom_base) {
2011                dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
2012                ret = -ENXIO;
2013                goto free_ndev;
2014        }
2015
2016        mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
2017        if (mac)
2018                ether_addr_copy(ndev->dev_addr, mac);
2019
2020        if (priv->eeprom_base &&
2021            (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
2022                void __iomem *macp = priv->eeprom_base +
2023                                        NETSEC_EEPROM_MAC_ADDRESS;
2024
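                /* the EEPROM stores the MAC address byte-swapped within
                 * 32-bit words, hence the swizzled read offsets
                 */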
2025                ndev->dev_addr[0] = readb(macp + 3);
2026                ndev->dev_addr[1] = readb(macp + 2);
2027                ndev->dev_addr[2] = readb(macp + 1);
2028                ndev->dev_addr[3] = readb(macp + 0);
2029                ndev->dev_addr[4] = readb(macp + 7);
2030                ndev->dev_addr[5] = readb(macp + 6);
2031        }
2032
2033        if (!is_valid_ether_addr(ndev->dev_addr)) {
2034                dev_warn(&pdev->dev, "No MAC address found, using random\n");
2035                eth_hw_addr_random(ndev);
2036        }
2037
2038        if (dev_of_node(&pdev->dev))
2039                ret = netsec_of_probe(pdev, priv, &phy_addr);
2040        else
2041                ret = netsec_acpi_probe(pdev, priv, &phy_addr);
2042        if (ret)
2043                goto free_ndev;
2044
2045        priv->phy_addr = phy_addr;
2046
2047        if (!priv->freq) {
2048                dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
2049                ret = -ENODEV;
2050                goto free_ndev;
2051        }
2052
2053        /* interrupt coalescing defaults tuned for throughput */
2054        priv->et_coalesce.rx_coalesce_usecs = 500;
2055        priv->et_coalesce.rx_max_coalesced_frames = 8;
2056        priv->et_coalesce.tx_coalesce_usecs = 500;
2057        priv->et_coalesce.tx_max_coalesced_frames = 8;
2058
2059        ret = device_property_read_u32(&pdev->dev, "max-frame-size",
2060                                       &ndev->max_mtu);
2061        if (ret < 0)
2062                ndev->max_mtu = ETH_DATA_LEN;
2063
2064        /* hold a runtime PM reference for the rest of probe; open/close take their own */
2065        pm_runtime_enable(&pdev->dev);
2066        pm_runtime_get_sync(&pdev->dev);
2067
2068        hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
2069        /* this driver only supports F_TAIKI style NETSEC */
2070        if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
2071            NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
2072                ret = -ENODEV;
2073                goto pm_disable;
2074        }
2075
2076        dev_info(&pdev->dev, "hardware revision %d.%d\n",
2077                 hw_ver >> 16, hw_ver & 0xffff);
2078
2079        netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);
2080
2081        ndev->netdev_ops = &netsec_netdev_ops;
2082        ndev->ethtool_ops = &netsec_ethtool_ops;
2083
2084        ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
2085                                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2086        ndev->hw_features = ndev->features;
2087
2088        priv->rx_cksum_offload_flag = true;
2089
2090        ret = netsec_register_mdio(priv, phy_addr);
2091        if (ret)
2092                goto unreg_napi;
2093
2094        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
2095                dev_warn(&pdev->dev, "Failed to set DMA mask\n");
2096
2097        ret = register_netdev(ndev);
2098        if (ret) {
2099                netif_err(priv, probe, ndev, "register_netdev() failed\n");
2100                goto unreg_mii;
2101        }
2102
2103        pm_runtime_put_sync(&pdev->dev);
2104        return 0;
2105
2106unreg_mii:
2107        netsec_unregister_mdio(priv);
2108unreg_napi:
2109        netif_napi_del(&priv->napi);
2110pm_disable:
2111        pm_runtime_put_sync(&pdev->dev);
2112        pm_runtime_disable(&pdev->dev);
2113free_ndev:
2114        free_netdev(ndev);
2115        dev_err(&pdev->dev, "init failed\n");
2116
2117        return ret;
2118}
2119
2120static int netsec_remove(struct platform_device *pdev)
2121{
2122        struct netsec_priv *priv = platform_get_drvdata(pdev);
2123
2124        unregister_netdev(priv->ndev);
2125
2126        netsec_unregister_mdio(priv);
2127
2128        netif_napi_del(&priv->napi);
2129
2130        pm_runtime_disable(&pdev->dev);
2131        free_netdev(priv->ndev);
2132
2133        return 0;
2134}
2135
2136#ifdef CONFIG_PM
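/* Runtime PM: gate the NETSEC clock domains and the reference clock on
 * suspend, and re-enable them on resume.
 */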
2137static int netsec_runtime_suspend(struct device *dev)
2138{
2139        struct netsec_priv *priv = dev_get_drvdata(dev);
2140
2141        netsec_write(priv, NETSEC_REG_CLK_EN, 0);
2142
2143        clk_disable_unprepare(priv->clk);
2144
2145        return 0;
2146}
2147
2148static int netsec_runtime_resume(struct device *dev)
2149{
2150        struct netsec_priv *priv = dev_get_drvdata(dev);
2151
2152        clk_prepare_enable(priv->clk);
2153
2154        netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
2155                                               NETSEC_CLK_EN_REG_DOM_C |
2156                                               NETSEC_CLK_EN_REG_DOM_G);
2157        return 0;
2158}
2159#endif
2160
2161static const struct dev_pm_ops netsec_pm_ops = {
2162        SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
2163};
2164
2165static const struct of_device_id netsec_dt_ids[] = {
2166        { .compatible = "socionext,synquacer-netsec" },
2167        { }
2168};
2169MODULE_DEVICE_TABLE(of, netsec_dt_ids);
2170
2171#ifdef CONFIG_ACPI
2172static const struct acpi_device_id netsec_acpi_ids[] = {
2173        { "SCX0001" },
2174        { }
2175};
2176MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
2177#endif
2178
2179static struct platform_driver netsec_driver = {
2180        .probe  = netsec_probe,
2181        .remove = netsec_remove,
2182        .driver = {
2183                .name = "netsec",
2184                .pm = &netsec_pm_ops,
2185                .of_match_table = netsec_dt_ids,
2186                .acpi_match_table = ACPI_PTR(netsec_acpi_ids),
2187        },
2188};
2189module_platform_driver(netsec_driver);
2190
2191MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
2192MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
2193MODULE_DESCRIPTION("NETSEC Ethernet driver");
2194MODULE_LICENSE("GPL");
2195