linux/drivers/net/ethernet/socionext/netsec.c
// SPDX-License-Identifier: GPL-2.0+

#include <linux/types.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <linux/of_mdio.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/netlink.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <net/tcp.h>
#include <net/page_pool.h>
#include <net/ip6_checksum.h>

#define NETSEC_REG_SOFT_RST                     0x104
#define NETSEC_REG_COM_INIT                     0x120

#define NETSEC_REG_TOP_STATUS                   0x200
#define NETSEC_IRQ_RX                           BIT(1)
#define NETSEC_IRQ_TX                           BIT(0)

#define NETSEC_REG_TOP_INTEN                    0x204
#define NETSEC_REG_INTEN_SET                    0x234
#define NETSEC_REG_INTEN_CLR                    0x238

#define NETSEC_REG_NRM_TX_STATUS                0x400
#define NETSEC_REG_NRM_TX_INTEN                 0x404
#define NETSEC_REG_NRM_TX_INTEN_SET             0x428
#define NETSEC_REG_NRM_TX_INTEN_CLR             0x42c
#define NRM_TX_ST_NTOWNR        BIT(17)
#define NRM_TX_ST_TR_ERR        BIT(16)
#define NRM_TX_ST_TXDONE        BIT(15)
#define NRM_TX_ST_TMREXP        BIT(14)

#define NETSEC_REG_NRM_RX_STATUS                0x440
#define NETSEC_REG_NRM_RX_INTEN                 0x444
#define NETSEC_REG_NRM_RX_INTEN_SET             0x468
#define NETSEC_REG_NRM_RX_INTEN_CLR             0x46c
#define NRM_RX_ST_RC_ERR        BIT(16)
#define NRM_RX_ST_PKTCNT        BIT(15)
#define NRM_RX_ST_TMREXP        BIT(14)

#define NETSEC_REG_PKT_CMD_BUF                  0xd0

#define NETSEC_REG_CLK_EN                       0x100

#define NETSEC_REG_PKT_CTRL                     0x140

#define NETSEC_REG_DMA_TMR_CTRL                 0x20c
#define NETSEC_REG_F_TAIKI_MC_VER               0x22c
#define NETSEC_REG_F_TAIKI_VER                  0x230
#define NETSEC_REG_DMA_HM_CTRL                  0x214
#define NETSEC_REG_DMA_MH_CTRL                  0x220
#define NETSEC_REG_ADDR_DIS_CORE                0x218
#define NETSEC_REG_DMAC_HM_CMD_BUF              0x210
#define NETSEC_REG_DMAC_MH_CMD_BUF              0x21c

#define NETSEC_REG_NRM_TX_PKTCNT                0x410

#define NETSEC_REG_NRM_TX_DONE_PKTCNT           0x414
#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT     0x418

#define NETSEC_REG_NRM_TX_TMR                   0x41c

#define NETSEC_REG_NRM_RX_PKTCNT                0x454
#define NETSEC_REG_NRM_RX_RXINT_PKTCNT          0x458
#define NETSEC_REG_NRM_TX_TXINT_TMR             0x420
#define NETSEC_REG_NRM_RX_RXINT_TMR             0x460

#define NETSEC_REG_NRM_RX_TMR                   0x45c

#define NETSEC_REG_NRM_TX_DESC_START_UP         0x434
#define NETSEC_REG_NRM_TX_DESC_START_LW         0x408
#define NETSEC_REG_NRM_RX_DESC_START_UP         0x474
#define NETSEC_REG_NRM_RX_DESC_START_LW         0x448

#define NETSEC_REG_NRM_TX_CONFIG                0x430
#define NETSEC_REG_NRM_RX_CONFIG                0x470

#define MAC_REG_STATUS                          0x1024
#define MAC_REG_DATA                            0x11c0
#define MAC_REG_CMD                             0x11c4
#define MAC_REG_FLOW_TH                         0x11cc
#define MAC_REG_INTF_SEL                        0x11d4
#define MAC_REG_DESC_INIT                       0x11fc
#define MAC_REG_DESC_SOFT_RST                   0x1204
#define NETSEC_REG_MODE_TRANS_COMP_STATUS       0x500

#define GMAC_REG_MCR                            0x0000
#define GMAC_REG_MFFR                           0x0004
#define GMAC_REG_GAR                            0x0010
#define GMAC_REG_GDR                            0x0014
#define GMAC_REG_FCR                            0x0018
#define GMAC_REG_BMR                            0x1000
#define GMAC_REG_RDLAR                          0x100c
#define GMAC_REG_TDLAR                          0x1010
#define GMAC_REG_OMR                            0x1018

#define MHZ(n)          ((n) * 1000 * 1000)

#define NETSEC_TX_SHIFT_OWN_FIELD               31
#define NETSEC_TX_SHIFT_LD_FIELD                30
#define NETSEC_TX_SHIFT_DRID_FIELD              24
#define NETSEC_TX_SHIFT_PT_FIELD                21
#define NETSEC_TX_SHIFT_TDRID_FIELD             16
#define NETSEC_TX_SHIFT_CC_FIELD                15
#define NETSEC_TX_SHIFT_FS_FIELD                9
#define NETSEC_TX_LAST                          8
#define NETSEC_TX_SHIFT_CO                      7
#define NETSEC_TX_SHIFT_SO                      6
#define NETSEC_TX_SHIFT_TRS_FIELD               4

#define NETSEC_RX_PKT_OWN_FIELD                 31
#define NETSEC_RX_PKT_LD_FIELD                  30
#define NETSEC_RX_PKT_SDRID_FIELD               24
#define NETSEC_RX_PKT_FR_FIELD                  23
#define NETSEC_RX_PKT_ER_FIELD                  21
#define NETSEC_RX_PKT_ERR_FIELD                 16
#define NETSEC_RX_PKT_TDRID_FIELD               12
#define NETSEC_RX_PKT_FS_FIELD                  9
#define NETSEC_RX_PKT_LS_FIELD                  8
#define NETSEC_RX_PKT_CO_FIELD                  6

#define NETSEC_RX_PKT_ERR_MASK                  3

#define NETSEC_MAX_TX_PKT_LEN                   1518
#define NETSEC_MAX_TX_JUMBO_PKT_LEN             9018

#define NETSEC_RING_GMAC                        15
#define NETSEC_RING_MAX                         2

#define NETSEC_TCP_SEG_LEN_MAX                  1460
#define NETSEC_TCP_JUMBO_SEG_LEN_MAX            8960

#define NETSEC_RX_CKSUM_NOTAVAIL                0
#define NETSEC_RX_CKSUM_OK                      1
#define NETSEC_RX_CKSUM_NG                      2

#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END        BIT(20)
#define NETSEC_IRQ_TRANSITION_COMPLETE          BIT(4)

#define NETSEC_MODE_TRANS_COMP_IRQ_N2T          BIT(20)
#define NETSEC_MODE_TRANS_COMP_IRQ_T2N          BIT(19)

#define NETSEC_INT_PKTCNT_MAX                   2047

#define NETSEC_FLOW_START_TH_MAX                95
#define NETSEC_FLOW_STOP_TH_MAX                 95
#define NETSEC_FLOW_PAUSE_TIME_MIN              5

#define NETSEC_CLK_EN_REG_DOM_ALL               0x3f

#define NETSEC_PKT_CTRL_REG_MODE_NRM            BIT(28)
#define NETSEC_PKT_CTRL_REG_EN_JUMBO            BIT(27)
#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER       BIT(3)
#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE   BIT(2)
#define NETSEC_PKT_CTRL_REG_LOG_HD_ER           BIT(1)
#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH        BIT(0)

#define NETSEC_CLK_EN_REG_DOM_G                 BIT(5)
#define NETSEC_CLK_EN_REG_DOM_C                 BIT(1)
#define NETSEC_CLK_EN_REG_DOM_D                 BIT(0)

#define NETSEC_COM_INIT_REG_DB                  BIT(2)
#define NETSEC_COM_INIT_REG_CLS                 BIT(1)
#define NETSEC_COM_INIT_REG_ALL                 (NETSEC_COM_INIT_REG_CLS | \
                                                 NETSEC_COM_INIT_REG_DB)

#define NETSEC_SOFT_RST_REG_RESET               0
#define NETSEC_SOFT_RST_REG_RUN                 BIT(31)

#define NETSEC_DMA_CTRL_REG_STOP                1
#define MH_CTRL__MODE_TRANS                     BIT(20)

#define NETSEC_GMAC_CMD_ST_READ                 0
#define NETSEC_GMAC_CMD_ST_WRITE                BIT(28)
#define NETSEC_GMAC_CMD_ST_BUSY                 BIT(31)

#define NETSEC_GMAC_BMR_REG_COMMON              0x00412080
#define NETSEC_GMAC_BMR_REG_RESET               0x00020181
#define NETSEC_GMAC_BMR_REG_SWR                 0x00000001

#define NETSEC_GMAC_OMR_REG_ST                  BIT(13)
#define NETSEC_GMAC_OMR_REG_SR                  BIT(1)

#define NETSEC_GMAC_MCR_REG_IBN                 BIT(30)
#define NETSEC_GMAC_MCR_REG_CST                 BIT(25)
#define NETSEC_GMAC_MCR_REG_JE                  BIT(20)
#define NETSEC_MCR_PS                           BIT(15)
#define NETSEC_GMAC_MCR_REG_FES                 BIT(14)
#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON  0x0000280c
#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON  0x0001a00c

#define NETSEC_FCR_RFE                          BIT(2)
#define NETSEC_FCR_TFE                          BIT(1)

#define NETSEC_GMAC_GAR_REG_GW                  BIT(1)
#define NETSEC_GMAC_GAR_REG_GB                  BIT(0)

#define NETSEC_GMAC_GAR_REG_SHIFT_PA            11
#define NETSEC_GMAC_GAR_REG_SHIFT_GR            6
#define GMAC_REG_SHIFT_CR_GAR                   2

#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ        2
#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ        3
#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ       0
#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ      1
#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ      4
#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ      5

#define NETSEC_GMAC_RDLAR_REG_COMMON            0x18000
#define NETSEC_GMAC_TDLAR_REG_COMMON            0x1c000

#define NETSEC_REG_NETSEC_VER_F_TAIKI           0x50000

#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP      BIT(31)
#define NETSEC_REG_DESC_RING_CONFIG_CH_RST      BIT(30)
#define NETSEC_REG_DESC_TMR_MODE                4
#define NETSEC_REG_DESC_ENDIAN                  0

#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST       1
#define NETSEC_MAC_DESC_INIT_REG_INIT           1

#define NETSEC_EEPROM_MAC_ADDRESS               0x00
#define NETSEC_EEPROM_HM_ME_ADDRESS_H           0x08
#define NETSEC_EEPROM_HM_ME_ADDRESS_L           0x0C
#define NETSEC_EEPROM_HM_ME_SIZE                0x10
#define NETSEC_EEPROM_MH_ME_ADDRESS_H           0x14
#define NETSEC_EEPROM_MH_ME_ADDRESS_L           0x18
#define NETSEC_EEPROM_MH_ME_SIZE                0x1C
#define NETSEC_EEPROM_PKT_ME_ADDRESS            0x20
#define NETSEC_EEPROM_PKT_ME_SIZE               0x24

#define DESC_NUM        256

#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#define NETSEC_RXBUF_HEADROOM (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + \
                               NET_IP_ALIGN)
#define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define DESC_SZ sizeof(struct netsec_de)

#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x)        ((x) & 0xffff0000)

#define NETSEC_XDP_PASS          0
#define NETSEC_XDP_CONSUMED      BIT(0)
#define NETSEC_XDP_TX            BIT(1)
#define NETSEC_XDP_REDIR         BIT(2)
#define NETSEC_XDP_RX_OK (NETSEC_XDP_PASS | NETSEC_XDP_TX | NETSEC_XDP_REDIR)

enum ring_id {
        NETSEC_RING_TX = 0,
        NETSEC_RING_RX
};

enum buf_type {
        TYPE_NETSEC_SKB = 0,
        TYPE_NETSEC_XDP_TX,
        TYPE_NETSEC_XDP_NDO,
};

struct netsec_desc {
        union {
                struct sk_buff *skb;
                struct xdp_frame *xdpf;
        };
        dma_addr_t dma_addr;
        void *addr;
        u16 len;
        u8 buf_type;
};

struct netsec_desc_ring {
        dma_addr_t desc_dma;
        struct netsec_desc *desc;
        void *vaddr;
        u16 head, tail;
        u16 xdp_xmit; /* netsec_xdp_xmit packets */
        bool is_xdp;
        struct page_pool *page_pool;
        struct xdp_rxq_info xdp_rxq;
        spinlock_t lock; /* XDP tx queue locking */
};

struct netsec_priv {
        struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
        struct ethtool_coalesce et_coalesce;
        struct bpf_prog *xdp_prog;
        spinlock_t reglock; /* protect reg access */
        struct napi_struct napi;
        phy_interface_t phy_interface;
        struct net_device *ndev;
        struct device_node *phy_np;
        struct phy_device *phydev;
        struct mii_bus *mii_bus;
        void __iomem *ioaddr;
        void __iomem *eeprom_base;
        struct device *dev;
        struct clk *clk;
        u32 msg_enable;
        u32 freq;
        u32 phy_addr;
        bool rx_cksum_offload_flag;
};

struct netsec_de { /* Netsec Descriptor layout */
        u32 attr;
        u32 data_buf_addr_up;
        u32 data_buf_addr_lw;
        u32 buf_len_info;
};

struct netsec_tx_pkt_ctrl {
        u16 tcp_seg_len;
        bool tcp_seg_offload_flag;
        bool cksum_offload_flag;
};

struct netsec_rx_pkt_info {
        int rx_cksum_result;
        int err_code;
        bool err_flag;
};

static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
{
        writel(val, priv->ioaddr + reg_addr);
}

static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
{
        return readl(priv->ioaddr + reg_addr);
}

/************* MDIO BUS OPS FOLLOW *************/

#define TIMEOUT_SPINS_MAC               1000
#define TIMEOUT_SECONDARY_MS_MAC        100

static u32 netsec_clk_type(u32 freq)
{
        if (freq < MHZ(35))
                return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
        if (freq < MHZ(60))
                return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
        if (freq < MHZ(100))
                return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
        if (freq < MHZ(150))
                return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
        if (freq < MHZ(250))
                return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;

        return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
}

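/* Poll @addr until the bits in @mask clear: spin briefly for the common
 * fast case, then fall back to a sleeping poll of roughly 1-2ms per
 * iteration before giving up with -ETIMEDOUT.
 */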
static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
{
        u32 timeout = TIMEOUT_SPINS_MAC;

        while (--timeout && netsec_read(priv, addr) & mask)
                cpu_relax();
        if (timeout)
                return 0;

        timeout = TIMEOUT_SECONDARY_MS_MAC;
        while (--timeout && netsec_read(priv, addr) & mask)
                usleep_range(1000, 2000);

        if (timeout)
                return 0;

        netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

        return -ETIMEDOUT;
}

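/* The GMAC registers are not mapped directly. They are reached through
 * an indirection: a write latches the value into MAC_REG_DATA and posts
 * the target address plus a write strobe to MAC_REG_CMD; completion is
 * signalled by NETSEC_GMAC_CMD_ST_BUSY clearing. Reads work the same
 * way in reverse.
 */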
static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
{
        netsec_write(priv, MAC_REG_DATA, value);
        netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
        return netsec_wait_while_busy(priv,
                                      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
}

static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
{
        int ret;

        netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
        ret = netsec_wait_while_busy(priv,
                                     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
        if (ret)
                return ret;

        *read = netsec_read(priv, MAC_REG_DATA);

        return 0;
}

static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
                                      u32 addr, u32 mask)
{
        u32 timeout = TIMEOUT_SPINS_MAC;
        u32 data;
        int ret;

        do {
                ret = netsec_mac_read(priv, addr, &data);
                if (ret)
                        break;
                cpu_relax();
        } while (--timeout && (data & mask));

        if (timeout && !ret)
                return 0;

        timeout = TIMEOUT_SECONDARY_MS_MAC;
        do {
                usleep_range(1000, 2000);

                ret = netsec_mac_read(priv, addr, &data);
                if (ret)
                        break;
                cpu_relax();
        } while (--timeout && (data & mask));

        if (timeout && !ret)
                return 0;

        netdev_WARN(priv->ndev, "%s: timeout\n", __func__);

        return -ETIMEDOUT;
}

static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
{
        struct phy_device *phydev = priv->ndev->phydev;
        u32 value = 0;

        value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
                                 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;

        if (phydev->speed != SPEED_1000)
                value |= NETSEC_MCR_PS;

        if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
            phydev->speed == SPEED_100)
                value |= NETSEC_GMAC_MCR_REG_FES;

        value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;

        if (phy_interface_mode_is_rgmii(priv->phy_interface))
                value |= NETSEC_GMAC_MCR_REG_IBN;

        if (netsec_mac_write(priv, GMAC_REG_MCR, value))
                return -ETIMEDOUT;

        return 0;
}

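/* MDIO transactions also go through the GMAC: the frame (PHY address,
 * register, clock range and read/write direction) is described in
 * GMAC_REG_GAR and the data travels through GMAC_REG_GDR, with
 * NETSEC_GMAC_GAR_REG_GB acting as the busy flag for the bus.
 */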
static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);

static int netsec_phy_write(struct mii_bus *bus,
                            int phy_addr, int reg, u16 val)
{
        int status;
        struct netsec_priv *priv = bus->priv;

        if (netsec_mac_write(priv, GMAC_REG_GDR, val))
                return -ETIMEDOUT;
        if (netsec_mac_write(priv, GMAC_REG_GAR,
                             phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
                             reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
                             NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
                             (netsec_clk_type(priv->freq) <<
                              GMAC_REG_SHIFT_CR_GAR)))
                return -ETIMEDOUT;

        status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
                                            NETSEC_GMAC_GAR_REG_GB);

        /* The Developerbox implements an RTL8211E PHY, which has a
         * compatibility problem with F_GMAC4.
         * The RTL8211E expects the MDC clock to keep toggling for several
         * clock cycles with MDIO high before entering the IDLE state.
         * To meet this requirement, the netsec driver needs to issue a
         * dummy read (e.g. of the PHYID1 register at offset 0x2) right
         * after the write.
         */
        netsec_phy_read(bus, phy_addr, MII_PHYSID1);

        return status;
}

static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
{
        struct netsec_priv *priv = bus->priv;
        u32 data;
        int ret;

        if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
                             phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
                             reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
                             (netsec_clk_type(priv->freq) <<
                              GMAC_REG_SHIFT_CR_GAR)))
                return -ETIMEDOUT;

        ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
                                         NETSEC_GMAC_GAR_REG_GB);
        if (ret)
                return ret;

        ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
        if (ret)
                return ret;

        return data;
}

/************* ETHTOOL_OPS FOLLOW *************/

static void netsec_et_get_drvinfo(struct net_device *net_device,
                                  struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, "netsec", sizeof(info->driver));
        strlcpy(info->bus_info, dev_name(net_device->dev.parent),
                sizeof(info->bus_info));
}

static int netsec_et_get_coalesce(struct net_device *net_device,
                                  struct ethtool_coalesce *et_coalesce)
{
        struct netsec_priv *priv = netdev_priv(net_device);

        *et_coalesce = priv->et_coalesce;

        return 0;
}

static int netsec_et_set_coalesce(struct net_device *net_device,
                                  struct ethtool_coalesce *et_coalesce)
{
        struct netsec_priv *priv = netdev_priv(net_device);

        priv->et_coalesce = *et_coalesce;

        if (priv->et_coalesce.tx_coalesce_usecs < 50)
                priv->et_coalesce.tx_coalesce_usecs = 50;
        if (priv->et_coalesce.tx_max_coalesced_frames < 1)
                priv->et_coalesce.tx_max_coalesced_frames = 1;

        netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
                     priv->et_coalesce.tx_max_coalesced_frames);
        netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
                     priv->et_coalesce.tx_coalesce_usecs);
        netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
        netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);

        if (priv->et_coalesce.rx_coalesce_usecs < 50)
                priv->et_coalesce.rx_coalesce_usecs = 50;
        if (priv->et_coalesce.rx_max_coalesced_frames < 1)
                priv->et_coalesce.rx_max_coalesced_frames = 1;

        netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
                     priv->et_coalesce.rx_max_coalesced_frames);
        netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
                     priv->et_coalesce.rx_coalesce_usecs);
        netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
        netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);

        return 0;
}

static u32 netsec_et_get_msglevel(struct net_device *dev)
{
        struct netsec_priv *priv = netdev_priv(dev);

        return priv->msg_enable;
}

static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
{
        struct netsec_priv *priv = netdev_priv(dev);

        priv->msg_enable = datum;
}

static const struct ethtool_ops netsec_ethtool_ops = {
        .get_drvinfo            = netsec_et_get_drvinfo,
        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
        .get_link               = ethtool_op_get_link,
        .get_coalesce           = netsec_et_get_coalesce,
        .set_coalesce           = netsec_et_set_coalesce,
        .get_msglevel           = netsec_et_get_msglevel,
        .set_msglevel           = netsec_et_set_msglevel,
};

/************* NETDEV_OPS FOLLOW *************/

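/* Publish one RX descriptor to the hardware: the OWN bit hands the
 * slot to the NIC, FS/LS mark it as a single-buffer frame, and the LD
 * bit on the final entry marks the end of the ring so the engine wraps
 * back to the start.
 */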
static void netsec_set_rx_de(struct netsec_priv *priv,
                             struct netsec_desc_ring *dring, u16 idx,
                             const struct netsec_desc *desc)
{
        struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
        u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
                   (1 << NETSEC_RX_PKT_FS_FIELD) |
                   (1 << NETSEC_RX_PKT_LS_FIELD);

        if (idx == DESC_NUM - 1)
                attr |= (1 << NETSEC_RX_PKT_LD_FIELD);

        de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
        de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
        de->buf_len_info = desc->len;
        de->attr = attr;
        dma_wmb();

        dring->desc[idx].dma_addr = desc->dma_addr;
        dring->desc[idx].addr = desc->addr;
        dring->desc[idx].len = desc->len;
}

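/* Reap completed TX descriptors: walk the ring from the tail while the
 * OWN bit is clear, unmap buffers the driver mapped itself, free skbs
 * or return XDP frames, and report completed work back to the stack.
 * Returns true if at least one descriptor was cleaned.
 */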
static bool netsec_clean_tx_dring(struct netsec_priv *priv)
{
        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
        struct netsec_de *entry;
        int tail = dring->tail;
        unsigned int bytes;
        int cnt = 0;

        if (dring->is_xdp)
                spin_lock(&dring->lock);

        bytes = 0;
        entry = dring->vaddr + DESC_SZ * tail;

        while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) &&
               cnt < DESC_NUM) {
                struct netsec_desc *desc;
                int eop;

                desc = &dring->desc[tail];
                eop = (entry->attr >> NETSEC_TX_LAST) & 1;
                dma_rmb();

                /* if buf_type is either TYPE_NETSEC_SKB or
                 * TYPE_NETSEC_XDP_NDO, we mapped it
                 */
                if (desc->buf_type != TYPE_NETSEC_XDP_TX)
                        dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
                                         DMA_TO_DEVICE);

                if (!eop)
                        goto next;

                if (desc->buf_type == TYPE_NETSEC_SKB) {
                        bytes += desc->skb->len;
                        dev_kfree_skb(desc->skb);
                } else {
                        xdp_return_frame(desc->xdpf);
                }
next:
                /* clean up so netsec_uninit_pkt_dring() won't free the skb
                 * again
                 */
                *desc = (struct netsec_desc){};

                /* entry->attr is not going to be accessed by the NIC until
                 * netsec_set_tx_de() is called. No need for a dma_wmb() here
                 */
                entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
                /* move tail ahead */
                dring->tail = (tail + 1) % DESC_NUM;

                tail = dring->tail;
                entry = dring->vaddr + DESC_SZ * tail;
                cnt++;
        }
        if (dring->is_xdp)
                spin_unlock(&dring->lock);

        if (!cnt)
                return false;

        /* reading the register clears the irq */
        netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);

        priv->ndev->stats.tx_packets += cnt;
        priv->ndev->stats.tx_bytes += bytes;

        netdev_completed_queue(priv->ndev, cnt, bytes);

        return true;
}

static void netsec_process_tx(struct netsec_priv *priv)
{
        struct net_device *ndev = priv->ndev;
        bool cleaned;

        cleaned = netsec_clean_tx_dring(priv);

        if (cleaned && netif_queue_stopped(ndev)) {
                /* Make sure we update the value; anyone stopping the queue
                 * after this will read the proper consumer idx
                 */
                smp_wmb();
                netif_wake_queue(ndev);
        }
}

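/* RX buffers come from the page pool one full page at a time and are
 * laid out identically for the XDP and non-XDP paths:
 *
 *   NETSEC_RXBUF_HEADROOM | packet data | skb_shared_info
 *
 * so the usable payload length is PAGE_SIZE - NETSEC_RX_BUF_NON_DATA.
 */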
static void *netsec_alloc_rx_data(struct netsec_priv *priv,
                                  dma_addr_t *dma_handle, u16 *desc_len)
{
        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
        enum dma_data_direction dma_dir;
        struct page *page;

        page = page_pool_dev_alloc_pages(dring->page_pool);
        if (!page)
                return NULL;

        /* We allocate the same buffer length for XDP and non-XDP cases.
         * The page_pool API will map the whole page; we skip the space
         * reserved for network headroom and/or XDP
         */
        *dma_handle = page_pool_get_dma_addr(page) + NETSEC_RXBUF_HEADROOM;
        /* Make sure the incoming payload fits in the page for XDP and non-XDP
         * cases and reserve enough space for headroom + skb_shared_info
         */
        *desc_len = PAGE_SIZE - NETSEC_RX_BUF_NON_DATA;
        dma_dir = page_pool_get_dma_dir(dring->page_pool);
        dma_sync_single_for_device(priv->dev, *dma_handle, *desc_len, dma_dir);

        return page_address(page);
}

static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
{
        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
        u16 idx = from;

        while (num) {
                netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
                idx++;
                if (idx >= DESC_NUM)
                        idx = 0;
                num--;
        }
}

static void netsec_xdp_ring_tx_db(struct netsec_priv *priv, u16 pkts)
{
        if (likely(pkts))
                netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, pkts);
}

static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
                                   u16 pkts)
{
        if (xdp_res & NETSEC_XDP_REDIR)
                xdp_do_flush_map();

        if (xdp_res & NETSEC_XDP_TX)
                netsec_xdp_ring_tx_db(priv, pkts);
}

static void netsec_set_tx_de(struct netsec_priv *priv,
                             struct netsec_desc_ring *dring,
                             const struct netsec_tx_pkt_ctrl *tx_ctrl,
                             const struct netsec_desc *desc, void *buf)
{
        int idx = dring->head;
        struct netsec_de *de;
        u32 attr;

        de = dring->vaddr + (DESC_SZ * idx);

        attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
               (1 << NETSEC_TX_SHIFT_PT_FIELD) |
               (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
               (1 << NETSEC_TX_SHIFT_FS_FIELD) |
               (1 << NETSEC_TX_LAST) |
               (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
               (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
               (1 << NETSEC_TX_SHIFT_TRS_FIELD);
        if (idx == DESC_NUM - 1)
                attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);

        de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
        de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
        de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
        de->attr = attr;
        /* under spin_lock if using XDP */
        if (!dring->is_xdp)
                dma_wmb();

        dring->desc[idx] = *desc;
        if (desc->buf_type == TYPE_NETSEC_SKB)
                dring->desc[idx].skb = buf;
        else if (desc->buf_type == TYPE_NETSEC_XDP_TX ||
                 desc->buf_type == TYPE_NETSEC_XDP_NDO)
                dring->desc[idx].xdpf = buf;

        /* move head ahead */
        dring->head = (dring->head + 1) % DESC_NUM;
}

/* The current driver only supports 1 Txq; this should run under spin_lock() */
static u32 netsec_xdp_queue_one(struct netsec_priv *priv,
                                struct xdp_frame *xdpf, bool is_ndo)
{
        struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
        struct page *page = virt_to_page(xdpf->data);
        struct netsec_tx_pkt_ctrl tx_ctrl = {};
        struct netsec_desc tx_desc;
        dma_addr_t dma_handle;
        u16 filled;

        if (tx_ring->head >= tx_ring->tail)
                filled = tx_ring->head - tx_ring->tail;
        else
                filled = tx_ring->head + DESC_NUM - tx_ring->tail;

        if (DESC_NUM - filled <= 1)
                return NETSEC_XDP_CONSUMED;

        if (is_ndo) {
                /* this is for ndo_xdp_xmit; the buffer needs mapping before
                 * sending
                 */
                dma_handle = dma_map_single(priv->dev, xdpf->data, xdpf->len,
                                            DMA_TO_DEVICE);
                if (dma_mapping_error(priv->dev, dma_handle))
                        return NETSEC_XDP_CONSUMED;
                tx_desc.buf_type = TYPE_NETSEC_XDP_NDO;
        } else {
                /* This is the device Rx buffer from page_pool. No need to
                 * remap; just sync and send it
                 */
                struct netsec_desc_ring *rx_ring =
                        &priv->desc_ring[NETSEC_RING_RX];
                enum dma_data_direction dma_dir =
                        page_pool_get_dma_dir(rx_ring->page_pool);

                dma_handle = page_pool_get_dma_addr(page) +
                        NETSEC_RXBUF_HEADROOM;
                dma_sync_single_for_device(priv->dev, dma_handle, xdpf->len,
                                           dma_dir);
                tx_desc.buf_type = TYPE_NETSEC_XDP_TX;
        }

        tx_desc.dma_addr = dma_handle;
        tx_desc.addr = xdpf->data;
        tx_desc.len = xdpf->len;

        netsec_set_tx_de(priv, tx_ring, &tx_ctrl, &tx_desc, xdpf);

        return NETSEC_XDP_TX;
}

static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
{
        struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
        struct xdp_frame *xdpf = convert_to_xdp_frame(xdp);
        u32 ret;

        if (unlikely(!xdpf))
                return NETSEC_XDP_CONSUMED;

        spin_lock(&tx_ring->lock);
        ret = netsec_xdp_queue_one(priv, xdpf, false);
        spin_unlock(&tx_ring->lock);

        return ret;
}

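/* Run the XDP program on one buffer and fold its verdict into the
 * driver's NETSEC_XDP_* result bits: PASS falls through to the skb
 * path, TX is queued back on the single TX ring, REDIRECT is handed to
 * xdp_do_redirect(), and ABORTED/DROP (or a failed TX/redirect)
 * consume the buffer.
 */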
static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
                          struct xdp_buff *xdp)
{
        u32 ret = NETSEC_XDP_PASS;
        int err;
        u32 act;

        act = bpf_prog_run_xdp(prog, xdp);

        switch (act) {
        case XDP_PASS:
                ret = NETSEC_XDP_PASS;
                break;
        case XDP_TX:
                ret = netsec_xdp_xmit_back(priv, xdp);
                if (ret != NETSEC_XDP_TX)
                        xdp_return_buff(xdp);
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(priv->ndev, xdp, prog);
                if (!err) {
                        ret = NETSEC_XDP_REDIR;
                } else {
                        ret = NETSEC_XDP_CONSUMED;
                        xdp_return_buff(xdp);
                }
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
                /* fall through */
        case XDP_ABORTED:
                trace_xdp_exception(priv->ndev, prog, act);
                /* fall through -- handle aborts by dropping packet */
        case XDP_DROP:
                ret = NETSEC_XDP_CONSUMED;
                xdp_return_buff(xdp);
                break;
        }

        return ret;
}

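/* NAPI RX loop: consume descriptors the hardware has written back (OWN
 * bit clear), run XDP if a program is attached, and otherwise build an
 * skb around the buffer and feed it to GRO. Every processed slot is
 * refilled with a fresh page-pool buffer before the tail advances.
 */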
static int netsec_process_rx(struct netsec_priv *priv, int budget)
{
        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
        struct net_device *ndev = priv->ndev;
        struct netsec_rx_pkt_info rx_info;
        enum dma_data_direction dma_dir;
        struct bpf_prog *xdp_prog;
        struct sk_buff *skb = NULL;
        u16 xdp_xmit = 0;
        u32 xdp_act = 0;
        int done = 0;

        rcu_read_lock();
        xdp_prog = READ_ONCE(priv->xdp_prog);
        dma_dir = page_pool_get_dma_dir(dring->page_pool);

        while (done < budget) {
                u16 idx = dring->tail;
                struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
                struct netsec_desc *desc = &dring->desc[idx];
                struct page *page = virt_to_page(desc->addr);
                u32 xdp_result = NETSEC_XDP_PASS;
                u16 pkt_len, desc_len;
                dma_addr_t dma_handle;
                struct xdp_buff xdp;
                void *buf_addr;

                if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
                        /* reading the register clears the irq */
                        netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
                        break;
                }

                /* This barrier is needed to keep us from reading
                 * any other fields out of the netsec_de until we have
                 * verified the descriptor has been written back
                 */
                dma_rmb();
                done++;

                pkt_len = de->buf_len_info >> 16;
                rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
                        NETSEC_RX_PKT_ERR_MASK;
                rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
                if (rx_info.err_flag) {
                        netif_err(priv, drv, priv->ndev,
                                  "%s: rx fail err(%d)\n", __func__,
                                  rx_info.err_code);
                        ndev->stats.rx_dropped++;
                        dring->tail = (dring->tail + 1) % DESC_NUM;
                        /* reuse buffer page frag */
                        netsec_rx_fill(priv, idx, 1);
                        continue;
                }
                rx_info.rx_cksum_result =
                        (de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;

                /* allocate a fresh buffer and map it to the hardware.
                 * This will eventually replace the old buffer in the hardware
                 */
                buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);

                if (unlikely(!buf_addr))
                        break;

                dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
                                        dma_dir);
                prefetch(desc->addr);

                xdp.data_hard_start = desc->addr;
                xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
                xdp_set_data_meta_invalid(&xdp);
                xdp.data_end = xdp.data + pkt_len;
                xdp.rxq = &dring->xdp_rxq;

                if (xdp_prog) {
                        xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
                        if (xdp_result != NETSEC_XDP_PASS) {
                                xdp_act |= xdp_result;
                                if (xdp_result == NETSEC_XDP_TX)
                                        xdp_xmit++;
                                goto next;
                        }
                }
                skb = build_skb(desc->addr, desc->len + NETSEC_RX_BUF_NON_DATA);

                if (unlikely(!skb)) {
                        /* If build_skb() fails, page_pool_recycle_direct()
                         * will either unmap and free the page or refill the
                         * cache, depending on the cache state. Since we paid
                         * the allocation cost anyway, try to put the page
                         * back into the cache
                         */
                        page_pool_recycle_direct(dring->page_pool, page);
                        netif_err(priv, drv, priv->ndev,
                                  "rx failed to build skb\n");
                        break;
                }
                page_pool_release_page(dring->page_pool, page);

                skb_reserve(skb, xdp.data - xdp.data_hard_start);
                skb_put(skb, xdp.data_end - xdp.data);
                skb->protocol = eth_type_trans(skb, priv->ndev);

                if (priv->rx_cksum_offload_flag &&
                    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

next:
                if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
                    xdp_result & NETSEC_XDP_RX_OK) {
                        ndev->stats.rx_packets++;
                        ndev->stats.rx_bytes += xdp.data_end - xdp.data;
                }

                /* Update the descriptor with fresh buffers */
                desc->len = desc_len;
                desc->dma_addr = dma_handle;
                desc->addr = buf_addr;

                netsec_rx_fill(priv, idx, 1);
                dring->tail = (dring->tail + 1) % DESC_NUM;
        }
        netsec_finalize_xdp_rx(priv, xdp_act, xdp_xmit);

        rcu_read_unlock();

        return done;
}

static int netsec_napi_poll(struct napi_struct *napi, int budget)
{
        struct netsec_priv *priv;
        int done;

        priv = container_of(napi, struct netsec_priv, napi);

        netsec_process_tx(priv);
        done = netsec_process_rx(priv, budget);

        if (done < budget && napi_complete_done(napi, done)) {
                unsigned long flags;

                spin_lock_irqsave(&priv->reglock, flags);
                netsec_write(priv, NETSEC_REG_INTEN_SET,
                             NETSEC_IRQ_RX | NETSEC_IRQ_TX);
                spin_unlock_irqrestore(&priv->reglock, flags);
        }

        return done;
}

static int netsec_desc_used(struct netsec_desc_ring *dring)
{
        int used;

        if (dring->head >= dring->tail)
                used = dring->head - dring->tail;
        else
                used = dring->head + DESC_NUM - dring->tail;

        return used;
}

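/* TX flow control for the single queue: the smp_rmb() below pairs with
 * the smp_wmb() in netsec_process_tx() so that, after stopping the
 * queue, we re-read a consumer index that includes any descriptors the
 * completion path just freed before reporting NETDEV_TX_BUSY.
 */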
static int netsec_check_stop_tx(struct netsec_priv *priv, int used)
{
        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];

        /* keep tail from touching the queue */
        if (DESC_NUM - used < 2) {
                netif_stop_queue(priv->ndev);

                /* Make sure we read the updated value in case
                 * descriptors got freed
                 */
                smp_rmb();

                used = netsec_desc_used(dring);
                if (DESC_NUM - used < 2)
                        return NETDEV_TX_BUSY;

                netif_wake_queue(priv->ndev);
        }

        return 0;
}

static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
                                            struct net_device *ndev)
{
        struct netsec_priv *priv = netdev_priv(ndev);
        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
        struct netsec_tx_pkt_ctrl tx_ctrl = {};
        struct netsec_desc tx_desc;
        u16 tso_seg_len = 0;
        int filled;

        if (dring->is_xdp)
                spin_lock_bh(&dring->lock);
        filled = netsec_desc_used(dring);
        if (netsec_check_stop_tx(priv, filled)) {
                if (dring->is_xdp)
                        spin_unlock_bh(&dring->lock);
                net_warn_ratelimited("%s %s Tx queue full\n",
                                     dev_name(priv->dev), ndev->name);
                return NETDEV_TX_BUSY;
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                tx_ctrl.cksum_offload_flag = true;

        if (skb_is_gso(skb))
                tso_seg_len = skb_shinfo(skb)->gso_size;

        if (tso_seg_len > 0) {
                if (skb->protocol == htons(ETH_P_IP)) {
                        ip_hdr(skb)->tot_len = 0;
                        tcp_hdr(skb)->check =
                                ~tcp_v4_check(0, ip_hdr(skb)->saddr,
                                              ip_hdr(skb)->daddr, 0);
                } else {
                        ipv6_hdr(skb)->payload_len = 0;
                        tcp_hdr(skb)->check =
                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                 &ipv6_hdr(skb)->daddr,
                                                 0, IPPROTO_TCP, 0);
                }

                tx_ctrl.tcp_seg_offload_flag = true;
                tx_ctrl.tcp_seg_len = tso_seg_len;
        }

        tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
                                          skb_headlen(skb), DMA_TO_DEVICE);
        if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
                if (dring->is_xdp)
                        spin_unlock_bh(&dring->lock);
                netif_err(priv, drv, priv->ndev,
                          "%s: DMA mapping failed\n", __func__);
                ndev->stats.tx_dropped++;
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
        tx_desc.addr = skb->data;
        tx_desc.len = skb_headlen(skb);
        tx_desc.buf_type = TYPE_NETSEC_SKB;

        skb_tx_timestamp(skb);
        netdev_sent_queue(priv->ndev, skb->len);

        netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
        if (dring->is_xdp)
                spin_unlock_bh(&dring->lock);
        netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */

        return NETDEV_TX_OK;
}

static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
{
        struct netsec_desc_ring *dring = &priv->desc_ring[id];
        struct netsec_desc *desc;
        u16 idx;

        if (!dring->vaddr || !dring->desc)
                return;
        for (idx = 0; idx < DESC_NUM; idx++) {
                desc = &dring->desc[idx];
                if (!desc->addr)
                        continue;

                if (id == NETSEC_RING_RX) {
                        struct page *page = virt_to_page(desc->addr);

                        page_pool_put_page(dring->page_pool, page, false);
                } else if (id == NETSEC_RING_TX) {
                        dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
                                         DMA_TO_DEVICE);
                        dev_kfree_skb(desc->skb);
                }
        }

        /* Rx is currently using page_pool */
        if (id == NETSEC_RING_RX) {
                if (xdp_rxq_info_is_reg(&dring->xdp_rxq))
                        xdp_rxq_info_unreg(&dring->xdp_rxq);
                page_pool_destroy(dring->page_pool);
        }

        memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
        memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);

        dring->head = 0;
        dring->tail = 0;

        if (id == NETSEC_RING_TX)
                netdev_reset_queue(priv->ndev);
}

static void netsec_free_dring(struct netsec_priv *priv, int id)
{
        struct netsec_desc_ring *dring = &priv->desc_ring[id];

        if (dring->vaddr) {
                dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
                                  dring->vaddr, dring->desc_dma);
                dring->vaddr = NULL;
        }

        kfree(dring->desc);
        dring->desc = NULL;
}

static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
{
        struct netsec_desc_ring *dring = &priv->desc_ring[id];

        dring->vaddr = dma_alloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
                                          &dring->desc_dma, GFP_KERNEL);
        if (!dring->vaddr)
                goto err;

        dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
        if (!dring->desc)
                goto err;

        return 0;
err:
        netsec_free_dring(priv, id);

        return -ENOMEM;
}

static void netsec_setup_tx_dring(struct netsec_priv *priv)
{
        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
        struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
        int i;

        for (i = 0; i < DESC_NUM; i++) {
                struct netsec_de *de;

                de = dring->vaddr + (DESC_SZ * i);
                /* de->attr is not going to be accessed by the NIC
                 * until netsec_set_tx_de() is called.
                 * No need for a dma_wmb() here
                 */
                de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
        }

        dring->is_xdp = !!xdp_prog;
}

static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
        struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
        struct page_pool_params pp_params = { 0 };
        int i, err;

        pp_params.order = 0;
        /* internal DMA mapping in page_pool */
        pp_params.flags = PP_FLAG_DMA_MAP;
        pp_params.pool_size = DESC_NUM;
        pp_params.nid = cpu_to_node(0);
        pp_params.dev = priv->dev;
        pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;

        dring->page_pool = page_pool_create(&pp_params);
        if (IS_ERR(dring->page_pool)) {
                err = PTR_ERR(dring->page_pool);
                dring->page_pool = NULL;
                goto err_out;
        }

        err = xdp_rxq_info_reg(&dring->xdp_rxq, priv->ndev, 0);
        if (err)
                goto err_out;

        err = xdp_rxq_info_reg_mem_model(&dring->xdp_rxq, MEM_TYPE_PAGE_POOL,
                                         dring->page_pool);
        if (err)
                goto err_out;

        for (i = 0; i < DESC_NUM; i++) {
                struct netsec_desc *desc = &dring->desc[i];
                dma_addr_t dma_handle;
                void *buf;
                u16 len;

                buf = netsec_alloc_rx_data(priv, &dma_handle, &len);

                if (!buf) {
                        err = -ENOMEM;
                        goto err_out;
                }
                desc->dma_addr = dma_handle;
                desc->addr = buf;
                desc->len = len;
        }

        netsec_rx_fill(priv, 0, DESC_NUM);

        return 0;

err_out:
        netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
        return err;
}

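/* The engine microcode is not embedded in the driver. The EEPROM
 * window advertises the physical address and word count of each image
 * (HM, MH and packet engine), and each image is streamed word by word
 * into the corresponding command buffer register.
 */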
static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
                                           u32 addr_h, u32 addr_l, u32 size)
{
        u64 base = (u64)addr_h << 32 | addr_l;
        void __iomem *ucode;
        u32 i;

        ucode = ioremap(base, size * sizeof(u32));
        if (!ucode)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                netsec_write(priv, reg, readl(ucode + i * 4));

        iounmap(ucode);
        return 0;
}

static int netsec_netdev_load_microcode(struct netsec_priv *priv)
{
        u32 addr_h, addr_l, size;
        int err;

        addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
        addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
        size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
        err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
                                              addr_h, addr_l, size);
        if (err)
                return err;

        addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
        addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
        size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
        err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
                                              addr_h, addr_l, size);
        if (err)
                return err;

        addr_h = 0;
        addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
        size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
        err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
                                              addr_h, addr_l, size);
        if (err)
                return err;

        return 0;
}

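/* Full datapath (re)initialisation: stop the DMA engines, soft-reset
 * the block, program the descriptor ring base addresses and
 * endianness, optionally reload the microcode, restart the engines and
 * switch to normal mode. TX/RX interrupts are left masked for the
 * caller to enable.
 */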
1393static int netsec_reset_hardware(struct netsec_priv *priv,
1394                                 bool load_ucode)
1395{
1396        u32 value;
1397        int err;
1398
1399        /* stop DMA engines */
1400        if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
1401                netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
1402                             NETSEC_DMA_CTRL_REG_STOP);
1403                netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
1404                             NETSEC_DMA_CTRL_REG_STOP);
1405
1406                while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
1407                       NETSEC_DMA_CTRL_REG_STOP)
1408                        cpu_relax();
1409
1410                while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
1411                       NETSEC_DMA_CTRL_REG_STOP)
1412                        cpu_relax();
1413        }
1414
1415        netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
1416        netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
1417        netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);
1418
1419        while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
1420                cpu_relax();
1421
1422        /* set desc_start addr */
1423        netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
1424                     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
1425        netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
1426                     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
1427
1428        netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
1429                     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
1430        netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
1431                     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
1432
1433        /* set normal tx dring ring config */
1434        netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
1435                     1 << NETSEC_REG_DESC_ENDIAN);
1436        netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
1437                     1 << NETSEC_REG_DESC_ENDIAN);
1438
1439        if (load_ucode) {
1440                err = netsec_netdev_load_microcode(priv);
1441                if (err) {
1442                        netif_err(priv, probe, priv->ndev,
1443                                  "%s: failed to load microcode (%d)\n",
1444                                  __func__, err);
1445                        return err;
1446                }
1447        }
1448
1449        /* start DMA engines */
1450        netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
1451        netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);
1452
1453        usleep_range(1000, 2000);
1454
1455        if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
1456              NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
1457                netif_err(priv, probe, priv->ndev,
1458                          "microengine start failed\n");
1459                return -ENXIO;
1460        }
1461        netsec_write(priv, NETSEC_REG_TOP_STATUS,
1462                     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);
1463
1464        value = NETSEC_PKT_CTRL_REG_MODE_NRM;
1465        if (priv->ndev->mtu > ETH_DATA_LEN)
1466                value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;
1467
1468        /* change to normal mode */
1469        netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
1470        netsec_write(priv, NETSEC_REG_PKT_CTRL, value);
1471
1472        while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
1473                NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
1474                cpu_relax();
1475
1476        /* clear any pending EMPTY/ERR irq status */
1477        netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);
1478
1479        /* Disable TX & RX intr */
1480        netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
1481
1482        return 0;
1483}
1484
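/* Bring up the GMAC once the PHY reports link: soft-reset the MAC DMA,
 * program the fixed descriptor list addresses, push the negotiated PHY
 * state into the MAC (netsec_mac_update_to_phy_state()), re-apply the
 * interrupt coalescing settings and finally set the OMR start-Rx/Tx
 * bits.
 */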
1485static int netsec_start_gmac(struct netsec_priv *priv)
1486{
1487        struct phy_device *phydev = priv->ndev->phydev;
1488        u32 value = 0;
1489        int ret;
1490
1491        if (phydev->speed != SPEED_1000)
1492                value = (NETSEC_GMAC_MCR_REG_CST |
1493                         NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);
1494
1495        if (netsec_mac_write(priv, GMAC_REG_MCR, value))
1496                return -ETIMEDOUT;
1497        if (netsec_mac_write(priv, GMAC_REG_BMR,
1498                             NETSEC_GMAC_BMR_REG_RESET))
1499                return -ETIMEDOUT;
1500
1501        /* Wait for the soft reset to complete */
1502        usleep_range(1000, 5000);
1503
1504        ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
1505        if (ret)
1506                return ret;
1507        if (value & NETSEC_GMAC_BMR_REG_SWR)
1508                return -EAGAIN;
1509
1510        netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
1511        if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
1512                return -ETIMEDOUT;
1513
1514        netsec_write(priv, MAC_REG_DESC_INIT, 1);
1515        if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
1516                return -ETIMEDOUT;
1517
1518        if (netsec_mac_write(priv, GMAC_REG_BMR,
1519                             NETSEC_GMAC_BMR_REG_COMMON))
1520                return -ETIMEDOUT;
1521        if (netsec_mac_write(priv, GMAC_REG_RDLAR,
1522                             NETSEC_GMAC_RDLAR_REG_COMMON))
1523                return -ETIMEDOUT;
1524        if (netsec_mac_write(priv, GMAC_REG_TDLAR,
1525                             NETSEC_GMAC_TDLAR_REG_COMMON))
1526                return -ETIMEDOUT;
1527        if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
1528                return -ETIMEDOUT;
1529
1530        ret = netsec_mac_update_to_phy_state(priv);
1531        if (ret)
1532                return ret;
1533
1534        ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
1535        if (ret)
1536                return ret;
1537
1538        value |= NETSEC_GMAC_OMR_REG_SR;
1539        value |= NETSEC_GMAC_OMR_REG_ST;
1540
1541        netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
1542        netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
1543
1544        netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);
1545
1546        if (netsec_mac_write(priv, GMAC_REG_OMR, value))
1547                return -ETIMEDOUT;
1548
1549        return 0;
1550}
1551
1552static int netsec_stop_gmac(struct netsec_priv *priv)
1553{
1554        u32 value;
1555        int ret;
1556
1557        ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
1558        if (ret)
1559                return ret;
1560        value &= ~NETSEC_GMAC_OMR_REG_SR;
1561        value &= ~NETSEC_GMAC_OMR_REG_ST;
1562
1563        /* disable all interrupts */
1564        netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
1565        netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
1566
1567        return netsec_mac_write(priv, GMAC_REG_OMR, value);
1568}
1569
1570static void netsec_phy_adjust_link(struct net_device *ndev)
1571{
1572        struct netsec_priv *priv = netdev_priv(ndev);
1573
1574        if (ndev->phydev->link)
1575                netsec_start_gmac(priv);
1576        else
1577                netsec_stop_gmac(priv);
1578
1579        phy_print_status(ndev->phydev);
1580}
1581
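/* Top half: ack whatever Tx/Rx events are pending, mask the Tx/Rx
 * interrupt sources under reglock and defer the real work to NAPI.
 * The sources are presumably unmasked again from the NAPI poll
 * routine once polling completes.
 */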
1582static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
1583{
1584        struct netsec_priv *priv = dev_id;
1585        u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
1586        unsigned long flags;
1587
1588        /* Ack the pending Tx/Rx events before masking the sources below */
1589        if (status & NETSEC_IRQ_TX) {
1590                val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
1591                netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
1592        }
1593        if (status & NETSEC_IRQ_RX) {
1594                val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
1595                netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
1596        }
1597
1598        spin_lock_irqsave(&priv->reglock, flags);
1599        netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
1600        spin_unlock_irqrestore(&priv->reglock, flags);
1601
1602        napi_schedule(&priv->napi);
1603
1604        return IRQ_HANDLED;
1605}
1606
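/* ndo_open: take a runtime PM reference, set up both descriptor rings,
 * request the (shared) interrupt, attach the PHY (via the DT phy-handle
 * on DT systems, or the pre-created phy_device otherwise), and only
 * then unmask the Tx/Rx interrupt sources.
 */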
1607static int netsec_netdev_open(struct net_device *ndev)
1608{
1609        struct netsec_priv *priv = netdev_priv(ndev);
1610        int ret;
1611
1612        pm_runtime_get_sync(priv->dev);
1613
1614        netsec_setup_tx_dring(priv);
1615        ret = netsec_setup_rx_dring(priv);
1616        if (ret) {
1617                netif_err(priv, probe, priv->ndev,
1618                          "%s: failed to set up Rx ring\n", __func__);
1619                goto err1;
1620        }
1621
1622        ret = request_irq(priv->ndev->irq, netsec_irq_handler,
1623                          IRQF_SHARED, "netsec", priv);
1624        if (ret) {
1625                netif_err(priv, drv, priv->ndev, "request_irq failed\n");
1626                goto err2;
1627        }
1628
1629        if (dev_of_node(priv->dev)) {
1630                if (!of_phy_connect(priv->ndev, priv->phy_np,
1631                                    netsec_phy_adjust_link, 0,
1632                                    priv->phy_interface)) {
1633                        netif_err(priv, link, priv->ndev, "missing PHY\n");
1634                        ret = -ENODEV;
1635                        goto err3;
1636                }
1637        } else {
1638                ret = phy_connect_direct(priv->ndev, priv->phydev,
1639                                         netsec_phy_adjust_link,
1640                                         priv->phy_interface);
1641                if (ret) {
1642                        netif_err(priv, link, priv->ndev,
1643                                  "phy_connect_direct() failed (%d)\n", ret);
1644                        goto err3;
1645                }
1646        }
1647
1648        phy_start(ndev->phydev);
1649
1650        netsec_start_gmac(priv);
1651        napi_enable(&priv->napi);
1652        netif_start_queue(ndev);
1653
1654        /* Enable TX+RX intr. */
1655        netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
1656
1657        return 0;
1658err3:
1659        free_irq(priv->ndev->irq, priv);
1660err2:
1661        netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
1662err1:
1663        pm_runtime_put_sync(priv->dev);
1664        return ret;
1665}
1666
1667static int netsec_netdev_stop(struct net_device *ndev)
1668{
1669        int ret;
1670        struct netsec_priv *priv = netdev_priv(ndev);
1671
1672        netif_stop_queue(priv->ndev);
1673        dma_wmb();
1674
1675        napi_disable(&priv->napi);
1676
1677        netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
1678        netsec_stop_gmac(priv);
1679
1680        free_irq(priv->ndev->irq, priv);
1681
1682        netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
1683        netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
1684
1685        phy_stop(ndev->phydev);
1686        phy_disconnect(ndev->phydev);
1687
1688        ret = netsec_reset_hardware(priv, false);
1689
1690        pm_runtime_put_sync(priv->dev);
1691
1692        return ret;
1693}
1694
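/* ndo_init: allocate both descriptor rings and run the first full
 * hardware reset, including the microcode load.  The PHY is put into
 * power-down first so it cannot disturb the MAC while the engine comes
 * up; phy_start() at open time brings it back out of power-down.
 */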
1695static int netsec_netdev_init(struct net_device *ndev)
1696{
1697        struct netsec_priv *priv = netdev_priv(ndev);
1698        int ret;
1699        u16 data;
1700
1701        BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);
1702
1703        ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
1704        if (ret)
1705                return ret;
1706
1707        ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
1708        if (ret)
1709                goto err1;
1710
1711        /* put the PHY into power-down mode */
1712        data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
1713                BMCR_PDOWN;
1714        netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
1715
1716        ret = netsec_reset_hardware(priv, true);
1717        if (ret)
1718                goto err2;
1719
1720        spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
1721        spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);
1722
1723        return 0;
1724err2:
1725        netsec_free_dring(priv, NETSEC_RING_RX);
1726err1:
1727        netsec_free_dring(priv, NETSEC_RING_TX);
1728        return ret;
1729}
1730
1731static void netsec_netdev_uninit(struct net_device *ndev)
1732{
1733        struct netsec_priv *priv = netdev_priv(ndev);
1734
1735        netsec_free_dring(priv, NETSEC_RING_RX);
1736        netsec_free_dring(priv, NETSEC_RING_TX);
1737}
1738
1739static int netsec_netdev_set_features(struct net_device *ndev,
1740                                      netdev_features_t features)
1741{
1742        struct netsec_priv *priv = netdev_priv(ndev);
1743
1744        priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);
1745
1746        return 0;
1747}
1748
1749static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
1750                               int cmd)
1751{
1752        return phy_mii_ioctl(ndev->phydev, ifr, cmd);
1753}
1754
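/* ndo_xdp_xmit: queue up to @n frames on the Tx ring under the ring
 * lock.  Frames that cannot be queued are returned to the allocator
 * and counted as drops; the Tx doorbell is only rung when the caller
 * passes XDP_XMIT_FLUSH, so a batch costs a single register write.
 */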
1755static int netsec_xdp_xmit(struct net_device *ndev, int n,
1756                           struct xdp_frame **frames, u32 flags)
1757{
1758        struct netsec_priv *priv = netdev_priv(ndev);
1759        struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
1760        int drops = 0;
1761        int i;
1762
1763        if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1764                return -EINVAL;
1765
1766        spin_lock(&tx_ring->lock);
1767        for (i = 0; i < n; i++) {
1768                struct xdp_frame *xdpf = frames[i];
1769                int err;
1770
1771                err = netsec_xdp_queue_one(priv, xdpf, true);
1772                if (err != NETSEC_XDP_TX) {
1773                        xdp_return_frame_rx_napi(xdpf);
1774                        drops++;
1775                } else {
1776                        tx_ring->xdp_xmit++;
1777                }
1778        }
1779        spin_unlock(&tx_ring->lock);
1780
1781        if (unlikely(flags & XDP_XMIT_FLUSH)) {
1782                netsec_xdp_ring_tx_db(priv, tx_ring->xdp_xmit);
1783                tx_ring->xdp_xmit = 0;
1784        }
1785
1786        return n - drops;
1787}
1788
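/* Install or remove an XDP program.  The interface is stopped and
 * reopened around the swap so the Rx ring is rebuilt for the new
 * program.
 */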
1789static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
1790                            struct netlink_ext_ack *extack)
1791{
1792        struct net_device *dev = priv->ndev;
1793        struct bpf_prog *old_prog;
1794
1795        /* For now, support only standard-MTU sized frames */
1796        if (prog && dev->mtu > 1500) {
1797                NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
1798                return -EOPNOTSUPP;
1799        }
1800
1801        if (netif_running(dev))
1802                netsec_netdev_stop(dev);
1803
1804        /* Detach old prog, if any */
1805        old_prog = xchg(&priv->xdp_prog, prog);
1806        if (old_prog)
1807                bpf_prog_put(old_prog);
1808
1809        if (netif_running(dev))
1810                netsec_netdev_open(dev);
1811
1812        return 0;
1813}
1814
1815static int netsec_xdp(struct net_device *ndev, struct netdev_bpf *xdp)
1816{
1817        struct netsec_priv *priv = netdev_priv(ndev);
1818
1819        switch (xdp->command) {
1820        case XDP_SETUP_PROG:
1821                return netsec_xdp_setup(priv, xdp->prog, xdp->extack);
1822        case XDP_QUERY_PROG:
1823                xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
1824                return 0;
1825        default:
1826                return -EINVAL;
1827        }
1828}
1829
1830static const struct net_device_ops netsec_netdev_ops = {
1831        .ndo_init               = netsec_netdev_init,
1832        .ndo_uninit             = netsec_netdev_uninit,
1833        .ndo_open               = netsec_netdev_open,
1834        .ndo_stop               = netsec_netdev_stop,
1835        .ndo_start_xmit         = netsec_netdev_start_xmit,
1836        .ndo_set_features       = netsec_netdev_set_features,
1837        .ndo_set_mac_address    = eth_mac_addr,
1838        .ndo_validate_addr      = eth_validate_addr,
1839        .ndo_do_ioctl           = netsec_netdev_ioctl,
1840        .ndo_xdp_xmit           = netsec_xdp_xmit,
1841        .ndo_bpf                = netsec_xdp,
1842};
1843
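/* DT probe path: the PHY comes from the mandatory 'phy-handle' property
 * and the reference clock frequency from the clock provider itself.
 * Compare netsec_acpi_probe() below, where both must be spelled out as
 * plain device properties.
 */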
1844static int netsec_of_probe(struct platform_device *pdev,
1845                           struct netsec_priv *priv, u32 *phy_addr)
1846{
1847        priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1848        if (!priv->phy_np) {
1849                dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
1850                return -EINVAL;
1851        }
1852
1853        *phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);
1854
1855        priv->clk = devm_clk_get(&pdev->dev, NULL); /* the 'phy_ref_clk' clock */
1856        if (IS_ERR(priv->clk)) {
1857                dev_err(&pdev->dev, "phy_ref_clk not found\n");
1858                return PTR_ERR(priv->clk);
1859        }
1860        priv->freq = clk_get_rate(priv->clk);
1861
1862        return 0;
1863}
1864
1865static int netsec_acpi_probe(struct platform_device *pdev,
1866                             struct netsec_priv *priv, u32 *phy_addr)
1867{
1868        int ret;
1869
1870        if (!IS_ENABLED(CONFIG_ACPI))
1871                return -ENODEV;
1872
1873        ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
1874        if (ret) {
1875                dev_err(&pdev->dev,
1876                        "missing required property 'phy-channel'\n");
1877                return ret;
1878        }
1879
1880        ret = device_property_read_u32(&pdev->dev,
1881                                       "socionext,phy-clock-frequency",
1882                                       &priv->freq);
1883        if (ret)
1884                dev_err(&pdev->dev,
1885                        "missing required property 'socionext,phy-clock-frequency'\n");
1886        return ret;
1887}
1888
1889static void netsec_unregister_mdio(struct netsec_priv *priv)
1890{
1891        struct phy_device *phydev = priv->phydev;
1892
1893        if (!dev_of_node(priv->dev) && phydev) {
1894                phy_device_remove(phydev);
1895                phy_device_free(phydev);
1896        }
1897
1898        mdiobus_unregister(priv->mii_bus);
1899}
1900
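/* Register the MDIO bus.  On DT systems the bus is populated from the
 * 'mdio' subnode (or the parent node for older firmware); on ACPI
 * systems auto-probing is masked off and the single PHY at @phy_addr
 * is created and registered by hand.
 */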
1901static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
1902{
1903        struct mii_bus *bus;
1904        int ret;
1905
1906        bus = devm_mdiobus_alloc(priv->dev);
1907        if (!bus)
1908                return -ENOMEM;
1909
1910        snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
1911        bus->priv = priv;
1912        bus->name = "SNI NETSEC MDIO";
1913        bus->read = netsec_phy_read;
1914        bus->write = netsec_phy_write;
1915        bus->parent = priv->dev;
1916        priv->mii_bus = bus;
1917
1918        if (dev_of_node(priv->dev)) {
1919                struct device_node *mdio_node, *parent = dev_of_node(priv->dev);
1920
1921                mdio_node = of_get_child_by_name(parent, "mdio");
1922                if (mdio_node) {
1923                        parent = mdio_node;
1924                } else {
1925                        /* older firmware doesn't populate the 'mdio' subnode;
1926                         * register on the parent node until it is upgraded.
1927                         */
1928                        dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
1929                }
1930
1931                ret = of_mdiobus_register(bus, parent);
1932                of_node_put(mdio_node);
1933
1934                if (ret) {
1935                        dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
1936                        return ret;
1937                }
1938        } else {
1939                /* Mask out all PHYs from auto probing. */
1940                bus->phy_mask = ~0;
1941                ret = mdiobus_register(bus);
1942                if (ret) {
1943                        dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
1944                        return ret;
1945                }
1946
1947                priv->phydev = get_phy_device(bus, phy_addr, false);
1948                if (IS_ERR(priv->phydev)) {
1949                        ret = PTR_ERR(priv->phydev);
1950                        dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
1951                        priv->phydev = NULL;
                        /* the bus was registered above; don't leak it */
                        mdiobus_unregister(bus);
1952                        return -ENODEV;
1953                }
1954
1955                ret = phy_device_register(priv->phydev);
1956                if (ret) {
1957                        mdiobus_unregister(bus);
1958                        dev_err(priv->dev,
1959                                "phy_device_register err(%d)\n", ret);
1960                }
1961        }
1962
1963        return ret;
1964}
1965
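/* Probe: map the MMIO and EEPROM regions, recover the MAC address
 * (device property first, EEPROM second, random fallback last), run
 * the DT- or ACPI-specific setup, check for F_TAIKI-class hardware and
 * register the MDIO bus and the net_device.
 */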
1966static int netsec_probe(struct platform_device *pdev)
1967{
1968        struct resource *mmio_res, *eeprom_res, *irq_res;
1969        u8 *mac, macbuf[ETH_ALEN];
1970        struct netsec_priv *priv;
1971        u32 hw_ver, phy_addr = 0;
1972        struct net_device *ndev;
1973        int ret;
1974
1975        mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1976        if (!mmio_res) {
1977                dev_err(&pdev->dev, "No MMIO resource found.\n");
1978                return -ENODEV;
1979        }
1980
1981        eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1982        if (!eeprom_res) {
1983                dev_err(&pdev->dev, "No EEPROM resource found.\n");
1984                return -ENODEV;
1985        }
1986
1987        irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1988        if (!irq_res) {
1989                dev_err(&pdev->dev, "No IRQ resource found.\n");
1990                return -ENODEV;
1991        }
1992
1993        ndev = alloc_etherdev(sizeof(*priv));
1994        if (!ndev)
1995                return -ENOMEM;
1996
1997        priv = netdev_priv(ndev);
1998
1999        spin_lock_init(&priv->reglock);
2000        SET_NETDEV_DEV(ndev, &pdev->dev);
2001        platform_set_drvdata(pdev, priv);
2002        ndev->irq = irq_res->start;
2003        priv->dev = &pdev->dev;
2004        priv->ndev = ndev;
2005
2006        priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
2007                           NETIF_MSG_LINK | NETIF_MSG_PROBE;
2008
2009        ret = device_get_phy_mode(&pdev->dev);
2010        if (ret < 0) {
2011                dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
2012                ret = -ENODEV;
2013                goto free_ndev;
2014        }
        /* phy_interface_t is unsigned, so do the error check on the int */
        priv->phy_interface = ret;
2015
2016        priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
2017                                    resource_size(mmio_res));
2018        if (!priv->ioaddr) {
2019                dev_err(&pdev->dev, "devm_ioremap() failed\n");
2020                ret = -ENXIO;
2021                goto free_ndev;
2022        }
2023
2024        priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
2025                                         resource_size(eeprom_res));
2026        if (!priv->eeprom_base) {
2027                dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
2028                ret = -ENXIO;
2029                goto free_ndev;
2030        }
2031
2032        mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
2033        if (mac)
2034                ether_addr_copy(ndev->dev_addr, mac);
2035
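        /* No usable address from firmware: fall back to the EEPROM.
         * Judging from the byte order below, the address appears to be
         * stored as two little-endian words at NETSEC_EEPROM_MAC_ADDRESS.
         */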
2036        if (priv->eeprom_base &&
2037            (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
2038                void __iomem *macp = priv->eeprom_base +
2039                                        NETSEC_EEPROM_MAC_ADDRESS;
2040
2041                ndev->dev_addr[0] = readb(macp + 3);
2042                ndev->dev_addr[1] = readb(macp + 2);
2043                ndev->dev_addr[2] = readb(macp + 1);
2044                ndev->dev_addr[3] = readb(macp + 0);
2045                ndev->dev_addr[4] = readb(macp + 7);
2046                ndev->dev_addr[5] = readb(macp + 6);
2047        }
2048
2049        if (!is_valid_ether_addr(ndev->dev_addr)) {
2050                dev_warn(&pdev->dev, "No MAC address found, using random\n");
2051                eth_hw_addr_random(ndev);
2052        }
2053
2054        if (dev_of_node(&pdev->dev))
2055                ret = netsec_of_probe(pdev, priv, &phy_addr);
2056        else
2057                ret = netsec_acpi_probe(pdev, priv, &phy_addr);
2058        if (ret)
2059                goto free_ndev;
2060
2061        priv->phy_addr = phy_addr;
2062
2063        if (!priv->freq) {
2064                dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
2065                ret = -ENODEV;
2066                goto free_ndev;
2067        }
2068
2069        /* interrupt coalescing defaults, tuned for throughput */
2070        priv->et_coalesce.rx_coalesce_usecs = 500;
2071        priv->et_coalesce.rx_max_coalesced_frames = 8;
2072        priv->et_coalesce.tx_coalesce_usecs = 500;
2073        priv->et_coalesce.tx_max_coalesced_frames = 8;
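        /* These defaults can be changed at runtime with ethtool, e.g.
         * (assuming the interface is named eth0):
         *   ethtool -C eth0 rx-usecs 500 rx-frames 8
         */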
2074
2075        ret = device_property_read_u32(&pdev->dev, "max-frame-size",
2076                                       &ndev->max_mtu);
2077        if (ret < 0)
2078                ndev->max_mtu = ETH_DATA_LEN;
2079
2080        /* hold a runtime PM reference for the rest of probe; open/close take their own */
2081        pm_runtime_enable(&pdev->dev);
2082        pm_runtime_get_sync(&pdev->dev);
2083
2084        hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
2085        /* this driver only supports F_TAIKI style NETSEC */
2086        if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
2087            NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
2088                ret = -ENODEV;
2089                goto pm_disable;
2090        }
2091
2092        dev_info(&pdev->dev, "hardware revision %d.%d\n",
2093                 hw_ver >> 16, hw_ver & 0xffff);
2094
2095        netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_POLL_WEIGHT);
2096
2097        ndev->netdev_ops = &netsec_netdev_ops;
2098        ndev->ethtool_ops = &netsec_ethtool_ops;
2099
2100        ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
2101                                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2102        ndev->hw_features = ndev->features;
2103
2104        priv->rx_cksum_offload_flag = true;
2105
2106        ret = netsec_register_mdio(priv, phy_addr);
2107        if (ret)
2108                goto unreg_napi;
2109
2110        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
2111                dev_warn(&pdev->dev, "Failed to set DMA mask\n");
2112
2113        ret = register_netdev(ndev);
2114        if (ret) {
2115                netif_err(priv, probe, ndev, "register_netdev() failed\n");
2116                goto unreg_mii;
2117        }
2118
2119        pm_runtime_put_sync(&pdev->dev);
2120        return 0;
2121
2122unreg_mii:
2123        netsec_unregister_mdio(priv);
2124unreg_napi:
2125        netif_napi_del(&priv->napi);
2126pm_disable:
2127        pm_runtime_put_sync(&pdev->dev);
2128        pm_runtime_disable(&pdev->dev);
2129free_ndev:
2130        free_netdev(ndev);
2131        dev_err(&pdev->dev, "init failed\n");
2132
2133        return ret;
2134}
2135
2136static int netsec_remove(struct platform_device *pdev)
2137{
2138        struct netsec_priv *priv = platform_get_drvdata(pdev);
2139
2140        unregister_netdev(priv->ndev);
2141
2142        netsec_unregister_mdio(priv);
2143
2144        netif_napi_del(&priv->napi);
2145
2146        pm_runtime_disable(&pdev->dev);
2147        free_netdev(priv->ndev);
2148
2149        return 0;
2150}
2151
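/* Runtime PM only gates clocks: the external PHY reference clock and,
 * via NETSEC_REG_CLK_EN, the controller's internal D/C/G clock domains.
 */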
2152#ifdef CONFIG_PM
2153static int netsec_runtime_suspend(struct device *dev)
2154{
2155        struct netsec_priv *priv = dev_get_drvdata(dev);
2156
2157        netsec_write(priv, NETSEC_REG_CLK_EN, 0);
2158
2159        clk_disable_unprepare(priv->clk);
2160
2161        return 0;
2162}
2163
2164static int netsec_runtime_resume(struct device *dev)
2165{
2166        struct netsec_priv *priv = dev_get_drvdata(dev);
2167
2168        clk_prepare_enable(priv->clk);
2169
2170        netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
2171                                               NETSEC_CLK_EN_REG_DOM_C |
2172                                               NETSEC_CLK_EN_REG_DOM_G);
2173        return 0;
2174}
2175#endif
2176
2177static const struct dev_pm_ops netsec_pm_ops = {
2178        SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
2179};
2180
2181static const struct of_device_id netsec_dt_ids[] = {
2182        { .compatible = "socionext,synquacer-netsec" },
2183        { }
2184};
2185MODULE_DEVICE_TABLE(of, netsec_dt_ids);
2186
2187#ifdef CONFIG_ACPI
2188static const struct acpi_device_id netsec_acpi_ids[] = {
2189        { "SCX0001" },
2190        { }
2191};
2192MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
2193#endif
2194
2195static struct platform_driver netsec_driver = {
2196        .probe  = netsec_probe,
2197        .remove = netsec_remove,
2198        .driver = {
2199                .name = "netsec",
2200                .pm = &netsec_pm_ops,
2201                .of_match_table = netsec_dt_ids,
2202                .acpi_match_table = ACPI_PTR(netsec_acpi_ids),
2203        },
2204};
2205module_platform_driver(netsec_driver);
2206
2207MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
2208MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
2209MODULE_DESCRIPTION("NETSEC Ethernet driver");
2210MODULE_LICENSE("GPL");
2211