linux/drivers/net/ethernet/socionext/netsec.c
   1// SPDX-License-Identifier: GPL-2.0+
   2
   3#include <linux/types.h>
   4#include <linux/clk.h>
   5#include <linux/platform_device.h>
   6#include <linux/pm_runtime.h>
   7#include <linux/acpi.h>
   8#include <linux/of_mdio.h>
   9#include <linux/etherdevice.h>
  10#include <linux/interrupt.h>
  11#include <linux/io.h>
  12
  13#include <net/tcp.h>
  14#include <net/ip6_checksum.h>
  15
  16#define NETSEC_REG_SOFT_RST                     0x104
  17#define NETSEC_REG_COM_INIT                     0x120
  18
  19#define NETSEC_REG_TOP_STATUS                   0x200
  20#define NETSEC_IRQ_RX                           BIT(1)
  21#define NETSEC_IRQ_TX                           BIT(0)
  22
  23#define NETSEC_REG_TOP_INTEN                    0x204
  24#define NETSEC_REG_INTEN_SET                    0x234
  25#define NETSEC_REG_INTEN_CLR                    0x238
  26
  27#define NETSEC_REG_NRM_TX_STATUS                0x400
  28#define NETSEC_REG_NRM_TX_INTEN                 0x404
  29#define NETSEC_REG_NRM_TX_INTEN_SET             0x428
  30#define NETSEC_REG_NRM_TX_INTEN_CLR             0x42c
  31#define NRM_TX_ST_NTOWNR        BIT(17)
  32#define NRM_TX_ST_TR_ERR        BIT(16)
  33#define NRM_TX_ST_TXDONE        BIT(15)
  34#define NRM_TX_ST_TMREXP        BIT(14)
  35
  36#define NETSEC_REG_NRM_RX_STATUS                0x440
  37#define NETSEC_REG_NRM_RX_INTEN                 0x444
  38#define NETSEC_REG_NRM_RX_INTEN_SET             0x468
  39#define NETSEC_REG_NRM_RX_INTEN_CLR             0x46c
  40#define NRM_RX_ST_RC_ERR        BIT(16)
  41#define NRM_RX_ST_PKTCNT        BIT(15)
  42#define NRM_RX_ST_TMREXP        BIT(14)
  43
  44#define NETSEC_REG_PKT_CMD_BUF                  0xd0
  45
  46#define NETSEC_REG_CLK_EN                       0x100
  47
  48#define NETSEC_REG_PKT_CTRL                     0x140
  49
  50#define NETSEC_REG_DMA_TMR_CTRL                 0x20c
  51#define NETSEC_REG_F_TAIKI_MC_VER               0x22c
  52#define NETSEC_REG_F_TAIKI_VER                  0x230
  53#define NETSEC_REG_DMA_HM_CTRL                  0x214
  54#define NETSEC_REG_DMA_MH_CTRL                  0x220
  55#define NETSEC_REG_ADDR_DIS_CORE                0x218
  56#define NETSEC_REG_DMAC_HM_CMD_BUF              0x210
  57#define NETSEC_REG_DMAC_MH_CMD_BUF              0x21c
  58
  59#define NETSEC_REG_NRM_TX_PKTCNT                0x410
  60
  61#define NETSEC_REG_NRM_TX_DONE_PKTCNT           0x414
  62#define NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT     0x418
  63
  64#define NETSEC_REG_NRM_TX_TMR                   0x41c
  65
  66#define NETSEC_REG_NRM_RX_PKTCNT                0x454
  67#define NETSEC_REG_NRM_RX_RXINT_PKTCNT          0x458
  68#define NETSEC_REG_NRM_TX_TXINT_TMR             0x420
  69#define NETSEC_REG_NRM_RX_RXINT_TMR             0x460
  70
  71#define NETSEC_REG_NRM_RX_TMR                   0x45c
  72
  73#define NETSEC_REG_NRM_TX_DESC_START_UP         0x434
  74#define NETSEC_REG_NRM_TX_DESC_START_LW         0x408
  75#define NETSEC_REG_NRM_RX_DESC_START_UP         0x474
  76#define NETSEC_REG_NRM_RX_DESC_START_LW         0x448
  77
  78#define NETSEC_REG_NRM_TX_CONFIG                0x430
  79#define NETSEC_REG_NRM_RX_CONFIG                0x470
  80
  81#define MAC_REG_STATUS                          0x1024
  82#define MAC_REG_DATA                            0x11c0
  83#define MAC_REG_CMD                             0x11c4
  84#define MAC_REG_FLOW_TH                         0x11cc
  85#define MAC_REG_INTF_SEL                        0x11d4
  86#define MAC_REG_DESC_INIT                       0x11fc
  87#define MAC_REG_DESC_SOFT_RST                   0x1204
  88#define NETSEC_REG_MODE_TRANS_COMP_STATUS       0x500
  89
  90#define GMAC_REG_MCR                            0x0000
  91#define GMAC_REG_MFFR                           0x0004
  92#define GMAC_REG_GAR                            0x0010
  93#define GMAC_REG_GDR                            0x0014
  94#define GMAC_REG_FCR                            0x0018
  95#define GMAC_REG_BMR                            0x1000
  96#define GMAC_REG_RDLAR                          0x100c
  97#define GMAC_REG_TDLAR                          0x1010
  98#define GMAC_REG_OMR                            0x1018
  99
 100#define MHZ(n)          ((n) * 1000 * 1000)
 101
 102#define NETSEC_TX_SHIFT_OWN_FIELD               31
 103#define NETSEC_TX_SHIFT_LD_FIELD                30
 104#define NETSEC_TX_SHIFT_DRID_FIELD              24
 105#define NETSEC_TX_SHIFT_PT_FIELD                21
 106#define NETSEC_TX_SHIFT_TDRID_FIELD             16
 107#define NETSEC_TX_SHIFT_CC_FIELD                15
 108#define NETSEC_TX_SHIFT_FS_FIELD                9
 109#define NETSEC_TX_LAST                          8
 110#define NETSEC_TX_SHIFT_CO                      7
 111#define NETSEC_TX_SHIFT_SO                      6
 112#define NETSEC_TX_SHIFT_TRS_FIELD               4
 113
 114#define NETSEC_RX_PKT_OWN_FIELD                 31
 115#define NETSEC_RX_PKT_LD_FIELD                  30
 116#define NETSEC_RX_PKT_SDRID_FIELD               24
 117#define NETSEC_RX_PKT_FR_FIELD                  23
 118#define NETSEC_RX_PKT_ER_FIELD                  21
 119#define NETSEC_RX_PKT_ERR_FIELD                 16
 120#define NETSEC_RX_PKT_TDRID_FIELD               12
 121#define NETSEC_RX_PKT_FS_FIELD                  9
 122#define NETSEC_RX_PKT_LS_FIELD                  8
 123#define NETSEC_RX_PKT_CO_FIELD                  6
 124
 125#define NETSEC_RX_PKT_ERR_MASK                  3
 126
 127#define NETSEC_MAX_TX_PKT_LEN                   1518
 128#define NETSEC_MAX_TX_JUMBO_PKT_LEN             9018
 129
 130#define NETSEC_RING_GMAC                        15
 131#define NETSEC_RING_MAX                         2
 132
 133#define NETSEC_TCP_SEG_LEN_MAX                  1460
 134#define NETSEC_TCP_JUMBO_SEG_LEN_MAX            8960
 135
 136#define NETSEC_RX_CKSUM_NOTAVAIL                0
 137#define NETSEC_RX_CKSUM_OK                      1
 138#define NETSEC_RX_CKSUM_NG                      2
 139
 140#define NETSEC_TOP_IRQ_REG_CODE_LOAD_END        BIT(20)
 141#define NETSEC_IRQ_TRANSITION_COMPLETE          BIT(4)
 142
 143#define NETSEC_MODE_TRANS_COMP_IRQ_N2T          BIT(20)
 144#define NETSEC_MODE_TRANS_COMP_IRQ_T2N          BIT(19)
 145
 146#define NETSEC_INT_PKTCNT_MAX                   2047
 147
 148#define NETSEC_FLOW_START_TH_MAX                95
 149#define NETSEC_FLOW_STOP_TH_MAX                 95
 150#define NETSEC_FLOW_PAUSE_TIME_MIN              5
 151
 152#define NETSEC_CLK_EN_REG_DOM_ALL               0x3f
 153
 154#define NETSEC_PKT_CTRL_REG_MODE_NRM            BIT(28)
 155#define NETSEC_PKT_CTRL_REG_EN_JUMBO            BIT(27)
 156#define NETSEC_PKT_CTRL_REG_LOG_CHKSUM_ER       BIT(3)
 157#define NETSEC_PKT_CTRL_REG_LOG_HD_INCOMPLETE   BIT(2)
 158#define NETSEC_PKT_CTRL_REG_LOG_HD_ER           BIT(1)
 159#define NETSEC_PKT_CTRL_REG_DRP_NO_MATCH        BIT(0)
 160
 161#define NETSEC_CLK_EN_REG_DOM_G                 BIT(5)
 162#define NETSEC_CLK_EN_REG_DOM_C                 BIT(1)
 163#define NETSEC_CLK_EN_REG_DOM_D                 BIT(0)
 164
 165#define NETSEC_COM_INIT_REG_DB                  BIT(2)
 166#define NETSEC_COM_INIT_REG_CLS                 BIT(1)
 167#define NETSEC_COM_INIT_REG_ALL                 (NETSEC_COM_INIT_REG_CLS | \
 168                                                 NETSEC_COM_INIT_REG_DB)
 169
 170#define NETSEC_SOFT_RST_REG_RESET               0
 171#define NETSEC_SOFT_RST_REG_RUN                 BIT(31)
 172
 173#define NETSEC_DMA_CTRL_REG_STOP                1
 174#define MH_CTRL__MODE_TRANS                     BIT(20)
 175
 176#define NETSEC_GMAC_CMD_ST_READ                 0
 177#define NETSEC_GMAC_CMD_ST_WRITE                BIT(28)
 178#define NETSEC_GMAC_CMD_ST_BUSY                 BIT(31)
 179
 180#define NETSEC_GMAC_BMR_REG_COMMON              0x00412080
 181#define NETSEC_GMAC_BMR_REG_RESET               0x00020181
 182#define NETSEC_GMAC_BMR_REG_SWR                 0x00000001
 183
 184#define NETSEC_GMAC_OMR_REG_ST                  BIT(13)
 185#define NETSEC_GMAC_OMR_REG_SR                  BIT(1)
 186
 187#define NETSEC_GMAC_MCR_REG_IBN                 BIT(30)
 188#define NETSEC_GMAC_MCR_REG_CST                 BIT(25)
 189#define NETSEC_GMAC_MCR_REG_JE                  BIT(20)
 190#define NETSEC_MCR_PS                           BIT(15)
 191#define NETSEC_GMAC_MCR_REG_FES                 BIT(14)
 192#define NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON  0x0000280c
 193#define NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON  0x0001a00c
 194
 195#define NETSEC_FCR_RFE                          BIT(2)
 196#define NETSEC_FCR_TFE                          BIT(1)
 197
 198#define NETSEC_GMAC_GAR_REG_GW                  BIT(1)
 199#define NETSEC_GMAC_GAR_REG_GB                  BIT(0)
 200
 201#define NETSEC_GMAC_GAR_REG_SHIFT_PA            11
 202#define NETSEC_GMAC_GAR_REG_SHIFT_GR            6
 203#define GMAC_REG_SHIFT_CR_GAR                   2
 204
 205#define NETSEC_GMAC_GAR_REG_CR_25_35_MHZ        2
 206#define NETSEC_GMAC_GAR_REG_CR_35_60_MHZ        3
 207#define NETSEC_GMAC_GAR_REG_CR_60_100_MHZ       0
 208#define NETSEC_GMAC_GAR_REG_CR_100_150_MHZ      1
 209#define NETSEC_GMAC_GAR_REG_CR_150_250_MHZ      4
 210#define NETSEC_GMAC_GAR_REG_CR_250_300_MHZ      5
 211
 212#define NETSEC_GMAC_RDLAR_REG_COMMON            0x18000
 213#define NETSEC_GMAC_TDLAR_REG_COMMON            0x1c000
 214
 215#define NETSEC_REG_NETSEC_VER_F_TAIKI           0x50000
 216
 217#define NETSEC_REG_DESC_RING_CONFIG_CFG_UP      BIT(31)
 218#define NETSEC_REG_DESC_RING_CONFIG_CH_RST      BIT(30)
 219#define NETSEC_REG_DESC_TMR_MODE                4
 220#define NETSEC_REG_DESC_ENDIAN                  0
 221
 222#define NETSEC_MAC_DESC_SOFT_RST_SOFT_RST       1
 223#define NETSEC_MAC_DESC_INIT_REG_INIT           1
 224
 225#define NETSEC_EEPROM_MAC_ADDRESS               0x00
 226#define NETSEC_EEPROM_HM_ME_ADDRESS_H           0x08
 227#define NETSEC_EEPROM_HM_ME_ADDRESS_L           0x0C
 228#define NETSEC_EEPROM_HM_ME_SIZE                0x10
 229#define NETSEC_EEPROM_MH_ME_ADDRESS_H           0x14
 230#define NETSEC_EEPROM_MH_ME_ADDRESS_L           0x18
 231#define NETSEC_EEPROM_MH_ME_SIZE                0x1C
 232#define NETSEC_EEPROM_PKT_ME_ADDRESS            0x20
 233#define NETSEC_EEPROM_PKT_ME_SIZE               0x24
 234
 235#define DESC_NUM        128
 236#define NAPI_BUDGET     (DESC_NUM / 2)
 237
 238#define DESC_SZ sizeof(struct netsec_de)
 239
 240#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x)        ((x) & 0xffff0000)
 241
 242enum ring_id {
 243        NETSEC_RING_TX = 0,
 244        NETSEC_RING_RX
 245};
 246
 247struct netsec_desc {
 248        struct sk_buff *skb;
 249        dma_addr_t dma_addr;
 250        void *addr;
 251        u16 len;
 252};
 253
 254struct netsec_desc_ring {
 255        dma_addr_t desc_dma;
 256        struct netsec_desc *desc;
 257        void *vaddr;
 258        u16 pkt_cnt;
 259        u16 head, tail;
 260};
 261
 262struct netsec_priv {
 263        struct netsec_desc_ring desc_ring[NETSEC_RING_MAX];
 264        struct ethtool_coalesce et_coalesce;
 265        spinlock_t reglock; /* protect reg access */
 266        struct napi_struct napi;
 267        phy_interface_t phy_interface;
 268        struct net_device *ndev;
 269        struct device_node *phy_np;
 270        struct phy_device *phydev;
 271        struct mii_bus *mii_bus;
 272        void __iomem *ioaddr;
 273        void __iomem *eeprom_base;
 274        struct device *dev;
 275        struct clk *clk;
 276        u32 msg_enable;
 277        u32 freq;
 278        bool rx_cksum_offload_flag;
 279};
 280
 281struct netsec_de { /* Netsec Descriptor layout */
 282        u32 attr;
 283        u32 data_buf_addr_up;
 284        u32 data_buf_addr_lw;
 285        u32 buf_len_info;
 286};
 287
 288struct netsec_tx_pkt_ctrl {
 289        u16 tcp_seg_len;
 290        bool tcp_seg_offload_flag;
 291        bool cksum_offload_flag;
 292};
 293
 294struct netsec_rx_pkt_info {
 295        int rx_cksum_result;
 296        int err_code;
 297        bool err_flag;
 298};
 299
 300static void netsec_write(struct netsec_priv *priv, u32 reg_addr, u32 val)
 301{
 302        writel(val, priv->ioaddr + reg_addr);
 303}
 304
 305static u32 netsec_read(struct netsec_priv *priv, u32 reg_addr)
 306{
 307        return readl(priv->ioaddr + reg_addr);
 308}
 309
 310/************* MDIO BUS OPS FOLLOW *************/
 311
 312#define TIMEOUT_SPINS_MAC               1000
 313#define TIMEOUT_SECONDARY_MS_MAC        100
 314
 315static u32 netsec_clk_type(u32 freq)
 316{
 317        if (freq < MHZ(35))
 318                return NETSEC_GMAC_GAR_REG_CR_25_35_MHZ;
 319        if (freq < MHZ(60))
 320                return NETSEC_GMAC_GAR_REG_CR_35_60_MHZ;
 321        if (freq < MHZ(100))
 322                return NETSEC_GMAC_GAR_REG_CR_60_100_MHZ;
 323        if (freq < MHZ(150))
 324                return NETSEC_GMAC_GAR_REG_CR_100_150_MHZ;
 325        if (freq < MHZ(250))
 326                return NETSEC_GMAC_GAR_REG_CR_150_250_MHZ;
 327
 328        return NETSEC_GMAC_GAR_REG_CR_250_300_MHZ;
 329}
 330
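     /* Poll @addr until the bits in @mask clear: first a short busy-wait
      * loop, then a slower sleeping loop before giving up with a warning.
      */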
 331static int netsec_wait_while_busy(struct netsec_priv *priv, u32 addr, u32 mask)
 332{
 333        u32 timeout = TIMEOUT_SPINS_MAC;
 334
 335        while (--timeout && netsec_read(priv, addr) & mask)
 336                cpu_relax();
 337        if (timeout)
 338                return 0;
 339
 340        timeout = TIMEOUT_SECONDARY_MS_MAC;
 341        while (--timeout && netsec_read(priv, addr) & mask)
 342                usleep_range(1000, 2000);
 343
 344        if (timeout)
 345                return 0;
 346
 347        netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
 348
 349        return -ETIMEDOUT;
 350}
 351
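     /* The GMAC block is not mapped directly; its registers are reached
      * indirectly by writing MAC_REG_DATA/MAC_REG_CMD and waiting for the
      * command busy bit to clear.
      */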
 352static int netsec_mac_write(struct netsec_priv *priv, u32 addr, u32 value)
 353{
 354        netsec_write(priv, MAC_REG_DATA, value);
 355        netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_WRITE);
 356        return netsec_wait_while_busy(priv,
 357                                      MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
 358}
 359
 360static int netsec_mac_read(struct netsec_priv *priv, u32 addr, u32 *read)
 361{
 362        int ret;
 363
 364        netsec_write(priv, MAC_REG_CMD, addr | NETSEC_GMAC_CMD_ST_READ);
 365        ret = netsec_wait_while_busy(priv,
 366                                     MAC_REG_CMD, NETSEC_GMAC_CMD_ST_BUSY);
 367        if (ret)
 368                return ret;
 369
 370        *read = netsec_read(priv, MAC_REG_DATA);
 371
 372        return 0;
 373}
 374
 375static int netsec_mac_wait_while_busy(struct netsec_priv *priv,
 376                                      u32 addr, u32 mask)
 377{
 378        u32 timeout = TIMEOUT_SPINS_MAC;
 379        int ret, data;
 380
 381        do {
 382                ret = netsec_mac_read(priv, addr, &data);
 383                if (ret)
 384                        break;
 385                cpu_relax();
 386        } while (--timeout && (data & mask));
 387
 388        if (timeout)
 389                return 0;
 390
 391        timeout = TIMEOUT_SECONDARY_MS_MAC;
 392        do {
 393                usleep_range(1000, 2000);
 394
 395                ret = netsec_mac_read(priv, addr, &data);
 396                if (ret)
 397                        break;
 398                cpu_relax();
 399        } while (--timeout && (data & mask));
 400
 401        if (timeout && !ret)
 402                return 0;
 403
 404        netdev_WARN(priv->ndev, "%s: timeout\n", __func__);
 405
 406        return -ETIMEDOUT;
 407}
 408
 409static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
 410{
 411        struct phy_device *phydev = priv->ndev->phydev;
 412        u32 value = 0;
 413
 414        value = phydev->duplex ? NETSEC_GMAC_MCR_REG_FULL_DUPLEX_COMMON :
 415                                 NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON;
 416
 417        if (phydev->speed != SPEED_1000)
 418                value |= NETSEC_MCR_PS;
 419
 420        if (priv->phy_interface != PHY_INTERFACE_MODE_GMII &&
 421            phydev->speed == SPEED_100)
 422                value |= NETSEC_GMAC_MCR_REG_FES;
 423
 424        value |= NETSEC_GMAC_MCR_REG_CST | NETSEC_GMAC_MCR_REG_JE;
 425
 426        if (phy_interface_mode_is_rgmii(priv->phy_interface))
 427                value |= NETSEC_GMAC_MCR_REG_IBN;
 428
 429        if (netsec_mac_write(priv, GMAC_REG_MCR, value))
 430                return -ETIMEDOUT;
 431
 432        return 0;
 433}
 434
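     /* MDIO write: stage the value in the GMAC data register (GDR), then
      * issue the address/clock-range/write command through GAR and poll
      * the GB busy bit until the transaction completes.
      */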
 435static int netsec_phy_write(struct mii_bus *bus,
 436                            int phy_addr, int reg, u16 val)
 437{
 438        struct netsec_priv *priv = bus->priv;
 439
 440        if (netsec_mac_write(priv, GMAC_REG_GDR, val))
 441                return -ETIMEDOUT;
 442        if (netsec_mac_write(priv, GMAC_REG_GAR,
 443                             phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
 444                             reg << NETSEC_GMAC_GAR_REG_SHIFT_GR |
 445                             NETSEC_GMAC_GAR_REG_GW | NETSEC_GMAC_GAR_REG_GB |
 446                             (netsec_clk_type(priv->freq) <<
 447                              GMAC_REG_SHIFT_CR_GAR)))
 448                return -ETIMEDOUT;
 449
 450        return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
 451                                          NETSEC_GMAC_GAR_REG_GB);
 452}
 453
 454static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
 455{
 456        struct netsec_priv *priv = bus->priv;
 457        u32 data;
 458        int ret;
 459
 460        if (netsec_mac_write(priv, GMAC_REG_GAR, NETSEC_GMAC_GAR_REG_GB |
 461                             phy_addr << NETSEC_GMAC_GAR_REG_SHIFT_PA |
 462                             reg_addr << NETSEC_GMAC_GAR_REG_SHIFT_GR |
 463                             (netsec_clk_type(priv->freq) <<
 464                              GMAC_REG_SHIFT_CR_GAR)))
 465                return -ETIMEDOUT;
 466
 467        ret = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
 468                                         NETSEC_GMAC_GAR_REG_GB);
 469        if (ret)
 470                return ret;
 471
 472        ret = netsec_mac_read(priv, GMAC_REG_GDR, &data);
 473        if (ret)
 474                return ret;
 475
 476        return data;
 477}
 478
 479/************* ETHTOOL_OPS FOLLOW *************/
 480
 481static void netsec_et_get_drvinfo(struct net_device *net_device,
 482                                  struct ethtool_drvinfo *info)
 483{
 484        strlcpy(info->driver, "netsec", sizeof(info->driver));
 485        strlcpy(info->bus_info, dev_name(net_device->dev.parent),
 486                sizeof(info->bus_info));
 487}
 488
 489static int netsec_et_get_coalesce(struct net_device *net_device,
 490                                  struct ethtool_coalesce *et_coalesce)
 491{
 492        struct netsec_priv *priv = netdev_priv(net_device);
 493
 494        *et_coalesce = priv->et_coalesce;
 495
 496        return 0;
 497}
 498
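     /* Apply interrupt coalescing: the requested frame counts and
      * microsecond timers are clamped to minimums, written to the
      * normal-descriptor TXINT/RXINT registers, and the matching
      * done/timer interrupt sources are enabled.
      *
      * Example invocation (hypothetical interface name):
      *   ethtool -C eth0 rx-usecs 100 rx-frames 8 tx-usecs 100 tx-frames 8
      */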
 499static int netsec_et_set_coalesce(struct net_device *net_device,
 500                                  struct ethtool_coalesce *et_coalesce)
 501{
 502        struct netsec_priv *priv = netdev_priv(net_device);
 503
 504        priv->et_coalesce = *et_coalesce;
 505
 506        if (priv->et_coalesce.tx_coalesce_usecs < 50)
 507                priv->et_coalesce.tx_coalesce_usecs = 50;
 508        if (priv->et_coalesce.tx_max_coalesced_frames < 1)
 509                priv->et_coalesce.tx_max_coalesced_frames = 1;
 510
 511        netsec_write(priv, NETSEC_REG_NRM_TX_DONE_TXINT_PKTCNT,
 512                     priv->et_coalesce.tx_max_coalesced_frames);
 513        netsec_write(priv, NETSEC_REG_NRM_TX_TXINT_TMR,
 514                     priv->et_coalesce.tx_coalesce_usecs);
 515        netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TXDONE);
 516        netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_SET, NRM_TX_ST_TMREXP);
 517
 518        if (priv->et_coalesce.rx_coalesce_usecs < 50)
 519                priv->et_coalesce.rx_coalesce_usecs = 50;
 520        if (priv->et_coalesce.rx_max_coalesced_frames < 1)
 521                priv->et_coalesce.rx_max_coalesced_frames = 1;
 522
 523        netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_PKTCNT,
 524                     priv->et_coalesce.rx_max_coalesced_frames);
 525        netsec_write(priv, NETSEC_REG_NRM_RX_RXINT_TMR,
 526                     priv->et_coalesce.rx_coalesce_usecs);
 527        netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_PKTCNT);
 528        netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_SET, NRM_RX_ST_TMREXP);
 529
 530        return 0;
 531}
 532
 533static u32 netsec_et_get_msglevel(struct net_device *dev)
 534{
 535        struct netsec_priv *priv = netdev_priv(dev);
 536
 537        return priv->msg_enable;
 538}
 539
 540static void netsec_et_set_msglevel(struct net_device *dev, u32 datum)
 541{
 542        struct netsec_priv *priv = netdev_priv(dev);
 543
 544        priv->msg_enable = datum;
 545}
 546
 547static const struct ethtool_ops netsec_ethtool_ops = {
 548        .get_drvinfo            = netsec_et_get_drvinfo,
 549        .get_link_ksettings     = phy_ethtool_get_link_ksettings,
 550        .set_link_ksettings     = phy_ethtool_set_link_ksettings,
 551        .get_link               = ethtool_op_get_link,
 552        .get_coalesce           = netsec_et_get_coalesce,
 553        .set_coalesce           = netsec_et_set_coalesce,
 554        .get_msglevel           = netsec_et_get_msglevel,
 555        .set_msglevel           = netsec_et_set_msglevel,
 556};
 557
 558/************* NETDEV_OPS FOLLOW *************/
 559
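     /* Allocate and DMA-map one RX buffer of desc->len bytes.  On
      * DMA-coherent platforms the usual IP-aligned allocation is used;
      * otherwise the length is rounded up to a cache line, presumably to
      * keep the buffer from sharing cache lines on non-coherent DMA.
      * Returns NULL if the allocation or the mapping fails.
      */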
 560static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
 561                                        struct netsec_desc *desc)
 562{
 563        struct sk_buff *skb;
 564
 565        if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
 566                skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
 567        } else {
 568                desc->len = L1_CACHE_ALIGN(desc->len);
 569                skb = netdev_alloc_skb(priv->ndev, desc->len);
 570        }
 571        if (!skb)
 572                return NULL;
 573
 574        desc->addr = skb->data;
 575        desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
 576                                        DMA_FROM_DEVICE);
 577        if (dma_mapping_error(priv->dev, desc->dma_addr)) {
 578                dev_kfree_skb_any(skb);
 579                return NULL;
 580        }
 581        return skb;
 582}
 583
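     /* Fill the RX descriptor at @idx and hand it back to the hardware:
      * set OWN/FS/LS (plus LD on the final entry so the engine wraps),
      * program the buffer address and length, and mirror the buffer
      * bookkeeping into dring->desc[idx].
      */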
 584static void netsec_set_rx_de(struct netsec_priv *priv,
 585                             struct netsec_desc_ring *dring, u16 idx,
 586                             const struct netsec_desc *desc,
 587                             struct sk_buff *skb)
 588{
 589        struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
 590        u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
 591                   (1 << NETSEC_RX_PKT_FS_FIELD) |
 592                   (1 << NETSEC_RX_PKT_LS_FIELD);
 593
 594        if (idx == DESC_NUM - 1)
 595                attr |= (1 << NETSEC_RX_PKT_LD_FIELD);
 596
 597        de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
 598        de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
 599        de->buf_len_info = desc->len;
 600        de->attr = attr;
 601        dma_wmb();
 602
 603        dring->desc[idx].dma_addr = desc->dma_addr;
 604        dring->desc[idx].addr = desc->addr;
 605        dring->desc[idx].len = desc->len;
 606        dring->desc[idx].skb = skb;
 607}
 608
 609static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
 610                                        struct netsec_desc_ring *dring,
 611                                        u16 idx,
 612                                        struct netsec_rx_pkt_info *rxpi,
 613                                        struct netsec_desc *desc, u16 *len)
 614{
 615        struct netsec_de de = {};
 616
 617        memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
 618
 619        *len = de.buf_len_info >> 16;
 620
 621        rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
 622        rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
 623        rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
 624                                                        NETSEC_RX_PKT_ERR_MASK;
 625        *desc = dring->desc[idx];
 626        return desc->skb;
 627}
 628
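     /* Take one completed packet off the RX ring tail.  A replacement
      * buffer (MTU plus 22 bytes, presumably link-level overhead) is
      * allocated first; if that fails the old buffer is recycled to the
      * hardware and NULL is returned, dropping the frame.  Otherwise the
      * finished skb is returned and the fresh buffer takes its slot.
      * The tail advances and the pending count drops either way.
      */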
 629static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
 630                                              struct netsec_rx_pkt_info *rxpi,
 631                                              struct netsec_desc *desc,
 632                                              u16 *len)
 633{
 634        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
 635        struct sk_buff *tmp_skb, *skb = NULL;
 636        struct netsec_desc td;
 637        int tail;
 638
 639        *rxpi = (struct netsec_rx_pkt_info){};
 640
 641        td.len = priv->ndev->mtu + 22;
 642
 643        tmp_skb = netsec_alloc_skb(priv, &td);
 644
 645        dma_rmb();
 646
 647        tail = dring->tail;
 648
 649        if (!tmp_skb) {
 650                netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
 651                                 dring->desc[tail].skb);
 652        } else {
 653                skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
 654                netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
 655        }
 656
 657        /* move tail ahead */
 658        dring->tail = (dring->tail + 1) % DESC_NUM;
 659
 660        dring->pkt_cnt--;
 661
 662        return skb;
 663}
 664
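     /* Reclaim up to @budget completed TX descriptors.  The hardware's
      * done-packet counter (NRM_TX_DONE_PKTCNT, apparently clear-on-read)
      * is accumulated into dring->pkt_cnt; each descriptor is unmapped
      * and, when it carries the LAST bit, its skb is freed and counted.
      * Returns the number of entries processed so netsec_process_tx()
      * can keep calling until nothing is left.
      */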
 665static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
 666{
 667        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
 668        unsigned int pkts, bytes;
 669
 670        dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT);
 671
 672        if (dring->pkt_cnt < budget)
 673                budget = dring->pkt_cnt;
 674
 675        pkts = 0;
 676        bytes = 0;
 677
 678        while (pkts < budget) {
 679                struct netsec_desc *desc;
 680                struct netsec_de *entry;
 681                int tail, eop;
 682
 683                tail = dring->tail;
 684
 685                /* move tail ahead */
 686                dring->tail = (tail + 1) % DESC_NUM;
 687
 688                desc = &dring->desc[tail];
 689                entry = dring->vaddr + DESC_SZ * tail;
 690
 691                eop = (entry->attr >> NETSEC_TX_LAST) & 1;
 692
 693                dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
 694                                 DMA_TO_DEVICE);
 695                if (eop) {
 696                        pkts++;
 697                        bytes += desc->skb->len;
 698                        dev_kfree_skb(desc->skb);
 699                }
 700                *desc = (struct netsec_desc){};
 701        }
 702        dring->pkt_cnt -= budget;
 703
 704        priv->ndev->stats.tx_packets += budget;
 705        priv->ndev->stats.tx_bytes += bytes;
 706
 707        netdev_completed_queue(priv->ndev, budget, bytes);
 708
 709        return budget;
 710}
 711
 712static int netsec_process_tx(struct netsec_priv *priv, int budget)
 713{
 714        struct net_device *ndev = priv->ndev;
 715        int new, done = 0;
 716
 717        do {
 718                new = netsec_clean_tx_dring(priv, budget);
 719                done += new;
 720                budget -= new;
 721        } while (new);
 722
 723        if (done && netif_queue_stopped(ndev))
 724                netif_wake_queue(ndev);
 725
 726        return done;
 727}
 728
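     /* Receive up to @budget frames: latch the count of newly arrived
      * packets from NRM_RX_PKTCNT (advancing the ring head by that many
      * slots), then pop frames from the tail, unmap them, mark the
      * checksum as verified when RX checksum offload is on and the
      * hardware reported it OK, and feed them to GRO.  Errored frames
      * are dropped and counted in rx_dropped.
      */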
 729static int netsec_process_rx(struct netsec_priv *priv, int budget)
 730{
 731        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
 732        struct net_device *ndev = priv->ndev;
 733        struct netsec_rx_pkt_info rx_info;
 734        int done = 0, rx_num = 0;
 735        struct netsec_desc desc;
 736        struct sk_buff *skb;
 737        u16 len;
 738
 739        while (done < budget) {
 740                if (!rx_num) {
 741                        rx_num = netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
 742                        dring->pkt_cnt += rx_num;
 743
 744                        /* move head 'rx_num' */
 745                        dring->head = (dring->head + rx_num) % DESC_NUM;
 746
 747                        rx_num = dring->pkt_cnt;
 748                        if (!rx_num)
 749                                break;
 750                }
 751                done++;
 752                rx_num--;
 753                skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
 754                if (unlikely(!skb) || rx_info.err_flag) {
 755                        netif_err(priv, drv, priv->ndev,
 756                                  "%s: rx fail err(%d)\n",
 757                                  __func__, rx_info.err_code);
 758                        ndev->stats.rx_dropped++;
 759                        continue;
 760                }
 761
 762                dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
 763                                 DMA_FROM_DEVICE);
 764                skb_put(skb, len);
 765                skb->protocol = eth_type_trans(skb, priv->ndev);
 766
 767                if (priv->rx_cksum_offload_flag &&
 768                    rx_info.rx_cksum_result == NETSEC_RX_CKSUM_OK)
 769                        skb->ip_summed = CHECKSUM_UNNECESSARY;
 770
 771                if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
 772                        ndev->stats.rx_packets++;
 773                        ndev->stats.rx_bytes += len;
 774                }
 775        }
 776
 777        return done;
 778}
 779
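     /* NAPI poll: alternate TX completion and RX processing until the
      * budget is spent or neither ring has work left.  If the budget was
      * not exhausted, complete NAPI and re-enable the top-level TX/RX
      * interrupts (masked in the hard IRQ handler) under reglock.
      */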
 780static int netsec_napi_poll(struct napi_struct *napi, int budget)
 781{
 782        struct netsec_priv *priv;
 783        struct net_device *ndev;
 784        int tx, rx, done, todo;
 785
 786        priv = container_of(napi, struct netsec_priv, napi);
 787        ndev = priv->ndev;
 788
 789        todo = budget;
 790        do {
 791                if (!todo)
 792                        break;
 793
 794                tx = netsec_process_tx(priv, todo);
 795                todo -= tx;
 796
 797                if (!todo)
 798                        break;
 799
 800                rx = netsec_process_rx(priv, todo);
 801                todo -= rx;
 802        } while (rx || tx);
 803
 804        done = budget - todo;
 805
 806        if (done < budget && napi_complete_done(napi, done)) {
 807                unsigned long flags;
 808
 809                spin_lock_irqsave(&priv->reglock, flags);
 810                netsec_write(priv, NETSEC_REG_INTEN_SET,
 811                             NETSEC_IRQ_RX | NETSEC_IRQ_TX);
 812                spin_unlock_irqrestore(&priv->reglock, flags);
 813        }
 814
 815        return done;
 816}
 817
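     /* Build the TX descriptor at the ring head: OWN, first-segment and
      * LAST flags, the GMAC target ring id, optional checksum/TSO bits,
      * the buffer address, and the length with the TSO segment size in
      * the upper halfword of buf_len_info.  LD marks the final ring
      * entry; the head is advanced afterwards.
      */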
 818static void netsec_set_tx_de(struct netsec_priv *priv,
 819                             struct netsec_desc_ring *dring,
 820                             const struct netsec_tx_pkt_ctrl *tx_ctrl,
 821                             const struct netsec_desc *desc,
 822                             struct sk_buff *skb)
 823{
 824        int idx = dring->head;
 825        struct netsec_de *de;
 826        u32 attr;
 827
 828        de = dring->vaddr + (DESC_SZ * idx);
 829
 830        attr = (1 << NETSEC_TX_SHIFT_OWN_FIELD) |
 831               (1 << NETSEC_TX_SHIFT_PT_FIELD) |
 832               (NETSEC_RING_GMAC << NETSEC_TX_SHIFT_TDRID_FIELD) |
 833               (1 << NETSEC_TX_SHIFT_FS_FIELD) |
 834               (1 << NETSEC_TX_LAST) |
 835               (tx_ctrl->cksum_offload_flag << NETSEC_TX_SHIFT_CO) |
 836               (tx_ctrl->tcp_seg_offload_flag << NETSEC_TX_SHIFT_SO) |
 837               (1 << NETSEC_TX_SHIFT_TRS_FIELD);
 838        if (idx == DESC_NUM - 1)
 839                attr |= (1 << NETSEC_TX_SHIFT_LD_FIELD);
 840
 841        de->data_buf_addr_up = upper_32_bits(desc->dma_addr);
 842        de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
 843        de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
 844        de->attr = attr;
 845        dma_wmb();
 846
 847        dring->desc[idx] = *desc;
 848        dring->desc[idx].skb = skb;
 849
 850        /* move head ahead */
 851        dring->head = (dring->head + 1) % DESC_NUM;
 852}
 853
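     /* Queue one frame for transmission.  If fewer than two descriptors
      * are free the queue is stopped and NETDEV_TX_BUSY returned.  For
      * GSO packets the IP total/payload length is zeroed and the TCP
      * checksum seeded with the pseudo-header sum, presumably as the TSO
      * engine expects.  The skb head is mapped, a descriptor is filled
      * and the engine is kicked via NRM_TX_PKTCNT.
      */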
 854static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
 855                                            struct net_device *ndev)
 856{
 857        struct netsec_priv *priv = netdev_priv(ndev);
 858        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
 859        struct netsec_tx_pkt_ctrl tx_ctrl = {};
 860        struct netsec_desc tx_desc;
 861        u16 tso_seg_len = 0;
 862        int filled;
 863
  864        /* differentiate between full/empty ring */
 865        if (dring->head >= dring->tail)
 866                filled = dring->head - dring->tail;
 867        else
 868                filled = dring->head + DESC_NUM - dring->tail;
 869
 870        if (DESC_NUM - filled < 2) { /* if less than 2 available */
 871                netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__);
 872                netif_stop_queue(priv->ndev);
 873                dma_wmb();
 874                return NETDEV_TX_BUSY;
 875        }
 876
 877        if (skb->ip_summed == CHECKSUM_PARTIAL)
 878                tx_ctrl.cksum_offload_flag = true;
 879
 880        if (skb_is_gso(skb))
 881                tso_seg_len = skb_shinfo(skb)->gso_size;
 882
 883        if (tso_seg_len > 0) {
 884                if (skb->protocol == htons(ETH_P_IP)) {
 885                        ip_hdr(skb)->tot_len = 0;
 886                        tcp_hdr(skb)->check =
 887                                ~tcp_v4_check(0, ip_hdr(skb)->saddr,
 888                                              ip_hdr(skb)->daddr, 0);
 889                } else {
 890                        ipv6_hdr(skb)->payload_len = 0;
 891                        tcp_hdr(skb)->check =
 892                                ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 893                                                 &ipv6_hdr(skb)->daddr,
 894                                                 0, IPPROTO_TCP, 0);
 895                }
 896
 897                tx_ctrl.tcp_seg_offload_flag = true;
 898                tx_ctrl.tcp_seg_len = tso_seg_len;
 899        }
 900
 901        tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
 902                                          skb_headlen(skb), DMA_TO_DEVICE);
 903        if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
 904                netif_err(priv, drv, priv->ndev,
 905                          "%s: DMA mapping failed\n", __func__);
 906                ndev->stats.tx_dropped++;
 907                dev_kfree_skb_any(skb);
 908                return NETDEV_TX_OK;
 909        }
 910        tx_desc.addr = skb->data;
 911        tx_desc.len = skb_headlen(skb);
 912
 913        skb_tx_timestamp(skb);
 914        netdev_sent_queue(priv->ndev, skb->len);
 915
 916        netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
 917        netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
 918
 919        return NETDEV_TX_OK;
 920}
 921
 922static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
 923{
 924        struct netsec_desc_ring *dring = &priv->desc_ring[id];
 925        struct netsec_desc *desc;
 926        u16 idx;
 927
 928        if (!dring->vaddr || !dring->desc)
 929                return;
 930
 931        for (idx = 0; idx < DESC_NUM; idx++) {
 932                desc = &dring->desc[idx];
 933                if (!desc->addr)
 934                        continue;
 935
 936                dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
 937                                 id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
 938                                                              DMA_TO_DEVICE);
 939                dev_kfree_skb(desc->skb);
 940        }
 941
 942        memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
 943        memset(dring->vaddr, 0, DESC_SZ * DESC_NUM);
 944
 945        dring->head = 0;
 946        dring->tail = 0;
 947        dring->pkt_cnt = 0;
 948}
 949
 950static void netsec_free_dring(struct netsec_priv *priv, int id)
 951{
 952        struct netsec_desc_ring *dring = &priv->desc_ring[id];
 953
 954        if (dring->vaddr) {
 955                dma_free_coherent(priv->dev, DESC_SZ * DESC_NUM,
 956                                  dring->vaddr, dring->desc_dma);
 957                dring->vaddr = NULL;
 958        }
 959
 960        kfree(dring->desc);
 961        dring->desc = NULL;
 962}
 963
 964static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
 965{
 966        struct netsec_desc_ring *dring = &priv->desc_ring[id];
 967        int ret = 0;
 968
 969        dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
 970                                           &dring->desc_dma, GFP_KERNEL);
 971        if (!dring->vaddr) {
 972                ret = -ENOMEM;
 973                goto err;
 974        }
 975
 976        dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
 977        if (!dring->desc) {
 978                ret = -ENOMEM;
 979                goto err;
 980        }
 981
 982        return 0;
 983err:
 984        netsec_free_dring(priv, id);
 985
 986        return ret;
 987}
 988
 989static int netsec_setup_rx_dring(struct netsec_priv *priv)
 990{
 991        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
 992        struct netsec_desc desc;
 993        struct sk_buff *skb;
 994        int n;
 995
 996        desc.len = priv->ndev->mtu + 22;
 997
 998        for (n = 0; n < DESC_NUM; n++) {
 999                skb = netsec_alloc_skb(priv, &desc);
1000                if (!skb) {
1001                        netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
1002                        return -ENOMEM;
1003                }
1004                netsec_set_rx_de(priv, dring, n, &desc, skb);
1005        }
1006
1007        return 0;
1008}
1009
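     /* Stream one microcode region into the engine: the caller supplies
      * a physical address and word count (taken from the EEPROM map),
      * the region is ioremapped and copied word by word into the given
      * command-buffer register.
      */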
1010static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
1011                                           u32 addr_h, u32 addr_l, u32 size)
1012{
1013        u64 base = (u64)addr_h << 32 | addr_l;
1014        void __iomem *ucode;
1015        u32 i;
1016
1017        ucode = ioremap(base, size * sizeof(u32));
1018        if (!ucode)
1019                return -ENOMEM;
1020
1021        for (i = 0; i < size; i++)
1022                netsec_write(priv, reg, readl(ucode + i * 4));
1023
1024        iounmap(ucode);
1025        return 0;
1026}
1027
1028static int netsec_netdev_load_microcode(struct netsec_priv *priv)
1029{
1030        u32 addr_h, addr_l, size;
1031        int err;
1032
1033        addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_H);
1034        addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_ADDRESS_L);
1035        size = readl(priv->eeprom_base + NETSEC_EEPROM_HM_ME_SIZE);
1036        err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_HM_CMD_BUF,
1037                                              addr_h, addr_l, size);
1038        if (err)
1039                return err;
1040
1041        addr_h = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_H);
1042        addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_ADDRESS_L);
1043        size = readl(priv->eeprom_base + NETSEC_EEPROM_MH_ME_SIZE);
1044        err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_DMAC_MH_CMD_BUF,
1045                                              addr_h, addr_l, size);
1046        if (err)
1047                return err;
1048
1049        addr_h = 0;
1050        addr_l = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_ADDRESS);
1051        size = readl(priv->eeprom_base + NETSEC_EEPROM_PKT_ME_SIZE);
1052        err = netsec_netdev_load_ucode_region(priv, NETSEC_REG_PKT_CMD_BUF,
1053                                              addr_h, addr_l, size);
1054        if (err)
1055                return err;
1056
1057        return 0;
1058}
1059
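     /* Full engine reset: stop the DMA engines if they are running,
      * soft-reset the core, program the RX/TX descriptor ring base
      * addresses and endianness, optionally reload the microcode from
      * the EEPROM map, restart the DMA engines and wait for the
      * code-load-end flag, then switch the packet engine to normal mode
      * and leave every interrupt masked.
      */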
1060static int netsec_reset_hardware(struct netsec_priv *priv,
1061                                 bool load_ucode)
1062{
1063        u32 value;
1064        int err;
1065
1066        /* stop DMA engines */
1067        if (!netsec_read(priv, NETSEC_REG_ADDR_DIS_CORE)) {
1068                netsec_write(priv, NETSEC_REG_DMA_HM_CTRL,
1069                             NETSEC_DMA_CTRL_REG_STOP);
1070                netsec_write(priv, NETSEC_REG_DMA_MH_CTRL,
1071                             NETSEC_DMA_CTRL_REG_STOP);
1072
1073                while (netsec_read(priv, NETSEC_REG_DMA_HM_CTRL) &
1074                       NETSEC_DMA_CTRL_REG_STOP)
1075                        cpu_relax();
1076
1077                while (netsec_read(priv, NETSEC_REG_DMA_MH_CTRL) &
1078                       NETSEC_DMA_CTRL_REG_STOP)
1079                        cpu_relax();
1080        }
1081
1082        netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RESET);
1083        netsec_write(priv, NETSEC_REG_SOFT_RST, NETSEC_SOFT_RST_REG_RUN);
1084        netsec_write(priv, NETSEC_REG_COM_INIT, NETSEC_COM_INIT_REG_ALL);
1085
1086        while (netsec_read(priv, NETSEC_REG_COM_INIT) != 0)
1087                cpu_relax();
1088
1089        /* set desc_start addr */
1090        netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_UP,
1091                     upper_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
1092        netsec_write(priv, NETSEC_REG_NRM_RX_DESC_START_LW,
1093                     lower_32_bits(priv->desc_ring[NETSEC_RING_RX].desc_dma));
1094
1095        netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_UP,
1096                     upper_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
1097        netsec_write(priv, NETSEC_REG_NRM_TX_DESC_START_LW,
1098                     lower_32_bits(priv->desc_ring[NETSEC_RING_TX].desc_dma));
1099
1100        /* set normal tx dring ring config */
1101        netsec_write(priv, NETSEC_REG_NRM_TX_CONFIG,
1102                     1 << NETSEC_REG_DESC_ENDIAN);
1103        netsec_write(priv, NETSEC_REG_NRM_RX_CONFIG,
1104                     1 << NETSEC_REG_DESC_ENDIAN);
1105
1106        if (load_ucode) {
1107                err = netsec_netdev_load_microcode(priv);
1108                if (err) {
1109                        netif_err(priv, probe, priv->ndev,
1110                                  "%s: failed to load microcode (%d)\n",
1111                                  __func__, err);
1112                        return err;
1113                }
1114        }
1115
1116        /* start DMA engines */
1117        netsec_write(priv, NETSEC_REG_DMA_TMR_CTRL, priv->freq / 1000000 - 1);
1118        netsec_write(priv, NETSEC_REG_ADDR_DIS_CORE, 0);
1119
1120        usleep_range(1000, 2000);
1121
1122        if (!(netsec_read(priv, NETSEC_REG_TOP_STATUS) &
1123              NETSEC_TOP_IRQ_REG_CODE_LOAD_END)) {
1124                netif_err(priv, probe, priv->ndev,
1125                          "microengine start failed\n");
1126                return -ENXIO;
1127        }
1128        netsec_write(priv, NETSEC_REG_TOP_STATUS,
1129                     NETSEC_TOP_IRQ_REG_CODE_LOAD_END);
1130
1131        value = NETSEC_PKT_CTRL_REG_MODE_NRM;
1132        if (priv->ndev->mtu > ETH_DATA_LEN)
1133                value |= NETSEC_PKT_CTRL_REG_EN_JUMBO;
1134
1135        /* change to normal mode */
1136        netsec_write(priv, NETSEC_REG_DMA_MH_CTRL, MH_CTRL__MODE_TRANS);
1137        netsec_write(priv, NETSEC_REG_PKT_CTRL, value);
1138
1139        while ((netsec_read(priv, NETSEC_REG_MODE_TRANS_COMP_STATUS) &
1140                NETSEC_MODE_TRANS_COMP_IRQ_T2N) == 0)
1141                cpu_relax();
1142
1143        /* clear any pending EMPTY/ERR irq status */
1144        netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, ~0);
1145
1146        /* Disable TX & RX intr */
1147        netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
1148
1149        return 0;
1150}
1151
1152static int netsec_start_gmac(struct netsec_priv *priv)
1153{
1154        struct phy_device *phydev = priv->ndev->phydev;
1155        u32 value = 0;
1156        int ret;
1157
1158        if (phydev->speed != SPEED_1000)
1159                value = (NETSEC_GMAC_MCR_REG_CST |
1160                         NETSEC_GMAC_MCR_REG_HALF_DUPLEX_COMMON);
1161
1162        if (netsec_mac_write(priv, GMAC_REG_MCR, value))
1163                return -ETIMEDOUT;
1164        if (netsec_mac_write(priv, GMAC_REG_BMR,
1165                             NETSEC_GMAC_BMR_REG_RESET))
1166                return -ETIMEDOUT;
1167
 1168        /* Wait for the soft reset to complete */
1169        usleep_range(1000, 5000);
1170
1171        ret = netsec_mac_read(priv, GMAC_REG_BMR, &value);
1172        if (ret)
1173                return ret;
1174        if (value & NETSEC_GMAC_BMR_REG_SWR)
1175                return -EAGAIN;
1176
1177        netsec_write(priv, MAC_REG_DESC_SOFT_RST, 1);
1178        if (netsec_wait_while_busy(priv, MAC_REG_DESC_SOFT_RST, 1))
1179                return -ETIMEDOUT;
1180
1181        netsec_write(priv, MAC_REG_DESC_INIT, 1);
1182        if (netsec_wait_while_busy(priv, MAC_REG_DESC_INIT, 1))
1183                return -ETIMEDOUT;
1184
1185        if (netsec_mac_write(priv, GMAC_REG_BMR,
1186                             NETSEC_GMAC_BMR_REG_COMMON))
1187                return -ETIMEDOUT;
1188        if (netsec_mac_write(priv, GMAC_REG_RDLAR,
1189                             NETSEC_GMAC_RDLAR_REG_COMMON))
1190                return -ETIMEDOUT;
1191        if (netsec_mac_write(priv, GMAC_REG_TDLAR,
1192                             NETSEC_GMAC_TDLAR_REG_COMMON))
1193                return -ETIMEDOUT;
1194        if (netsec_mac_write(priv, GMAC_REG_MFFR, 0x80000001))
1195                return -ETIMEDOUT;
1196
1197        ret = netsec_mac_update_to_phy_state(priv);
1198        if (ret)
1199                return ret;
1200
1201        ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
1202        if (ret)
1203                return ret;
1204
1205        value |= NETSEC_GMAC_OMR_REG_SR;
1206        value |= NETSEC_GMAC_OMR_REG_ST;
1207
1208        netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
1209        netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
1210
1211        netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce);
1212
1213        if (netsec_mac_write(priv, GMAC_REG_OMR, value))
1214                return -ETIMEDOUT;
1215
1216        return 0;
1217}
1218
1219static int netsec_stop_gmac(struct netsec_priv *priv)
1220{
1221        u32 value;
1222        int ret;
1223
1224        ret = netsec_mac_read(priv, GMAC_REG_OMR, &value);
1225        if (ret)
1226                return ret;
1227        value &= ~NETSEC_GMAC_OMR_REG_SR;
1228        value &= ~NETSEC_GMAC_OMR_REG_ST;
1229
1230        /* disable all interrupts */
1231        netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0);
1232        netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0);
1233
1234        return netsec_mac_write(priv, GMAC_REG_OMR, value);
1235}
1236
1237static void netsec_phy_adjust_link(struct net_device *ndev)
1238{
1239        struct netsec_priv *priv = netdev_priv(ndev);
1240
1241        if (ndev->phydev->link)
1242                netsec_start_gmac(priv);
1243        else
1244                netsec_stop_gmac(priv);
1245
1246        phy_print_status(ndev->phydev);
1247}
1248
1249static irqreturn_t netsec_irq_handler(int irq, void *dev_id)
1250{
1251        struct netsec_priv *priv = dev_id;
1252        u32 val, status = netsec_read(priv, NETSEC_REG_TOP_STATUS);
1253        unsigned long flags;
1254
 1255        /* Ack per-ring status; TX/RX irqs are masked below until NAPI completes */
1256        if (status & NETSEC_IRQ_TX) {
1257                val = netsec_read(priv, NETSEC_REG_NRM_TX_STATUS);
1258                netsec_write(priv, NETSEC_REG_NRM_TX_STATUS, val);
1259        }
1260        if (status & NETSEC_IRQ_RX) {
1261                val = netsec_read(priv, NETSEC_REG_NRM_RX_STATUS);
1262                netsec_write(priv, NETSEC_REG_NRM_RX_STATUS, val);
1263        }
1264
1265        spin_lock_irqsave(&priv->reglock, flags);
1266        netsec_write(priv, NETSEC_REG_INTEN_CLR, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
1267        spin_unlock_irqrestore(&priv->reglock, flags);
1268
1269        napi_schedule(&priv->napi);
1270
1271        return IRQ_HANDLED;
1272}
1273
1274static int netsec_netdev_open(struct net_device *ndev)
1275{
1276        struct netsec_priv *priv = netdev_priv(ndev);
1277        int ret;
1278
1279        pm_runtime_get_sync(priv->dev);
1280
1281        ret = netsec_setup_rx_dring(priv);
1282        if (ret) {
1283                netif_err(priv, probe, priv->ndev,
1284                          "%s: fail setup ring\n", __func__);
1285                goto err1;
1286        }
1287
1288        ret = request_irq(priv->ndev->irq, netsec_irq_handler,
1289                          IRQF_SHARED, "netsec", priv);
1290        if (ret) {
1291                netif_err(priv, drv, priv->ndev, "request_irq failed\n");
1292                goto err2;
1293        }
1294
1295        if (dev_of_node(priv->dev)) {
1296                if (!of_phy_connect(priv->ndev, priv->phy_np,
1297                                    netsec_phy_adjust_link, 0,
1298                                    priv->phy_interface)) {
1299                        netif_err(priv, link, priv->ndev, "missing PHY\n");
1300                        ret = -ENODEV;
1301                        goto err3;
1302                }
1303        } else {
1304                ret = phy_connect_direct(priv->ndev, priv->phydev,
1305                                         netsec_phy_adjust_link,
1306                                         priv->phy_interface);
1307                if (ret) {
1308                        netif_err(priv, link, priv->ndev,
1309                                  "phy_connect_direct() failed (%d)\n", ret);
1310                        goto err3;
1311                }
1312        }
1313
1314        phy_start(ndev->phydev);
1315
1316        netsec_start_gmac(priv);
1317        napi_enable(&priv->napi);
1318        netif_start_queue(ndev);
1319
1320        /* Enable TX+RX intr. */
1321        netsec_write(priv, NETSEC_REG_INTEN_SET, NETSEC_IRQ_RX | NETSEC_IRQ_TX);
1322
1323        return 0;
1324err3:
1325        free_irq(priv->ndev->irq, priv);
1326err2:
1327        netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
1328err1:
1329        pm_runtime_put_sync(priv->dev);
1330        return ret;
1331}
1332
1333static int netsec_netdev_stop(struct net_device *ndev)
1334{
1335        int ret;
1336        struct netsec_priv *priv = netdev_priv(ndev);
1337
1338        netif_stop_queue(priv->ndev);
1339        dma_wmb();
1340
1341        napi_disable(&priv->napi);
1342
1343        netsec_write(priv, NETSEC_REG_INTEN_CLR, ~0);
1344        netsec_stop_gmac(priv);
1345
1346        free_irq(priv->ndev->irq, priv);
1347
1348        netsec_uninit_pkt_dring(priv, NETSEC_RING_TX);
1349        netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
1350
1351        ret = netsec_reset_hardware(priv, false);
1352
1353        phy_stop(ndev->phydev);
1354        phy_disconnect(ndev->phydev);
1355
1356        pm_runtime_put_sync(priv->dev);
1357
1358        return ret;
1359}
1360
1361static int netsec_netdev_init(struct net_device *ndev)
1362{
1363        struct netsec_priv *priv = netdev_priv(ndev);
1364        int ret;
1365
1366        ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
1367        if (ret)
1368                return ret;
1369
1370        ret = netsec_alloc_dring(priv, NETSEC_RING_RX);
1371        if (ret)
1372                goto err1;
1373
1374        ret = netsec_reset_hardware(priv, true);
1375        if (ret)
1376                goto err2;
1377
1378        return 0;
1379err2:
1380        netsec_free_dring(priv, NETSEC_RING_RX);
1381err1:
1382        netsec_free_dring(priv, NETSEC_RING_TX);
1383        return ret;
1384}
1385
1386static void netsec_netdev_uninit(struct net_device *ndev)
1387{
1388        struct netsec_priv *priv = netdev_priv(ndev);
1389
1390        netsec_free_dring(priv, NETSEC_RING_RX);
1391        netsec_free_dring(priv, NETSEC_RING_TX);
1392}
1393
1394static int netsec_netdev_set_features(struct net_device *ndev,
1395                                      netdev_features_t features)
1396{
1397        struct netsec_priv *priv = netdev_priv(ndev);
1398
1399        priv->rx_cksum_offload_flag = !!(features & NETIF_F_RXCSUM);
1400
1401        return 0;
1402}
1403
1404static int netsec_netdev_ioctl(struct net_device *ndev, struct ifreq *ifr,
1405                               int cmd)
1406{
1407        return phy_mii_ioctl(ndev->phydev, ifr, cmd);
1408}
1409
1410static const struct net_device_ops netsec_netdev_ops = {
1411        .ndo_init               = netsec_netdev_init,
1412        .ndo_uninit             = netsec_netdev_uninit,
1413        .ndo_open               = netsec_netdev_open,
1414        .ndo_stop               = netsec_netdev_stop,
1415        .ndo_start_xmit         = netsec_netdev_start_xmit,
1416        .ndo_set_features       = netsec_netdev_set_features,
1417        .ndo_set_mac_address    = eth_mac_addr,
1418        .ndo_validate_addr      = eth_validate_addr,
1419        .ndo_do_ioctl           = netsec_netdev_ioctl,
1420};
1421
1422static int netsec_of_probe(struct platform_device *pdev,
1423                           struct netsec_priv *priv)
1424{
1425        priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1426        if (!priv->phy_np) {
1427                dev_err(&pdev->dev, "missing required property 'phy-handle'\n");
1428                return -EINVAL;
1429        }
1430
1431        priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
1432        if (IS_ERR(priv->clk)) {
1433                dev_err(&pdev->dev, "phy_ref_clk not found\n");
1434                return PTR_ERR(priv->clk);
1435        }
1436        priv->freq = clk_get_rate(priv->clk);
1437
1438        return 0;
1439}
1440
1441static int netsec_acpi_probe(struct platform_device *pdev,
1442                             struct netsec_priv *priv, u32 *phy_addr)
1443{
1444        int ret;
1445
1446        if (!IS_ENABLED(CONFIG_ACPI))
1447                return -ENODEV;
1448
1449        ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
1450        if (ret) {
1451                dev_err(&pdev->dev,
1452                        "missing required property 'phy-channel'\n");
1453                return ret;
1454        }
1455
1456        ret = device_property_read_u32(&pdev->dev,
1457                                       "socionext,phy-clock-frequency",
1458                                       &priv->freq);
1459        if (ret)
1460                dev_err(&pdev->dev,
1461                        "missing required property 'socionext,phy-clock-frequency'\n");
1462        return ret;
1463}
1464
1465static void netsec_unregister_mdio(struct netsec_priv *priv)
1466{
1467        struct phy_device *phydev = priv->phydev;
1468
1469        if (!dev_of_node(priv->dev) && phydev) {
1470                phy_device_remove(phydev);
1471                phy_device_free(phydev);
1472        }
1473
1474        mdiobus_unregister(priv->mii_bus);
1475}
1476
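     /* Register the MDIO bus.  With a DT node, of_mdiobus_register() is
      * used (preferring an "mdio" subnode when the firmware provides
      * one); otherwise (ACPI) auto-probing is masked off and the PHY at
      * @phy_addr is created and registered by hand.
      */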
1477static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
1478{
1479        struct mii_bus *bus;
1480        int ret;
1481
1482        bus = devm_mdiobus_alloc(priv->dev);
1483        if (!bus)
1484                return -ENOMEM;
1485
1486        snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(priv->dev));
1487        bus->priv = priv;
1488        bus->name = "SNI NETSEC MDIO";
1489        bus->read = netsec_phy_read;
1490        bus->write = netsec_phy_write;
1491        bus->parent = priv->dev;
1492        priv->mii_bus = bus;
1493
1494        if (dev_of_node(priv->dev)) {
1495                struct device_node *mdio_node, *parent = dev_of_node(priv->dev);
1496
1497                mdio_node = of_get_child_by_name(parent, "mdio");
1498                if (mdio_node) {
1499                        parent = mdio_node;
1500                } else {
 1501                        /* Older firmware doesn't populate the mdio
 1502                         * subnode; register on the parent node for now.
 1503                         */
1504                        dev_info(priv->dev, "Upgrade f/w for mdio subnode!\n");
1505                }
1506
1507                ret = of_mdiobus_register(bus, parent);
1508                of_node_put(mdio_node);
1509
1510                if (ret) {
1511                        dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
1512                        return ret;
1513                }
1514        } else {
1515                /* Mask out all PHYs from auto probing. */
1516                bus->phy_mask = ~0;
1517                ret = mdiobus_register(bus);
1518                if (ret) {
1519                        dev_err(priv->dev, "mdiobus register err(%d)\n", ret);
1520                        return ret;
1521                }
1522
1523                priv->phydev = get_phy_device(bus, phy_addr, false);
1524                if (IS_ERR(priv->phydev)) {
1525                        ret = PTR_ERR(priv->phydev);
1526                        dev_err(priv->dev, "get_phy_device err(%d)\n", ret);
1527                        priv->phydev = NULL;
                            /* the bus was registered above; don't leak it */
                            mdiobus_unregister(bus);
1528                        return -ENODEV;
1529                }
1530
1531                ret = phy_device_register(priv->phydev);
1532                if (ret) {
                            /* free the PHY created above and drop the bus */
                            phy_device_free(priv->phydev);
                            priv->phydev = NULL;
1533                        mdiobus_unregister(bus);
1534                        dev_err(priv->dev,
1535                                "phy_device_register err(%d)\n", ret);
1536                }
1537        }
1538
1539        return ret;
1540}
1541
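    /* netsec_probe() - map the MMIO and EEPROM regions, determine the MAC
     * address (device property first, then EEPROM, then a random fallback),
     * run the DT- or ACPI-specific setup, verify that the hardware is an
     * F_TAIKI-style NETSEC, and finally register the MDIO bus and the
     * net_device.  Runtime PM is held only for the duration of probe;
     * ndo_open/ndo_close take their own references.
     */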
1542static int netsec_probe(struct platform_device *pdev)
1543{
1544        struct resource *mmio_res, *eeprom_res, *irq_res;
1545        u8 *mac, macbuf[ETH_ALEN];
1546        struct netsec_priv *priv;
1547        u32 hw_ver, phy_addr = 0;
1548        struct net_device *ndev;
1549        int ret;
1550
1551        mmio_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1552        if (!mmio_res) {
1553                dev_err(&pdev->dev, "No MMIO resource found.\n");
1554                return -ENODEV;
1555        }
1556
1557        eeprom_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1558        if (!eeprom_res) {
1559                dev_err(&pdev->dev, "No EEPROM resource found.\n");
1560                return -ENODEV;
1561        }
1562
1563        irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1564        if (!irq_res) {
1565                dev_err(&pdev->dev, "No IRQ resource found.\n");
1566                return -ENODEV;
1567        }
1568
1569        ndev = alloc_etherdev(sizeof(*priv));
1570        if (!ndev)
1571                return -ENOMEM;
1572
1573        priv = netdev_priv(ndev);
1574
1575        spin_lock_init(&priv->reglock);
1576        SET_NETDEV_DEV(ndev, &pdev->dev);
1577        platform_set_drvdata(pdev, priv);
1578        ndev->irq = irq_res->start;
1579        priv->dev = &pdev->dev;
1580        priv->ndev = ndev;
1581
1582        priv->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV |
1583                           NETIF_MSG_LINK | NETIF_MSG_PROBE;
1584
1585        priv->phy_interface = device_get_phy_mode(&pdev->dev);
1586        if (priv->phy_interface < 0) {
1587                dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
1588                ret = -ENODEV;
1589                goto free_ndev;
1590        }
1591
1592        priv->ioaddr = devm_ioremap(&pdev->dev, mmio_res->start,
1593                                    resource_size(mmio_res));
1594        if (!priv->ioaddr) {
1595                dev_err(&pdev->dev, "devm_ioremap() failed\n");
1596                ret = -ENXIO;
1597                goto free_ndev;
1598        }
1599
1600        priv->eeprom_base = devm_ioremap(&pdev->dev, eeprom_res->start,
1601                                         resource_size(eeprom_res));
1602        if (!priv->eeprom_base) {
1603                dev_err(&pdev->dev, "devm_ioremap() failed for EEPROM\n");
1604                ret = -ENXIO;
1605                goto free_ndev;
1606        }
1607
1608        mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
1609        if (mac)
1610                ether_addr_copy(ndev->dev_addr, mac);
1611
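            /* No usable address from firmware properties: fall back to the
             * EEPROM region.  The address there appears to be stored as two
             * 32-bit little-endian words at NETSEC_EEPROM_MAC_ADDRESS, hence
             * the byte-swapped reads below.
             */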
1612        if (priv->eeprom_base &&
1613            (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
1614                void __iomem *macp = priv->eeprom_base +
1615                                        NETSEC_EEPROM_MAC_ADDRESS;
1616
1617                ndev->dev_addr[0] = readb(macp + 3);
1618                ndev->dev_addr[1] = readb(macp + 2);
1619                ndev->dev_addr[2] = readb(macp + 1);
1620                ndev->dev_addr[3] = readb(macp + 0);
1621                ndev->dev_addr[4] = readb(macp + 7);
1622                ndev->dev_addr[5] = readb(macp + 6);
1623        }
1624
1625        if (!is_valid_ether_addr(ndev->dev_addr)) {
1626                dev_warn(&pdev->dev, "No MAC address found, using random\n");
1627                eth_hw_addr_random(ndev);
1628        }
1629
1630        if (dev_of_node(&pdev->dev))
1631                ret = netsec_of_probe(pdev, priv);
1632        else
1633                ret = netsec_acpi_probe(pdev, priv, &phy_addr);
1634        if (ret)
1635                goto free_ndev;
1636
1637        if (!priv->freq) {
1638                dev_err(&pdev->dev, "missing PHY reference clock frequency\n");
1639                ret = -ENODEV;
1640                goto free_ndev;
1641        }
1642
1643        /* default for throughput */
1644        priv->et_coalesce.rx_coalesce_usecs = 500;
1645        priv->et_coalesce.rx_max_coalesced_frames = 8;
1646        priv->et_coalesce.tx_coalesce_usecs = 500;
1647        priv->et_coalesce.tx_max_coalesced_frames = 8;
1648
1649        ret = device_property_read_u32(&pdev->dev, "max-frame-size",
1650                                       &ndev->max_mtu);
1651        if (ret < 0)
1652                ndev->max_mtu = ETH_DATA_LEN;
1653
1654        /* runtime_pm coverage just for probe, open/close also cover it */
1655        pm_runtime_enable(&pdev->dev);
1656        pm_runtime_get_sync(&pdev->dev);
1657
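            /* The F_TAIKI version register carries the major number in the
             * upper 16 bits and the minor number in the lower 16 bits; only
             * the major number is checked here.
             */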
1658        hw_ver = netsec_read(priv, NETSEC_REG_F_TAIKI_VER);
1659        /* this driver only supports F_TAIKI style NETSEC */
1660        if (NETSEC_F_NETSEC_VER_MAJOR_NUM(hw_ver) !=
1661            NETSEC_F_NETSEC_VER_MAJOR_NUM(NETSEC_REG_NETSEC_VER_F_TAIKI)) {
1662                ret = -ENODEV;
1663                goto pm_disable;
1664        }
1665
1666        dev_info(&pdev->dev, "hardware revision %d.%d\n",
1667                 hw_ver >> 16, hw_ver & 0xffff);
1668
1669        netif_napi_add(ndev, &priv->napi, netsec_napi_poll, NAPI_BUDGET);
1670
1671        ndev->netdev_ops = &netsec_netdev_ops;
1672        ndev->ethtool_ops = &netsec_ethtool_ops;
1673
1674        ndev->features |= NETIF_F_HIGHDMA | NETIF_F_RXCSUM | NETIF_F_GSO |
1675                                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1676        ndev->hw_features = ndev->features;
1677
1678        priv->rx_cksum_offload_flag = true;
1679
1680        ret = netsec_register_mdio(priv, phy_addr);
1681        if (ret)
1682                goto unreg_napi;
1683
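            /* The controller can address 40 bits of DMA space; a failure to
             * set the mask is only reported, not treated as fatal.
             */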
1684        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
1685                dev_warn(&pdev->dev, "Failed to set DMA mask\n");
1686
1687        ret = register_netdev(ndev);
1688        if (ret) {
1689                netif_err(priv, probe, ndev, "register_netdev() failed\n");
1690                goto unreg_mii;
1691        }
1692
1693        pm_runtime_put_sync(&pdev->dev);
1694        return 0;
1695
1696unreg_mii:
1697        netsec_unregister_mdio(priv);
1698unreg_napi:
1699        netif_napi_del(&priv->napi);
1700pm_disable:
1701        pm_runtime_put_sync(&pdev->dev);
1702        pm_runtime_disable(&pdev->dev);
1703free_ndev:
1704        free_netdev(ndev);
1705        dev_err(&pdev->dev, "init failed\n");
1706
1707        return ret;
1708}
1709
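    /* netsec_remove() - tear everything down in the reverse order of probe. */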
1710static int netsec_remove(struct platform_device *pdev)
1711{
1712        struct netsec_priv *priv = platform_get_drvdata(pdev);
1713
1714        unregister_netdev(priv->ndev);
1715
1716        netsec_unregister_mdio(priv);
1717
1718        netif_napi_del(&priv->napi);
1719
1720        pm_runtime_disable(&pdev->dev);
1721        free_netdev(priv->ndev);
1722
1723        return 0;
1724}
1725
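    /* Runtime PM just gates the block: suspend clears NETSEC_REG_CLK_EN and
     * stops the external reference clock, resume re-enables the clock and
     * turns the D, C and G clock domains back on.
     */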
1726#ifdef CONFIG_PM
1727static int netsec_runtime_suspend(struct device *dev)
1728{
1729        struct netsec_priv *priv = dev_get_drvdata(dev);
1730
1731        netsec_write(priv, NETSEC_REG_CLK_EN, 0);
1732
1733        clk_disable_unprepare(priv->clk);
1734
1735        return 0;
1736}
1737
1738static int netsec_runtime_resume(struct device *dev)
1739{
1740        struct netsec_priv *priv = dev_get_drvdata(dev);
1741
1742        clk_prepare_enable(priv->clk);
1743
1744        netsec_write(priv, NETSEC_REG_CLK_EN, NETSEC_CLK_EN_REG_DOM_D |
1745                                               NETSEC_CLK_EN_REG_DOM_C |
1746                                               NETSEC_CLK_EN_REG_DOM_G);
1747        return 0;
1748}
1749#endif
1750
1751static const struct dev_pm_ops netsec_pm_ops = {
1752        SET_RUNTIME_PM_OPS(netsec_runtime_suspend, netsec_runtime_resume, NULL)
1753};
1754
1755static const struct of_device_id netsec_dt_ids[] = {
1756        { .compatible = "socionext,synquacer-netsec" },
1757        { }
1758};
1759MODULE_DEVICE_TABLE(of, netsec_dt_ids);
1760
1761#ifdef CONFIG_ACPI
1762static const struct acpi_device_id netsec_acpi_ids[] = {
1763        { "SCX0001" },
1764        { }
1765};
1766MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
1767#endif
1768
1769static struct platform_driver netsec_driver = {
1770        .probe  = netsec_probe,
1771        .remove = netsec_remove,
1772        .driver = {
1773                .name = "netsec",
1774                .pm = &netsec_pm_ops,
1775                .of_match_table = netsec_dt_ids,
1776                .acpi_match_table = ACPI_PTR(netsec_acpi_ids),
1777        },
1778};
1779module_platform_driver(netsec_driver);
1780
1781MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
1782MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
1783MODULE_DESCRIPTION("NETSEC Ethernet driver");
1784MODULE_LICENSE("GPL");
1785