linux/drivers/net/ethernet/mediatek/mtk_eth_soc.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *
   4 *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
   5 *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
   6 *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
   7 */
   8
   9#include <linux/of_device.h>
  10#include <linux/of_mdio.h>
  11#include <linux/of_net.h>
  12#include <linux/mfd/syscon.h>
  13#include <linux/regmap.h>
  14#include <linux/clk.h>
  15#include <linux/pm_runtime.h>
  16#include <linux/if_vlan.h>
  17#include <linux/reset.h>
  18#include <linux/tcp.h>
  19#include <linux/interrupt.h>
  20#include <linux/pinctrl/devinfo.h>
  21#include <linux/phylink.h>
  22
  23#include "mtk_eth_soc.h"
  24
  25static int mtk_msg_level = -1;
  26module_param_named(msg_level, mtk_msg_level, int, 0);
  27MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
  28
  29#define MTK_ETHTOOL_STAT(x) { #x, \
  30                              offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
  31
  32/* strings used by ethtool */
  33static const struct mtk_ethtool_stats {
  34        char str[ETH_GSTRING_LEN];
  35        u32 offset;
  36} mtk_ethtool_stats[] = {
  37        MTK_ETHTOOL_STAT(tx_bytes),
  38        MTK_ETHTOOL_STAT(tx_packets),
  39        MTK_ETHTOOL_STAT(tx_skip),
  40        MTK_ETHTOOL_STAT(tx_collisions),
  41        MTK_ETHTOOL_STAT(rx_bytes),
  42        MTK_ETHTOOL_STAT(rx_packets),
  43        MTK_ETHTOOL_STAT(rx_overflow),
  44        MTK_ETHTOOL_STAT(rx_fcs_errors),
  45        MTK_ETHTOOL_STAT(rx_short_errors),
  46        MTK_ETHTOOL_STAT(rx_long_errors),
  47        MTK_ETHTOOL_STAT(rx_checksum_errors),
  48        MTK_ETHTOOL_STAT(rx_flow_control_packets),
  49};
  50
  51static const char * const mtk_clks_source_name[] = {
  52        "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
  53        "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
  54        "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
  55        "sgmii_ck", "eth2pll",
  56};
  57
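    /* Register accessors for the frame engine. Every MAC, DMA and MDIO register
     * used below is reached through the single MMIO window mapped at eth->base;
     * the __raw_*() variants are used, so these helpers imply no byte swapping
     * and no memory barriers of their own.
     */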
  58void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
  59{
  60        __raw_writel(val, eth->base + reg);
  61}
  62
  63u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
  64{
  65        return __raw_readl(eth->base + reg);
  66}
  67
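    /* Read-modify-write helper: clear the bits in @mask, set the bits in @set
     * and write the result back to @reg. For example, mtk_mac_config() below
     * asserts and releases the MT7623 TRGMII RXC reset with:
     *
     *	mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL, TRGMII_RCK_CTRL);
     *	mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
     */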
  68static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg)
  69{
  70        u32 val;
  71
  72        val = mtk_r32(eth, reg);
  73        val &= ~mask;
  74        val |= set;
  75        mtk_w32(eth, val, reg);
  76        return val;
  77}
  78
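    /* MDIO accesses go through the PHY Indirect Access Control register
     * (MTK_PHY_IAC): the opcode, PHY address, register number and (for writes)
     * the 16-bit data are packed into a single write to MTK_PHY_IAC, and the
     * busy-wait below polls the PHY_IAC_ACCESS bit until the transaction
     * completes or PHY_IAC_TIMEOUT jiffies have elapsed. Read results are
     * returned in the low 16 bits of the same register.
     */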
  79static int mtk_mdio_busy_wait(struct mtk_eth *eth)
  80{
  81        unsigned long t_start = jiffies;
  82
  83        while (1) {
  84                if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
  85                        return 0;
  86                if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
  87                        break;
  88                usleep_range(10, 20);
  89        }
  90
  91        dev_err(eth->dev, "mdio: MDIO timeout\n");
  92        return -1;
  93}
  94
  95static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
  96                           u32 phy_register, u32 write_data)
  97{
  98        if (mtk_mdio_busy_wait(eth))
  99                return -1;
 100
 101        write_data &= 0xffff;
 102
 103        mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
 104                (phy_register << PHY_IAC_REG_SHIFT) |
 105                (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
 106                MTK_PHY_IAC);
 107
 108        if (mtk_mdio_busy_wait(eth))
 109                return -1;
 110
 111        return 0;
 112}
 113
 114static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
 115{
 116        u32 d;
 117
 118        if (mtk_mdio_busy_wait(eth))
 119                return 0xffff;
 120
 121        mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
 122                (phy_reg << PHY_IAC_REG_SHIFT) |
 123                (phy_addr << PHY_IAC_ADDR_SHIFT),
 124                MTK_PHY_IAC);
 125
 126        if (mtk_mdio_busy_wait(eth))
 127                return 0xffff;
 128
 129        d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
 130
 131        return d;
 132}
 133
 134static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
 135                          int phy_reg, u16 val)
 136{
 137        struct mtk_eth *eth = bus->priv;
 138
 139        return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
 140}
 141
 142static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
 143{
 144        struct mtk_eth *eth = bus->priv;
 145
 146        return _mtk_mdio_read(eth, phy_addr, phy_reg);
 147}
 148
 149static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
 150                                     phy_interface_t interface)
 151{
 152        u32 val;
 153
 154        /* Check DDR memory type.
 155         * Currently TRGMII mode with DDR2 memory is not supported.
 156         */
 157        regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
 158        if (interface == PHY_INTERFACE_MODE_TRGMII &&
 159            val & SYSCFG_DRAM_TYPE_DDR2) {
 160                dev_err(eth->dev,
 161                        "TRGMII mode with DDR2 memory is not supported!\n");
 162                return -EOPNOTSUPP;
 163        }
 164
 165        val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
 166                ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
 167
 168        regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
 169                           ETHSYS_TRGMII_MT7621_MASK, val);
 170
 171        return 0;
 172}
 173
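    /* Non-MT7621 GMAC0 RGMII/TRGMII clock setup: TRGMII forces the interface
     * into TRGMII mode with the TRGPLL at 500 MHz, while plain RGMII programs
     * the interface mode, the TRGPLL rate and the RX/TX clock controls
     * according to the negotiated speed (1000 vs. 10/100).
     */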
 174static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
 175                                   phy_interface_t interface, int speed)
 176{
 177        u32 val;
 178        int ret;
 179
 180        if (interface == PHY_INTERFACE_MODE_TRGMII) {
 181                mtk_w32(eth, TRGMII_MODE, INTF_MODE);
 182                val = 500000000;
 183                ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
 184                if (ret)
 185                        dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
 186                return;
 187        }
 188
 189        val = (speed == SPEED_1000) ?
 190                INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
 191        mtk_w32(eth, val, INTF_MODE);
 192
 193        regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
 194                           ETHSYS_TRGMII_CLK_SEL362_5,
 195                           ETHSYS_TRGMII_CLK_SEL362_5);
 196
 197        val = (speed == SPEED_1000) ? 250000000 : 500000000;
 198        ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
 199        if (ret)
 200                dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
 201
 202        val = (speed == SPEED_1000) ?
 203                RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
 204        mtk_w32(eth, val, TRGMII_RCK_CTRL);
 205
 206        val = (speed == SPEED_1000) ?
 207                TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
 208        mtk_w32(eth, val, TRGMII_TCK_CTRL);
 209}
 210
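    /* phylink .mac_config callback. When the interface mode changes this picks
     * the SoC path/pin muxing for the new mode (RGMII/TRGMII, SGMII/802.3z or
     * the internal GEPHY), sets up the GMAC0 TRGMII clocking if needed, and
     * programs the per-GMAC GE_MODE field in ETHSYS_SYSCFG0. For SGMII and
     * 802.3z modes the port is handed to the SGMIISYS block before being
     * re-enabled in SYSCFG0. Finally the force-mode bits in the MAC control
     * register (MCR) are updated, but only if they actually changed.
     */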
 211static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
 212                           const struct phylink_link_state *state)
 213{
 214        struct mtk_mac *mac = container_of(config, struct mtk_mac,
 215                                           phylink_config);
 216        struct mtk_eth *eth = mac->hw;
 217        u32 mcr_cur, mcr_new, sid, i;
 218        int val, ge_mode, err;
 219
 220        /* MT76x8 has no hardware settings for the MAC */
 221        if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
 222            mac->interface != state->interface) {
 223                /* Setup soc pin functions */
 224                switch (state->interface) {
 225                case PHY_INTERFACE_MODE_TRGMII:
 226                        if (mac->id)
 227                                goto err_phy;
 228                        if (!MTK_HAS_CAPS(mac->hw->soc->caps,
 229                                          MTK_GMAC1_TRGMII))
 230                                goto err_phy;
 231                        fallthrough;
 232                case PHY_INTERFACE_MODE_RGMII_TXID:
 233                case PHY_INTERFACE_MODE_RGMII_RXID:
 234                case PHY_INTERFACE_MODE_RGMII_ID:
 235                case PHY_INTERFACE_MODE_RGMII:
 236                case PHY_INTERFACE_MODE_MII:
 237                case PHY_INTERFACE_MODE_REVMII:
 238                case PHY_INTERFACE_MODE_RMII:
 239                        if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
 240                                err = mtk_gmac_rgmii_path_setup(eth, mac->id);
 241                                if (err)
 242                                        goto init_err;
 243                        }
 244                        break;
 245                case PHY_INTERFACE_MODE_1000BASEX:
 246                case PHY_INTERFACE_MODE_2500BASEX:
 247                case PHY_INTERFACE_MODE_SGMII:
 248                        if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
 249                                err = mtk_gmac_sgmii_path_setup(eth, mac->id);
 250                                if (err)
 251                                        goto init_err;
 252                        }
 253                        break;
 254                case PHY_INTERFACE_MODE_GMII:
 255                        if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
 256                                err = mtk_gmac_gephy_path_setup(eth, mac->id);
 257                                if (err)
 258                                        goto init_err;
 259                        }
 260                        break;
 261                default:
 262                        goto err_phy;
 263                }
 264
 265                /* Setup clock for 1st gmac */
 266                if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
 267                    !phy_interface_mode_is_8023z(state->interface) &&
 268                    MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
 269                        if (MTK_HAS_CAPS(mac->hw->soc->caps,
 270                                         MTK_TRGMII_MT7621_CLK)) {
 271                                if (mt7621_gmac0_rgmii_adjust(mac->hw,
 272                                                              state->interface))
 273                                        goto err_phy;
 274                        } else {
 275                                mtk_gmac0_rgmii_adjust(mac->hw,
 276                                                       state->interface,
 277                                                       state->speed);
 278
 279                                /* mt7623_pad_clk_setup */
 280                                for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
 281                                        mtk_w32(mac->hw,
 282                                                TD_DM_DRVP(8) | TD_DM_DRVN(8),
 283                                                TRGMII_TD_ODT(i));
 284
 285                                /* Assert/release MT7623 RXC reset */
 286                                mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
 287                                        TRGMII_RCK_CTRL);
 288                                mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
 289                        }
 290                }
 291
 292                ge_mode = 0;
 293                switch (state->interface) {
 294                case PHY_INTERFACE_MODE_MII:
 295                case PHY_INTERFACE_MODE_GMII:
 296                        ge_mode = 1;
 297                        break;
 298                case PHY_INTERFACE_MODE_REVMII:
 299                        ge_mode = 2;
 300                        break;
 301                case PHY_INTERFACE_MODE_RMII:
 302                        if (mac->id)
 303                                goto err_phy;
 304                        ge_mode = 3;
 305                        break;
 306                default:
 307                        break;
 308                }
 309
 310                /* put the gmac into the right mode */
 311                regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
 312                val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
 313                val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
 314                regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
 315
 316                mac->interface = state->interface;
 317        }
 318
 319        /* SGMII */
 320        if (state->interface == PHY_INTERFACE_MODE_SGMII ||
 321            phy_interface_mode_is_8023z(state->interface)) {
 322                /* The path from GMAC to SGMII will be enabled once the
 323                 * SGMIISYS setup is done.
 324                 */
 325                regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
 326
 327                regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
 328                                   SYSCFG0_SGMII_MASK,
 329                                   ~(u32)SYSCFG0_SGMII_MASK);
 330
 331                /* Decide how GMAC and SGMIISYS are mapped */
 332                sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
 333                       0 : mac->id;
 334
 335                /* Setup SGMIISYS with the determined property */
 336                if (state->interface != PHY_INTERFACE_MODE_SGMII)
 337                        err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
 338                                                         state);
 339                else if (phylink_autoneg_inband(mode))
 340                        err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
 341
 342                if (err)
 343                        goto init_err;
 344
 345                regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
 346                                   SYSCFG0_SGMII_MASK, val);
 347        } else if (phylink_autoneg_inband(mode)) {
 348                dev_err(eth->dev,
 349                        "In-band mode not supported in non SGMII mode!\n");
 350                return;
 351        }
 352
 353        /* Setup gmac */
 354        mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
 355        mcr_new = mcr_cur;
 356        mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
 357                   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
 358
 359        /* Only update control register when needed! */
 360        if (mcr_new != mcr_cur)
 361                mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
 362
 363        return;
 364
 365err_phy:
 366        dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
 367                mac->id, phy_modes(state->interface));
 368        return;
 369
 370init_err:
 371        dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
 372                mac->id, phy_modes(state->interface), err);
 373}
 374
 375static void mtk_mac_pcs_get_state(struct phylink_config *config,
 376                                  struct phylink_link_state *state)
 377{
 378        struct mtk_mac *mac = container_of(config, struct mtk_mac,
 379                                           phylink_config);
 380        u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id));
 381
 382        state->link = (pmsr & MAC_MSR_LINK);
 383        state->duplex = (pmsr & MAC_MSR_DPX) >> 1;
 384
 385        switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) {
 386        case 0:
 387                state->speed = SPEED_10;
 388                break;
 389        case MAC_MSR_SPEED_100:
 390                state->speed = SPEED_100;
 391                break;
 392        case MAC_MSR_SPEED_1000:
 393                state->speed = SPEED_1000;
 394                break;
 395        default:
 396                state->speed = SPEED_UNKNOWN;
 397                break;
 398        }
 399
 400        state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX);
 401        if (pmsr & MAC_MSR_RX_FC)
 402                state->pause |= MLO_PAUSE_RX;
 403        if (pmsr & MAC_MSR_TX_FC)
 404                state->pause |= MLO_PAUSE_TX;
 405}
 406
 407static void mtk_mac_an_restart(struct phylink_config *config)
 408{
 409        struct mtk_mac *mac = container_of(config, struct mtk_mac,
 410                                           phylink_config);
 411
 412        mtk_sgmii_restart_an(mac->hw, mac->id);
 413}
 414
 415static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
 416                              phy_interface_t interface)
 417{
 418        struct mtk_mac *mac = container_of(config, struct mtk_mac,
 419                                           phylink_config);
 420        u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
 421
 422        mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
 423        mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
 424}
 425
 426static void mtk_mac_link_up(struct phylink_config *config,
 427                            struct phy_device *phy,
 428                            unsigned int mode, phy_interface_t interface,
 429                            int speed, int duplex, bool tx_pause, bool rx_pause)
 430{
 431        struct mtk_mac *mac = container_of(config, struct mtk_mac,
 432                                           phylink_config);
 433        u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
 434
 435        mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
 436                 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
 437                 MAC_MCR_FORCE_RX_FC);
 438
 439        /* Configure speed */
 440        switch (speed) {
 441        case SPEED_2500:
 442        case SPEED_1000:
 443                mcr |= MAC_MCR_SPEED_1000;
 444                break;
 445        case SPEED_100:
 446                mcr |= MAC_MCR_SPEED_100;
 447                break;
 448        }
 449
 450        /* Configure duplex */
 451        if (duplex == DUPLEX_FULL)
 452                mcr |= MAC_MCR_FORCE_DPX;
 453
 454        /* Configure pause modes - phylink will avoid these for half duplex */
 455        if (tx_pause)
 456                mcr |= MAC_MCR_FORCE_TX_FC;
 457        if (rx_pause)
 458                mcr |= MAC_MCR_FORCE_RX_FC;
 459
 460        mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
 461        mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
 462}
 463
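    /* phylink .validate callback: interface modes the SoC cannot route to this
     * GMAC (per the MTK_RGMII/MTK_TRGMII/MTK_SGMII capability bits) are rejected
     * outright, otherwise the supported/advertising masks are restricted to the
     * link modes the requested interface can carry. PHY_INTERFACE_MODE_NA
     * additionally reports everything the SoC capabilities allow so phylink can
     * pick an interface itself.
     */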
 464static void mtk_validate(struct phylink_config *config,
 465                         unsigned long *supported,
 466                         struct phylink_link_state *state)
 467{
 468        struct mtk_mac *mac = container_of(config, struct mtk_mac,
 469                                           phylink_config);
 470        __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 471
 472        if (state->interface != PHY_INTERFACE_MODE_NA &&
 473            state->interface != PHY_INTERFACE_MODE_MII &&
 474            state->interface != PHY_INTERFACE_MODE_GMII &&
 475            !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
 476              phy_interface_mode_is_rgmii(state->interface)) &&
 477            !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
 478              !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
 479            !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
 480              (state->interface == PHY_INTERFACE_MODE_SGMII ||
 481               phy_interface_mode_is_8023z(state->interface)))) {
 482                linkmode_zero(supported);
 483                return;
 484        }
 485
 486        phylink_set_port_modes(mask);
 487        phylink_set(mask, Autoneg);
 488
 489        switch (state->interface) {
 490        case PHY_INTERFACE_MODE_TRGMII:
 491                phylink_set(mask, 1000baseT_Full);
 492                break;
 493        case PHY_INTERFACE_MODE_1000BASEX:
 494        case PHY_INTERFACE_MODE_2500BASEX:
 495                phylink_set(mask, 1000baseX_Full);
 496                phylink_set(mask, 2500baseX_Full);
 497                break;
 498        case PHY_INTERFACE_MODE_GMII:
 499        case PHY_INTERFACE_MODE_RGMII:
 500        case PHY_INTERFACE_MODE_RGMII_ID:
 501        case PHY_INTERFACE_MODE_RGMII_RXID:
 502        case PHY_INTERFACE_MODE_RGMII_TXID:
 503                phylink_set(mask, 1000baseT_Half);
 504                fallthrough;
 505        case PHY_INTERFACE_MODE_SGMII:
 506                phylink_set(mask, 1000baseT_Full);
 507                phylink_set(mask, 1000baseX_Full);
 508                fallthrough;
 509        case PHY_INTERFACE_MODE_MII:
 510        case PHY_INTERFACE_MODE_RMII:
 511        case PHY_INTERFACE_MODE_REVMII:
 512        case PHY_INTERFACE_MODE_NA:
 513        default:
 514                phylink_set(mask, 10baseT_Half);
 515                phylink_set(mask, 10baseT_Full);
 516                phylink_set(mask, 100baseT_Half);
 517                phylink_set(mask, 100baseT_Full);
 518                break;
 519        }
 520
 521        if (state->interface == PHY_INTERFACE_MODE_NA) {
 522                if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
 523                        phylink_set(mask, 1000baseT_Full);
 524                        phylink_set(mask, 1000baseX_Full);
 525                        phylink_set(mask, 2500baseX_Full);
 526                }
 527                if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
 528                        phylink_set(mask, 1000baseT_Full);
 529                        phylink_set(mask, 1000baseT_Half);
 530                        phylink_set(mask, 1000baseX_Full);
 531                }
 532                if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
 533                        phylink_set(mask, 1000baseT_Full);
 534                        phylink_set(mask, 1000baseT_Half);
 535                }
 536        }
 537
 538        phylink_set(mask, Pause);
 539        phylink_set(mask, Asym_Pause);
 540
 541        linkmode_and(supported, supported, mask);
 542        linkmode_and(state->advertising, state->advertising, mask);
 543
 544        /* We can only operate at 2500BaseX or 1000BaseX. If requested
 545         * to advertise both, only report advertising at 2500BaseX.
 546         */
 547        phylink_helper_basex_speed(state);
 548}
 549
 550static const struct phylink_mac_ops mtk_phylink_ops = {
 551        .validate = mtk_validate,
 552        .mac_pcs_get_state = mtk_mac_pcs_get_state,
 553        .mac_an_restart = mtk_mac_an_restart,
 554        .mac_config = mtk_mac_config,
 555        .mac_link_down = mtk_mac_link_down,
 556        .mac_link_up = mtk_mac_link_up,
 557};
 558
 559static int mtk_mdio_init(struct mtk_eth *eth)
 560{
 561        struct device_node *mii_np;
 562        int ret;
 563
 564        mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
 565        if (!mii_np) {
 566                dev_err(eth->dev, "no %s child node found", "mdio-bus");
 567                return -ENODEV;
 568        }
 569
 570        if (!of_device_is_available(mii_np)) {
 571                ret = -ENODEV;
 572                goto err_put_node;
 573        }
 574
 575        eth->mii_bus = devm_mdiobus_alloc(eth->dev);
 576        if (!eth->mii_bus) {
 577                ret = -ENOMEM;
 578                goto err_put_node;
 579        }
 580
 581        eth->mii_bus->name = "mdio";
 582        eth->mii_bus->read = mtk_mdio_read;
 583        eth->mii_bus->write = mtk_mdio_write;
 584        eth->mii_bus->priv = eth;
 585        eth->mii_bus->parent = eth->dev;
 586
 587        snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
 588        ret = of_mdiobus_register(eth->mii_bus, mii_np);
 589
 590err_put_node:
 591        of_node_put(mii_np);
 592        return ret;
 593}
 594
 595static void mtk_mdio_cleanup(struct mtk_eth *eth)
 596{
 597        if (!eth->mii_bus)
 598                return;
 599
 600        mdiobus_unregister(eth->mii_bus);
 601}
 602
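    /* Interrupt mask helpers. TX interrupts are masked through
     * eth->tx_int_mask_reg (which register that is depends on the SoC), RX
     * interrupts through MTK_PDMA_INT_MASK; both are updated read-modify-write
     * under their own spinlock so they can safely be called from IRQ and NAPI
     * context.
     */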
 603static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
 604{
 605        unsigned long flags;
 606        u32 val;
 607
 608        spin_lock_irqsave(&eth->tx_irq_lock, flags);
 609        val = mtk_r32(eth, eth->tx_int_mask_reg);
 610        mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
 611        spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
 612}
 613
 614static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
 615{
 616        unsigned long flags;
 617        u32 val;
 618
 619        spin_lock_irqsave(&eth->tx_irq_lock, flags);
 620        val = mtk_r32(eth, eth->tx_int_mask_reg);
 621        mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
 622        spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
 623}
 624
 625static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
 626{
 627        unsigned long flags;
 628        u32 val;
 629
 630        spin_lock_irqsave(&eth->rx_irq_lock, flags);
 631        val = mtk_r32(eth, MTK_PDMA_INT_MASK);
 632        mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
 633        spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
 634}
 635
 636static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
 637{
 638        unsigned long flags;
 639        u32 val;
 640
 641        spin_lock_irqsave(&eth->rx_irq_lock, flags);
 642        val = mtk_r32(eth, MTK_PDMA_INT_MASK);
 643        mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
 644        spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
 645}
 646
 647static int mtk_set_mac_address(struct net_device *dev, void *p)
 648{
 649        int ret = eth_mac_addr(dev, p);
 650        struct mtk_mac *mac = netdev_priv(dev);
 651        struct mtk_eth *eth = mac->hw;
 652        const char *macaddr = dev->dev_addr;
 653
 654        if (ret)
 655                return ret;
 656
 657        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
 658                return -EBUSY;
 659
 660        spin_lock_bh(&mac->hw->page_lock);
 661        if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 662                mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
 663                        MT7628_SDM_MAC_ADRH);
 664                mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
 665                        (macaddr[4] << 8) | macaddr[5],
 666                        MT7628_SDM_MAC_ADRL);
 667        } else {
 668                mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
 669                        MTK_GDMA_MAC_ADRH(mac->id));
 670                mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
 671                        (macaddr[4] << 8) | macaddr[5],
 672                        MTK_GDMA_MAC_ADRL(mac->id));
 673        }
 674        spin_unlock_bh(&mac->hw->page_lock);
 675
 676        return 0;
 677}
 678
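    /* Fold the hardware MIB counters of one GMAC into its software counters.
     * The counters live at fixed offsets from MTK_GDM1_TX_GBCNT plus the
     * per-MAC reg_offset; the RX/TX byte counters are 64 bits wide and are read
     * as a low/high register pair. The u64_stats syncp lets readers such as
     * mtk_get_stats64() take a consistent snapshot.
     */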
 679void mtk_stats_update_mac(struct mtk_mac *mac)
 680{
 681        struct mtk_hw_stats *hw_stats = mac->hw_stats;
 682        unsigned int base = MTK_GDM1_TX_GBCNT;
 683        u64 stats;
 684
 685        base += hw_stats->reg_offset;
 686
 687        u64_stats_update_begin(&hw_stats->syncp);
 688
 689        hw_stats->rx_bytes += mtk_r32(mac->hw, base);
 690        stats =  mtk_r32(mac->hw, base + 0x04);
 691        if (stats)
 692                hw_stats->rx_bytes += (stats << 32);
 693        hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
 694        hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
 695        hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
 696        hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
 697        hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
 698        hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
 699        hw_stats->rx_flow_control_packets +=
 700                                        mtk_r32(mac->hw, base + 0x24);
 701        hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
 702        hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
 703        hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
 704        stats =  mtk_r32(mac->hw, base + 0x34);
 705        if (stats)
 706                hw_stats->tx_bytes += (stats << 32);
 707        hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
 708        u64_stats_update_end(&hw_stats->syncp);
 709}
 710
 711static void mtk_stats_update(struct mtk_eth *eth)
 712{
 713        int i;
 714
 715        for (i = 0; i < MTK_MAC_COUNT; i++) {
 716                if (!eth->mac[i] || !eth->mac[i]->hw_stats)
 717                        continue;
 718                if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
 719                        mtk_stats_update_mac(eth->mac[i]);
 720                        spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
 721                }
 722        }
 723}
 724
 725static void mtk_get_stats64(struct net_device *dev,
 726                            struct rtnl_link_stats64 *storage)
 727{
 728        struct mtk_mac *mac = netdev_priv(dev);
 729        struct mtk_hw_stats *hw_stats = mac->hw_stats;
 730        unsigned int start;
 731
 732        if (netif_running(dev) && netif_device_present(dev)) {
 733                if (spin_trylock_bh(&hw_stats->stats_lock)) {
 734                        mtk_stats_update_mac(mac);
 735                        spin_unlock_bh(&hw_stats->stats_lock);
 736                }
 737        }
 738
 739        do {
 740                start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
 741                storage->rx_packets = hw_stats->rx_packets;
 742                storage->tx_packets = hw_stats->tx_packets;
 743                storage->rx_bytes = hw_stats->rx_bytes;
 744                storage->tx_bytes = hw_stats->tx_bytes;
 745                storage->collisions = hw_stats->tx_collisions;
 746                storage->rx_length_errors = hw_stats->rx_short_errors +
 747                        hw_stats->rx_long_errors;
 748                storage->rx_over_errors = hw_stats->rx_overflow;
 749                storage->rx_crc_errors = hw_stats->rx_fcs_errors;
 750                storage->rx_errors = hw_stats->rx_checksum_errors;
 751                storage->tx_aborted_errors = hw_stats->tx_skip;
 752        } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
 753
 754        storage->tx_errors = dev->stats.tx_errors;
 755        storage->rx_dropped = dev->stats.rx_dropped;
 756        storage->tx_dropped = dev->stats.tx_dropped;
 757}
 758
 759static inline int mtk_max_frag_size(int mtu)
 760{
 761        /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
 762        if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
 763                mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
 764
 765        return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
 766                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 767}
 768
 769static inline int mtk_max_buf_size(int frag_size)
 770{
 771        int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
 772                       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 773
 774        WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
 775
 776        return buf_size;
 777}
 778
 779static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
 780                                   struct mtk_rx_dma *dma_rxd)
 781{
 782        rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
 783        rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
 784        rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
 785        rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
 786}
 787
 788/* the qdma core needs scratch memory to be set up */
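    /* Each descriptor of the scratch ring points at one MTK_QDMA_PAGE_SIZE
     * chunk of scratch_head via txd1 and links to the next descriptor via txd2;
     * the physical head and tail of the ring are then programmed into
     * MTK_QDMA_FQ_HEAD/MTK_QDMA_FQ_TAIL so the QDMA engine can draw free
     * buffers from it.
     */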
 789static int mtk_init_fq_dma(struct mtk_eth *eth)
 790{
 791        dma_addr_t phy_ring_tail;
 792        int cnt = MTK_DMA_SIZE;
 793        dma_addr_t dma_addr;
 794        int i;
 795
 796        eth->scratch_ring = dma_alloc_coherent(eth->dev,
 797                                               cnt * sizeof(struct mtk_tx_dma),
 798                                               &eth->phy_scratch_ring,
 799                                               GFP_ATOMIC);
 800        if (unlikely(!eth->scratch_ring))
 801                return -ENOMEM;
 802
 803        eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
 804                                    GFP_KERNEL);
 805        if (unlikely(!eth->scratch_head))
 806                return -ENOMEM;
 807
 808        dma_addr = dma_map_single(eth->dev,
 809                                  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
 810                                  DMA_FROM_DEVICE);
 811        if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
 812                return -ENOMEM;
 813
 814        phy_ring_tail = eth->phy_scratch_ring +
 815                        (sizeof(struct mtk_tx_dma) * (cnt - 1));
 816
 817        for (i = 0; i < cnt; i++) {
 818                eth->scratch_ring[i].txd1 =
 819                                        (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
 820                if (i < cnt - 1)
 821                        eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
 822                                ((i + 1) * sizeof(struct mtk_tx_dma)));
 823                eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
 824        }
 825
 826        mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
 827        mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
 828        mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
 829        mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
 830
 831        return 0;
 832}
 833
 834static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
 835{
 836        void *ret = ring->dma;
 837
 838        return ret + (desc - ring->phys);
 839}
 840
 841static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
 842                                                    struct mtk_tx_dma *txd)
 843{
 844        int idx = txd - ring->dma;
 845
 846        return &ring->buf[idx];
 847}
 848
 849static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
 850                                       struct mtk_tx_dma *dma)
 851{
 852        return ring->dma_pdma - ring->dma + dma;
 853}
 854
 855static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
 856{
 857        return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
 858}
 859
 860static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
 861{
 862        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 863                if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
 864                        dma_unmap_single(eth->dev,
 865                                         dma_unmap_addr(tx_buf, dma_addr0),
 866                                         dma_unmap_len(tx_buf, dma_len0),
 867                                         DMA_TO_DEVICE);
 868                } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
 869                        dma_unmap_page(eth->dev,
 870                                       dma_unmap_addr(tx_buf, dma_addr0),
 871                                       dma_unmap_len(tx_buf, dma_len0),
 872                                       DMA_TO_DEVICE);
 873                }
 874        } else {
 875                if (dma_unmap_len(tx_buf, dma_len0)) {
 876                        dma_unmap_page(eth->dev,
 877                                       dma_unmap_addr(tx_buf, dma_addr0),
 878                                       dma_unmap_len(tx_buf, dma_len0),
 879                                       DMA_TO_DEVICE);
 880                }
 881
 882                if (dma_unmap_len(tx_buf, dma_len1)) {
 883                        dma_unmap_page(eth->dev,
 884                                       dma_unmap_addr(tx_buf, dma_addr1),
 885                                       dma_unmap_len(tx_buf, dma_len1),
 886                                       DMA_TO_DEVICE);
 887                }
 888        }
 889
 890        tx_buf->flags = 0;
 891        if (tx_buf->skb &&
 892            (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
 893                dev_kfree_skb_any(tx_buf->skb);
 894        tx_buf->skb = NULL;
 895}
 896
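    /* Record a DMA mapping in the tx_buf and, for PDMA, in the descriptor
     * itself. PDMA descriptors carry two buffers each: even-indexed buffers go
     * into txd1/TX_DMA_PLEN0 and odd-indexed buffers into txd3/TX_DMA_PLEN1,
     * which is why the unmap information is split across dma_addr0/dma_len0 and
     * dma_addr1/dma_len1 (see mtk_tx_unmap()).
     */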
 897static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
 898                         struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
 899                         size_t size, int idx)
 900{
 901        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 902                dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
 903                dma_unmap_len_set(tx_buf, dma_len0, size);
 904        } else {
 905                if (idx & 1) {
 906                        txd->txd3 = mapped_addr;
 907                        txd->txd2 |= TX_DMA_PLEN1(size);
 908                        dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
 909                        dma_unmap_len_set(tx_buf, dma_len1, size);
 910                } else {
 911                        tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
 912                        txd->txd1 = mapped_addr;
 913                        txd->txd2 = TX_DMA_PLEN0(size);
 914                        dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
 915                        dma_unmap_len_set(tx_buf, dma_len0, size);
 916                }
 917        }
 918}
 919
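    /* Map an skb onto the TX ring. The head descriptor carries the linear part
     * of the skb together with the checksum/TSO/VLAN flags and the forward port
     * in txd4; each fragment is split into chunks of at most MTK_TX_DMA_BUF_LEN,
     * and TX_DMA_LS0 marks the last segment. On QDMA the hardware is kicked by
     * advancing MTK_QTX_CTX_PTR, on PDMA by writing the next index to
     * MT7628_TX_CTX_IDX0. If any mapping fails, every descriptor claimed so far
     * is unwound via the err_dma loop.
     */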
 920static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 921                      int tx_num, struct mtk_tx_ring *ring, bool gso)
 922{
 923        struct mtk_mac *mac = netdev_priv(dev);
 924        struct mtk_eth *eth = mac->hw;
 925        struct mtk_tx_dma *itxd, *txd;
 926        struct mtk_tx_dma *itxd_pdma, *txd_pdma;
 927        struct mtk_tx_buf *itx_buf, *tx_buf;
 928        dma_addr_t mapped_addr;
 929        unsigned int nr_frags;
 930        int i, n_desc = 1;
 931        u32 txd4 = 0, fport;
 932        int k = 0;
 933
 934        itxd = ring->next_free;
 935        itxd_pdma = qdma_to_pdma(ring, itxd);
 936        if (itxd == ring->last_free)
 937                return -ENOMEM;
 938
 939        /* set the forward port */
 940        fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
 941        txd4 |= fport;
 942
 943        itx_buf = mtk_desc_to_tx_buf(ring, itxd);
 944        memset(itx_buf, 0, sizeof(*itx_buf));
 945
 946        if (gso)
 947                txd4 |= TX_DMA_TSO;
 948
 949        /* TX Checksum offload */
 950        if (skb->ip_summed == CHECKSUM_PARTIAL)
 951                txd4 |= TX_DMA_CHKSUM;
 952
 953        /* VLAN header offload */
 954        if (skb_vlan_tag_present(skb))
 955                txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
 956
 957        mapped_addr = dma_map_single(eth->dev, skb->data,
 958                                     skb_headlen(skb), DMA_TO_DEVICE);
 959        if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 960                return -ENOMEM;
 961
 962        WRITE_ONCE(itxd->txd1, mapped_addr);
 963        itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
 964        itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
 965                          MTK_TX_FLAGS_FPORT1;
 966        setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
 967                     k++);
 968
 969        /* TX SG offload */
 970        txd = itxd;
 971        txd_pdma = qdma_to_pdma(ring, txd);
 972        nr_frags = skb_shinfo(skb)->nr_frags;
 973
 974        for (i = 0; i < nr_frags; i++) {
 975                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 976                unsigned int offset = 0;
 977                int frag_size = skb_frag_size(frag);
 978
 979                while (frag_size) {
 980                        bool last_frag = false;
 981                        unsigned int frag_map_size;
 982                        bool new_desc = true;
 983
 984                        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
 985                            (i & 0x1)) {
 986                                txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
 987                                txd_pdma = qdma_to_pdma(ring, txd);
 988                                if (txd == ring->last_free)
 989                                        goto err_dma;
 990
 991                                n_desc++;
 992                        } else {
 993                                new_desc = false;
 994                        }
 995
 996
 997                        frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
 998                        mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
 999                                                       frag_map_size,
1000                                                       DMA_TO_DEVICE);
1001                        if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
1002                                goto err_dma;
1003
1004                        if (i == nr_frags - 1 &&
1005                            (frag_size - frag_map_size) == 0)
1006                                last_frag = true;
1007
1008                        WRITE_ONCE(txd->txd1, mapped_addr);
1009                        WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
1010                                               TX_DMA_PLEN0(frag_map_size) |
1011                                               last_frag * TX_DMA_LS0));
1012                        WRITE_ONCE(txd->txd4, fport);
1013
1014                        tx_buf = mtk_desc_to_tx_buf(ring, txd);
1015                        if (new_desc)
1016                                memset(tx_buf, 0, sizeof(*tx_buf));
1017                        tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
1018                        tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
1019                        tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
1020                                         MTK_TX_FLAGS_FPORT1;
1021
1022                        setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
1023                                     frag_map_size, k++);
1024
1025                        frag_size -= frag_map_size;
1026                        offset += frag_map_size;
1027                }
1028        }
1029
1030        /* store skb for later cleanup */
1031        itx_buf->skb = skb;
1032
1033        WRITE_ONCE(itxd->txd4, txd4);
1034        WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
1035                                (!nr_frags * TX_DMA_LS0)));
1036        if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1037                if (k & 0x1)
1038                        txd_pdma->txd2 |= TX_DMA_LS0;
1039                else
1040                        txd_pdma->txd2 |= TX_DMA_LS1;
1041        }
1042
1043        netdev_sent_queue(dev, skb->len);
1044        skb_tx_timestamp(skb);
1045
1046        ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
1047        atomic_sub(n_desc, &ring->free_count);
1048
1049        /* make sure that all changes to the dma ring are flushed before we
1050         * continue
1051         */
1052        wmb();
1053
1054        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1055                if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
1056                    !netdev_xmit_more())
1057                        mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
1058        } else {
1059                int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
1060                                             ring->dma_size);
1061                mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
1062        }
1063
1064        return 0;
1065
1066err_dma:
1067        do {
1068                tx_buf = mtk_desc_to_tx_buf(ring, itxd);
1069
1070                /* unmap dma */
1071                mtk_tx_unmap(eth, tx_buf);
1072
1073                itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1074                if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1075                        itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
1076
1077                itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
1078                itxd_pdma = qdma_to_pdma(ring, itxd);
1079        } while (itxd != txd);
1080
1081        return -ENOMEM;
1082}
1083
1084static inline int mtk_cal_txd_req(struct sk_buff *skb)
1085{
1086        int i, nfrags;
1087        skb_frag_t *frag;
1088
1089        nfrags = 1;
1090        if (skb_is_gso(skb)) {
1091                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1092                        frag = &skb_shinfo(skb)->frags[i];
1093                        nfrags += DIV_ROUND_UP(skb_frag_size(frag),
1094                                                MTK_TX_DMA_BUF_LEN);
1095                }
1096        } else {
1097                nfrags += skb_shinfo(skb)->nr_frags;
1098        }
1099
1100        return nfrags;
1101}
1102
1103static int mtk_queue_stopped(struct mtk_eth *eth)
1104{
1105        int i;
1106
1107        for (i = 0; i < MTK_MAC_COUNT; i++) {
1108                if (!eth->netdev[i])
1109                        continue;
1110                if (netif_queue_stopped(eth->netdev[i]))
1111                        return 1;
1112        }
1113
1114        return 0;
1115}
1116
1117static void mtk_wake_queue(struct mtk_eth *eth)
1118{
1119        int i;
1120
1121        for (i = 0; i < MTK_MAC_COUNT; i++) {
1122                if (!eth->netdev[i])
1123                        continue;
1124                netif_wake_queue(eth->netdev[i]);
1125        }
1126}
1127
1128static void mtk_stop_queue(struct mtk_eth *eth)
1129{
1130        int i;
1131
1132        for (i = 0; i < MTK_MAC_COUNT; i++) {
1133                if (!eth->netdev[i])
1134                        continue;
1135                netif_stop_queue(eth->netdev[i]);
1136        }
1137}
1138
1139static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
1140{
1141        struct mtk_mac *mac = netdev_priv(dev);
1142        struct mtk_eth *eth = mac->hw;
1143        struct mtk_tx_ring *ring = &eth->tx_ring;
1144        struct net_device_stats *stats = &dev->stats;
1145        bool gso = false;
1146        int tx_num;
1147
1148        /* normally we can rely on the stack not calling this more than once,
1149         * however we have 2 queues running on the same ring so we need to lock
1150         * the ring access
1151         */
1152        spin_lock(&eth->page_lock);
1153
1154        if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1155                goto drop;
1156
1157        tx_num = mtk_cal_txd_req(skb);
1158        if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
1159                mtk_stop_queue(eth);
1160                netif_err(eth, tx_queued, dev,
1161                          "Tx Ring full when queue awake!\n");
1162                spin_unlock(&eth->page_lock);
1163                return NETDEV_TX_BUSY;
1164        }
1165
1166        /* TSO: fill MSS info in tcp checksum field */
1167        if (skb_is_gso(skb)) {
1168                if (skb_cow_head(skb, 0)) {
1169                        netif_warn(eth, tx_err, dev,
1170                                   "GSO expand head fail.\n");
1171                        goto drop;
1172                }
1173
1174                if (skb_shinfo(skb)->gso_type &
1175                                (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1176                        gso = true;
1177                        tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
1178                }
1179        }
1180
1181        if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
1182                goto drop;
1183
1184        if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
1185                mtk_stop_queue(eth);
1186
1187        spin_unlock(&eth->page_lock);
1188
1189        return NETDEV_TX_OK;
1190
1191drop:
1192        spin_unlock(&eth->page_lock);
1193        stats->tx_dropped++;
1194        dev_kfree_skb_any(skb);
1195        return NETDEV_TX_OK;
1196}
1197
1198static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
1199{
1200        int i;
1201        struct mtk_rx_ring *ring;
1202        int idx;
1203
1204        if (!eth->hwlro)
1205                return &eth->rx_ring[0];
1206
1207        for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1208                ring = &eth->rx_ring[i];
1209                idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1210                if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
1211                        ring->calc_idx_update = true;
1212                        return ring;
1213                }
1214        }
1215
1216        return NULL;
1217}
1218
1219static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
1220{
1221        struct mtk_rx_ring *ring;
1222        int i;
1223
1224        if (!eth->hwlro) {
1225                ring = &eth->rx_ring[0];
1226                mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1227        } else {
1228                for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
1229                        ring = &eth->rx_ring[i];
1230                        if (ring->calc_idx_update) {
1231                                ring->calc_idx_update = false;
1232                                mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
1233                        }
1234                }
1235        }
1236}
1237
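    /* NAPI RX poll. For each completed descriptor a replacement buffer is
     * allocated and mapped first, so the ring never ends up short of buffers:
     * on allocation or mapping failure the old buffer is simply recycled and
     * the packet dropped. Otherwise the old buffer is turned into an skb with
     * build_skb(), hardware checksum and VLAN information is propagated, and
     * the descriptor is handed back to the DMA engine. With hardware LRO the
     * ring to service is picked by mtk_get_rx_ring().
     */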
1238static int mtk_poll_rx(struct napi_struct *napi, int budget,
1239                       struct mtk_eth *eth)
1240{
1241        struct mtk_rx_ring *ring;
1242        int idx;
1243        struct sk_buff *skb;
1244        u8 *data, *new_data;
1245        struct mtk_rx_dma *rxd, trxd;
1246        int done = 0;
1247
1248        while (done < budget) {
1249                struct net_device *netdev;
1250                unsigned int pktlen;
1251                dma_addr_t dma_addr;
1252                int mac;
1253
1254                ring = mtk_get_rx_ring(eth);
1255                if (unlikely(!ring))
1256                        goto rx_done;
1257
1258                idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
1259                rxd = &ring->dma[idx];
1260                data = ring->data[idx];
1261
1262                mtk_rx_get_desc(&trxd, rxd);
1263                if (!(trxd.rxd2 & RX_DMA_DONE))
1264                        break;
1265
1266                /* find out which mac the packet comes from. values start at 1 */
1267                if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
1268                        mac = 0;
1269                } else {
1270                        mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
1271                                RX_DMA_FPORT_MASK;
1272                        mac--;
1273                }
1274
1275                if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
1276                             !eth->netdev[mac]))
1277                        goto release_desc;
1278
1279                netdev = eth->netdev[mac];
1280
1281                if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
1282                        goto release_desc;
1283
1284                /* alloc new buffer */
1285                new_data = napi_alloc_frag(ring->frag_size);
1286                if (unlikely(!new_data)) {
1287                        netdev->stats.rx_dropped++;
1288                        goto release_desc;
1289                }
1290                dma_addr = dma_map_single(eth->dev,
1291                                          new_data + NET_SKB_PAD +
1292                                          eth->ip_align,
1293                                          ring->buf_size,
1294                                          DMA_FROM_DEVICE);
1295                if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
1296                        skb_free_frag(new_data);
1297                        netdev->stats.rx_dropped++;
1298                        goto release_desc;
1299                }
1300
1301                /* receive data */
1302                skb = build_skb(data, ring->frag_size);
1303                if (unlikely(!skb)) {
1304                        skb_free_frag(new_data);
1305                        netdev->stats.rx_dropped++;
1306                        goto release_desc;
1307                }
1308                skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1309
1310                dma_unmap_single(eth->dev, trxd.rxd1,
1311                                 ring->buf_size, DMA_FROM_DEVICE);
1312                pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1313                skb->dev = netdev;
1314                skb_put(skb, pktlen);
1315                if (trxd.rxd4 & eth->rx_dma_l4_valid)
1316                        skb->ip_summed = CHECKSUM_UNNECESSARY;
1317                else
1318                        skb_checksum_none_assert(skb);
1319                skb->protocol = eth_type_trans(skb, netdev);
1320
1321                if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
1322                    RX_DMA_VID(trxd.rxd3))
1323                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1324                                               RX_DMA_VID(trxd.rxd3));
1325                skb_record_rx_queue(skb, 0);
1326                napi_gro_receive(napi, skb);
1327
1328                ring->data[idx] = new_data;
1329                rxd->rxd1 = (unsigned int)dma_addr;
1330
1331release_desc:
1332                if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1333                        rxd->rxd2 = RX_DMA_LSO;
1334                else
1335                        rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
1336
1337                ring->calc_idx = idx;
1338
1339                done++;
1340        }
1341
1342rx_done:
1343        if (done) {
1344                /* make sure that all changes to the dma ring are flushed before
1345                 * we continue
1346                 */
1347                wmb();
1348                mtk_update_rx_cpu_idx(eth);
1349        }
1350
1351        return done;
1352}
1353
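    /* TX completion. The QDMA variant walks the ring from the CPU release
     * pointer (MTK_QTX_CRX_PTR) towards the DMA pointer (MTK_QTX_DRX_PTR), the
     * PDMA variant from ring->cpu_idx towards MT7628_TX_DTX_IDX0. Only real
     * skbs (not the MTK_DMA_DUMMY_DESC placeholders used for fragments) are
     * counted, per MAC, so mtk_poll_tx() can update the byte queue limits.
     */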
1354static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
1355                            unsigned int *done, unsigned int *bytes)
1356{
1357        struct mtk_tx_ring *ring = &eth->tx_ring;
1358        struct mtk_tx_dma *desc;
1359        struct sk_buff *skb;
1360        struct mtk_tx_buf *tx_buf;
1361        u32 cpu, dma;
1362
1363        cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
1364        dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1365
1366        desc = mtk_qdma_phys_to_virt(ring, cpu);
1367
1368        while ((cpu != dma) && budget) {
1369                u32 next_cpu = desc->txd2;
1370                int mac = 0;
1371
1372                desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
1373                if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
1374                        break;
1375
1376                tx_buf = mtk_desc_to_tx_buf(ring, desc);
1377                if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
1378                        mac = 1;
1379
1380                skb = tx_buf->skb;
1381                if (!skb)
1382                        break;
1383
1384                if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1385                        bytes[mac] += skb->len;
1386                        done[mac]++;
1387                        budget--;
1388                }
1389                mtk_tx_unmap(eth, tx_buf);
1390
1391                ring->last_free = desc;
1392                atomic_inc(&ring->free_count);
1393
1394                cpu = next_cpu;
1395        }
1396
1397        mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
1398
1399        return budget;
1400}
1401
1402static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
1403                            unsigned int *done, unsigned int *bytes)
1404{
1405        struct mtk_tx_ring *ring = &eth->tx_ring;
1406        struct mtk_tx_dma *desc;
1407        struct sk_buff *skb;
1408        struct mtk_tx_buf *tx_buf;
1409        u32 cpu, dma;
1410
1411        cpu = ring->cpu_idx;
1412        dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
1413
1414        while ((cpu != dma) && budget) {
1415                tx_buf = &ring->buf[cpu];
1416                skb = tx_buf->skb;
1417                if (!skb)
1418                        break;
1419
1420                if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
1421                        bytes[0] += skb->len;
1422                        done[0]++;
1423                        budget--;
1424                }
1425
1426                mtk_tx_unmap(eth, tx_buf);
1427
1428                desc = &ring->dma[cpu];
1429                ring->last_free = desc;
1430                atomic_inc(&ring->free_count);
1431
1432                cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
1433        }
1434
1435        ring->cpu_idx = cpu;
1436
1437        return budget;
1438}
1439
1440static int mtk_poll_tx(struct mtk_eth *eth, int budget)
1441{
1442        struct mtk_tx_ring *ring = &eth->tx_ring;
1443        unsigned int done[MTK_MAX_DEVS];
1444        unsigned int bytes[MTK_MAX_DEVS];
1445        int total = 0, i;
1446
1447        memset(done, 0, sizeof(done));
1448        memset(bytes, 0, sizeof(bytes));
1449
1450        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1451                budget = mtk_poll_tx_qdma(eth, budget, done, bytes);
1452        else
1453                budget = mtk_poll_tx_pdma(eth, budget, done, bytes);
1454
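            /* report completed packets and bytes to each netdev for
             * BQL (byte queue limit) accounting
             */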
1455        for (i = 0; i < MTK_MAC_COUNT; i++) {
1456                if (!eth->netdev[i] || !done[i])
1457                        continue;
1458                netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1459                total += done[i];
1460        }
1461
1462        if (mtk_queue_stopped(eth) &&
1463            (atomic_read(&ring->free_count) > ring->thresh))
1464                mtk_wake_queue(eth);
1465
1466        return total;
1467}
1468
1469static void mtk_handle_status_irq(struct mtk_eth *eth)
1470{
1471        u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
1472
1473        if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
1474                mtk_stats_update(eth);
1475                mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
1476                        MTK_INT_STATUS2);
1477        }
1478}
1479
1480static int mtk_napi_tx(struct napi_struct *napi, int budget)
1481{
1482        struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
1483        u32 status, mask;
1484        int tx_done = 0;
1485
1486        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
1487                mtk_handle_status_irq(eth);
1488        mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
1489        tx_done = mtk_poll_tx(eth, budget);
1490
1491        if (unlikely(netif_msg_intr(eth))) {
1492                status = mtk_r32(eth, eth->tx_int_status_reg);
1493                mask = mtk_r32(eth, eth->tx_int_mask_reg);
1494                dev_info(eth->dev,
1495                         "done tx %d, intr 0x%08x/0x%x\n",
1496                         tx_done, status, mask);
1497        }
1498
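            /* stay in polling mode if the budget was exhausted or TX
             * completions are still pending; otherwise complete NAPI and
             * re-enable the TX done interrupt
             */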
1499        if (tx_done == budget)
1500                return budget;
1501
1502        status = mtk_r32(eth, eth->tx_int_status_reg);
1503        if (status & MTK_TX_DONE_INT)
1504                return budget;
1505
1506        napi_complete(napi);
1507        mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
1508
1509        return tx_done;
1510}
1511
1512static int mtk_napi_rx(struct napi_struct *napi, int budget)
1513{
1514        struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
1515        u32 status, mask;
1516        int rx_done = 0;
1517        int remain_budget = budget;
1518
1519        mtk_handle_status_irq(eth);
1520
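            /* ack the RX interrupt and poll; loop again while budget
             * remains and the hardware still reports pending RX work
             */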
1521poll_again:
1522        mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
1523        rx_done = mtk_poll_rx(napi, remain_budget, eth);
1524
1525        if (unlikely(netif_msg_intr(eth))) {
1526                status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1527                mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
1528                dev_info(eth->dev,
1529                         "done rx %d, intr 0x%08x/0x%x\n",
1530                         rx_done, status, mask);
1531        }
1532        if (rx_done == remain_budget)
1533                return budget;
1534
1535        status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
1536        if (status & MTK_RX_DONE_INT) {
1537                remain_budget -= rx_done;
1538                goto poll_again;
1539        }
1540        napi_complete(napi);
1541        mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
1542
1543        return rx_done + budget - remain_budget;
1544}
1545
1546static int mtk_tx_alloc(struct mtk_eth *eth)
1547{
1548        struct mtk_tx_ring *ring = &eth->tx_ring;
1549        int i, sz = sizeof(*ring->dma);
1550
1551        ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
1552                               GFP_KERNEL);
1553        if (!ring->buf)
1554                goto no_tx_mem;
1555
1556        ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1557                                       &ring->phys, GFP_ATOMIC);
1558        if (!ring->dma)
1559                goto no_tx_mem;
1560
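            /* link the descriptors into a ring: txd2 holds the DMA address
             * of the next descriptor and ownership initially stays with the
             * CPU
             */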
1561        for (i = 0; i < MTK_DMA_SIZE; i++) {
1562                int next = (i + 1) % MTK_DMA_SIZE;
1563                u32 next_ptr = ring->phys + next * sz;
1564
1565                ring->dma[i].txd2 = next_ptr;
1566                ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
1567        }
1568
1569        /* On MT7688 (PDMA only) this driver uses the ring->dma structs
1570         * only as the framework. The real HW descriptors are the PDMA
1571         * descriptors in ring->dma_pdma.
1572         */
1573        if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1574                ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
1575                                                    &ring->phys_pdma,
1576                                                    GFP_ATOMIC);
1577                if (!ring->dma_pdma)
1578                        goto no_tx_mem;
1579
1580                for (i = 0; i < MTK_DMA_SIZE; i++) {
1581                        ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
1582                        ring->dma_pdma[i].txd4 = 0;
1583                }
1584        }
1585
1586        ring->dma_size = MTK_DMA_SIZE;
1587        atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
1588        ring->next_free = &ring->dma[0];
1589        ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
1590        ring->thresh = MAX_SKB_FRAGS;
1591
1592        /* make sure that all changes to the dma ring are flushed before we
1593         * continue
1594         */
1595        wmb();
1596
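            /* program the ring base address and the initial CPU/DMA index
             * registers of the QDMA (or, on PDMA-only SoCs, PDMA) block
             */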
1597        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
1598                mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
1599                mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
1600                mtk_w32(eth,
1601                        ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1602                        MTK_QTX_CRX_PTR);
1603                mtk_w32(eth,
1604                        ring->phys + ((MTK_DMA_SIZE - 1) * sz),
1605                        MTK_QTX_DRX_PTR);
1606                mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
1607                        MTK_QTX_CFG(0));
1608        } else {
1609                mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
1610                mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
1611                mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
1612                mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
1613        }
1614
1615        return 0;
1616
1617no_tx_mem:
1618        return -ENOMEM;
1619}
1620
1621static void mtk_tx_clean(struct mtk_eth *eth)
1622{
1623        struct mtk_tx_ring *ring = &eth->tx_ring;
1624        int i;
1625
1626        if (ring->buf) {
1627                for (i = 0; i < MTK_DMA_SIZE; i++)
1628                        mtk_tx_unmap(eth, &ring->buf[i]);
1629                kfree(ring->buf);
1630                ring->buf = NULL;
1631        }
1632
1633        if (ring->dma) {
1634                dma_free_coherent(eth->dev,
1635                                  MTK_DMA_SIZE * sizeof(*ring->dma),
1636                                  ring->dma,
1637                                  ring->phys);
1638                ring->dma = NULL;
1639        }
1640
1641        if (ring->dma_pdma) {
1642                dma_free_coherent(eth->dev,
1643                                  MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
1644                                  ring->dma_pdma,
1645                                  ring->phys_pdma);
1646                ring->dma_pdma = NULL;
1647        }
1648}
1649
1650static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
1651{
1652        struct mtk_rx_ring *ring;
1653        int rx_data_len, rx_dma_size;
1654        int i;
1655        u32 offset = 0;
1656
1657        if (rx_flag == MTK_RX_FLAGS_QDMA) {
1658                if (ring_no)
1659                        return -EINVAL;
1660                ring = &eth->rx_ring_qdma;
1661                offset = 0x1000;
1662        } else {
1663                ring = &eth->rx_ring[ring_no];
1664        }
1665
1666        if (rx_flag == MTK_RX_FLAGS_HWLRO) {
1667                rx_data_len = MTK_MAX_LRO_RX_LENGTH;
1668                rx_dma_size = MTK_HW_LRO_DMA_SIZE;
1669        } else {
1670                rx_data_len = ETH_DATA_LEN;
1671                rx_dma_size = MTK_DMA_SIZE;
1672        }
1673
1674        ring->frag_size = mtk_max_frag_size(rx_data_len);
1675        ring->buf_size = mtk_max_buf_size(ring->frag_size);
1676        ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
1677                             GFP_KERNEL);
1678        if (!ring->data)
1679                return -ENOMEM;
1680
1681        for (i = 0; i < rx_dma_size; i++) {
1682                ring->data[i] = netdev_alloc_frag(ring->frag_size);
1683                if (!ring->data[i])
1684                        return -ENOMEM;
1685        }
1686
1687        ring->dma = dma_alloc_coherent(eth->dev,
1688                                       rx_dma_size * sizeof(*ring->dma),
1689                                       &ring->phys, GFP_ATOMIC);
1690        if (!ring->dma)
1691                return -ENOMEM;
1692
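            /* map each RX buffer for the device; rxd1 carries the buffer DMA
             * address and rxd2 the buffer length (or the LSO bit on MT7628)
             */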
1693        for (i = 0; i < rx_dma_size; i++) {
1694                dma_addr_t dma_addr = dma_map_single(eth->dev,
1695                                ring->data[i] + NET_SKB_PAD + eth->ip_align,
1696                                ring->buf_size,
1697                                DMA_FROM_DEVICE);
1698                if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
1699                        return -ENOMEM;
1700                ring->dma[i].rxd1 = (unsigned int)dma_addr;
1701
1702                if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
1703                        ring->dma[i].rxd2 = RX_DMA_LSO;
1704                else
1705                        ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
1706        }
1707        ring->dma_size = rx_dma_size;
1708        ring->calc_idx_update = false;
1709        ring->calc_idx = rx_dma_size - 1;
1710        ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
1711        /* make sure that all changes to the dma ring are flushed before we
1712         * continue
1713         */
1714        wmb();
1715
1716        mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
1717        mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
1718        mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
1719        mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
1720
1721        return 0;
1722}
1723
1724static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
1725{
1726        int i;
1727
1728        if (ring->data && ring->dma) {
1729                for (i = 0; i < ring->dma_size; i++) {
1730                        if (!ring->data[i])
1731                                continue;
1732                        if (!ring->dma[i].rxd1)
1733                                continue;
1734                        dma_unmap_single(eth->dev,
1735                                         ring->dma[i].rxd1,
1736                                         ring->buf_size,
1737                                         DMA_FROM_DEVICE);
1738                        skb_free_frag(ring->data[i]);
1739                }
1740                kfree(ring->data);
1741                ring->data = NULL;
1742        }
1743
1744        if (ring->dma) {
1745                dma_free_coherent(eth->dev,
1746                                  ring->dma_size * sizeof(*ring->dma),
1747                                  ring->dma,
1748                                  ring->phys);
1749                ring->dma = NULL;
1750        }
1751}
1752
1753static int mtk_hwlro_rx_init(struct mtk_eth *eth)
1754{
1755        int i;
1756        u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
1757        u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
1758
1759        /* set LRO rings to auto-learn modes */
1760        ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
1761
1762        /* validate LRO ring */
1763        ring_ctrl_dw2 |= MTK_RING_VLD;
1764
1765        /* set AGE timer (unit: 20us) */
1766        ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
1767        ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
1768
1769        /* set max AGG timer (unit: 20us) */
1770        ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
1771
1772        /* set max LRO AGG count */
1773        ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
1774        ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
1775
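            /* ring 0 is reserved for normal RX; apply these settings to the
             * remaining rings used for HW LRO
             */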
1776        for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
1777                mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
1778                mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
1779                mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
1780        }
1781
1782        /* IPv4 checksum update enable */
1783        lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
1784
1785        /* switch priority comparison to packet count mode */
1786        lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
1787
1788        /* bandwidth threshold setting */
1789        mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
1790
1791        /* auto-learn score delta setting */
1792        mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
1793
1794        /* set refresh timer for alternative flows to 1 sec. (unit: 20us) */
1795        mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
1796                MTK_PDMA_LRO_ALT_REFRESH_TIMER);
1797
1798        /* set HW LRO mode & the max aggregation count for rx packets */
1799        lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
1800
1801        /* the minimum remaining SDL0 room in an RXD required for LRO aggregation */
1802        lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
1803
1804        /* enable HW LRO */
1805        lro_ctrl_dw0 |= MTK_LRO_EN;
1806
1807        mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
1808        mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
1809
1810        return 0;
1811}
1812
1813static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
1814{
1815        int i;
1816        u32 val;
1817
1818        /* relinquish lro rings, flush aggregated packets */
1819        mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
1820
1821        /* wait for the relinquishment to complete */
1822        for (i = 0; i < 10; i++) {
1823                val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
1824                if (val & MTK_LRO_RING_RELINQUISH_DONE) {
1825                        msleep(20);
1826                        continue;
1827                }
1828                break;
1829        }
1830
1831        /* invalidate lro rings */
1832        for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
1833                mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
1834
1835        /* disable HW LRO */
1836        mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
1837}
1838
1839static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
1840{
1841        u32 reg_val;
1842
1843        reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1844
1845        /* invalidate the IP setting */
1846        mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1847
1848        mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
1849
1850        /* validate the IP setting */
1851        mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1852}
1853
1854static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
1855{
1856        u32 reg_val;
1857
1858        reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
1859
1860        /* invalidate the IP setting */
1861        mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
1862
1863        mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
1864}
1865
1866static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
1867{
1868        int cnt = 0;
1869        int i;
1870
1871        for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1872                if (mac->hwlro_ip[i])
1873                        cnt++;
1874        }
1875
1876        return cnt;
1877}
1878
1879static int mtk_hwlro_add_ipaddr(struct net_device *dev,
1880                                struct ethtool_rxnfc *cmd)
1881{
1882        struct ethtool_rx_flow_spec *fsp =
1883                (struct ethtool_rx_flow_spec *)&cmd->fs;
1884        struct mtk_mac *mac = netdev_priv(dev);
1885        struct mtk_eth *eth = mac->hw;
1886        int hwlro_idx;
1887
1888        if ((fsp->flow_type != TCP_V4_FLOW) ||
1889            (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
1890            (fsp->location > 1))
1891                return -EINVAL;
1892
1893        mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
1894        hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1895
1896        mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1897
1898        mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
1899
1900        return 0;
1901}
1902
1903static int mtk_hwlro_del_ipaddr(struct net_device *dev,
1904                                struct ethtool_rxnfc *cmd)
1905{
1906        struct ethtool_rx_flow_spec *fsp =
1907                (struct ethtool_rx_flow_spec *)&cmd->fs;
1908        struct mtk_mac *mac = netdev_priv(dev);
1909        struct mtk_eth *eth = mac->hw;
1910        int hwlro_idx;
1911
1912        if (fsp->location > 1)
1913                return -EINVAL;
1914
1915        mac->hwlro_ip[fsp->location] = 0;
1916        hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
1917
1918        mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1919
1920        mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1921
1922        return 0;
1923}
1924
1925static void mtk_hwlro_netdev_disable(struct net_device *dev)
1926{
1927        struct mtk_mac *mac = netdev_priv(dev);
1928        struct mtk_eth *eth = mac->hw;
1929        int i, hwlro_idx;
1930
1931        for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1932                mac->hwlro_ip[i] = 0;
1933                hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
1934
1935                mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
1936        }
1937
1938        mac->hwlro_ip_cnt = 0;
1939}
1940
1941static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
1942                                    struct ethtool_rxnfc *cmd)
1943{
1944        struct mtk_mac *mac = netdev_priv(dev);
1945        struct ethtool_rx_flow_spec *fsp =
1946                (struct ethtool_rx_flow_spec *)&cmd->fs;
1947
1948        /* only the TCP destination IPv4 address is meaningful */
1949        fsp->flow_type = TCP_V4_FLOW;
1950        fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
1951        fsp->m_u.tcp_ip4_spec.ip4dst = 0;
1952
1953        fsp->h_u.tcp_ip4_spec.ip4src = 0;
1954        fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
1955        fsp->h_u.tcp_ip4_spec.psrc = 0;
1956        fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
1957        fsp->h_u.tcp_ip4_spec.pdst = 0;
1958        fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
1959        fsp->h_u.tcp_ip4_spec.tos = 0;
1960        fsp->m_u.tcp_ip4_spec.tos = 0xff;
1961
1962        return 0;
1963}
1964
1965static int mtk_hwlro_get_fdir_all(struct net_device *dev,
1966                                  struct ethtool_rxnfc *cmd,
1967                                  u32 *rule_locs)
1968{
1969        struct mtk_mac *mac = netdev_priv(dev);
1970        int cnt = 0;
1971        int i;
1972
1973        for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
1974                if (mac->hwlro_ip[i]) {
1975                        rule_locs[cnt] = i;
1976                        cnt++;
1977                }
1978        }
1979
1980        cmd->rule_cnt = cnt;
1981
1982        return 0;
1983}
1984
1985static netdev_features_t mtk_fix_features(struct net_device *dev,
1986                                          netdev_features_t features)
1987{
1988        if (!(features & NETIF_F_LRO)) {
1989                struct mtk_mac *mac = netdev_priv(dev);
1990                int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
1991
1992                if (ip_cnt) {
1993                        netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
1994
1995                        features |= NETIF_F_LRO;
1996                }
1997        }
1998
1999        return features;
2000}
2001
2002static int mtk_set_features(struct net_device *dev, netdev_features_t features)
2003{
2004        int err = 0;
2005
2006        if (!((dev->features ^ features) & NETIF_F_LRO))
2007                return 0;
2008
2009        if (!(features & NETIF_F_LRO))
2010                mtk_hwlro_netdev_disable(dev);
2011
2012        return err;
2013}
2014
2015/* wait for DMA to finish whatever it is doing before we start using it again */
2016static int mtk_dma_busy_wait(struct mtk_eth *eth)
2017{
2018        unsigned long t_start = jiffies;
2019
2020        while (1) {
2021                if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2022                        if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
2023                              (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2024                                return 0;
2025                } else {
2026                        if (!(mtk_r32(eth, MTK_PDMA_GLO_CFG) &
2027                              (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
2028                                return 0;
2029                }
2030
2031                if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
2032                        break;
2033        }
2034
2035        dev_err(eth->dev, "DMA init timeout\n");
2036        return -1;
2037}
2038
2039static int mtk_dma_init(struct mtk_eth *eth)
2040{
2041        int err;
2042        u32 i;
2043
2044        if (mtk_dma_busy_wait(eth))
2045                return -EBUSY;
2046
2047        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2048                /* QDMA needs scratch memory for internal reordering of the
2049                 * descriptors
2050                 */
2051                err = mtk_init_fq_dma(eth);
2052                if (err)
2053                        return err;
2054        }
2055
2056        err = mtk_tx_alloc(eth);
2057        if (err)
2058                return err;
2059
2060        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2061                err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
2062                if (err)
2063                        return err;
2064        }
2065
2066        err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
2067        if (err)
2068                return err;
2069
2070        if (eth->hwlro) {
2071                for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
2072                        err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
2073                        if (err)
2074                                return err;
2075                }
2076                err = mtk_hwlro_rx_init(eth);
2077                if (err)
2078                        return err;
2079        }
2080
2081        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2082                /* Enable random early drop and set drop threshold
2083                 * automatically
2084                 */
2085                mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
2086                        FC_THRES_MIN, MTK_QDMA_FC_THRES);
2087                mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
2088        }
2089
2090        return 0;
2091}
2092
2093static void mtk_dma_free(struct mtk_eth *eth)
2094{
2095        int i;
2096
2097        for (i = 0; i < MTK_MAC_COUNT; i++)
2098                if (eth->netdev[i])
2099                        netdev_reset_queue(eth->netdev[i]);
2100        if (eth->scratch_ring) {
2101                dma_free_coherent(eth->dev,
2102                                  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
2103                                  eth->scratch_ring,
2104                                  eth->phy_scratch_ring);
2105                eth->scratch_ring = NULL;
2106                eth->phy_scratch_ring = 0;
2107        }
2108        mtk_tx_clean(eth);
2109        mtk_rx_clean(eth, &eth->rx_ring[0]);
2110        mtk_rx_clean(eth, &eth->rx_ring_qdma);
2111
2112        if (eth->hwlro) {
2113                mtk_hwlro_rx_uninit(eth);
2114                for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
2115                        mtk_rx_clean(eth, &eth->rx_ring[i]);
2116        }
2117
2118        kfree(eth->scratch_head);
2119}
2120
2121static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
2122{
2123        struct mtk_mac *mac = netdev_priv(dev);
2124        struct mtk_eth *eth = mac->hw;
2125
2126        eth->netdev[mac->id]->stats.tx_errors++;
2127        netif_err(eth, tx_err, dev,
2128                  "transmit timed out\n");
2129        schedule_work(&eth->pending_work);
2130}
2131
2132static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
2133{
2134        struct mtk_eth *eth = _eth;
2135
2136        if (likely(napi_schedule_prep(&eth->rx_napi))) {
2137                __napi_schedule(&eth->rx_napi);
2138                mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
2139        }
2140
2141        return IRQ_HANDLED;
2142}
2143
2144static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
2145{
2146        struct mtk_eth *eth = _eth;
2147
2148        if (likely(napi_schedule_prep(&eth->tx_napi))) {
2149                __napi_schedule(&eth->tx_napi);
2150                mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2151        }
2152
2153        return IRQ_HANDLED;
2154}
2155
2156static irqreturn_t mtk_handle_irq(int irq, void *_eth)
2157{
2158        struct mtk_eth *eth = _eth;
2159
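            /* demultiplex RX and TX completions on SoCs that share a single
             * interrupt line
             */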
2160        if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
2161                if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
2162                        mtk_handle_irq_rx(irq, _eth);
2163        }
2164        if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
2165                if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
2166                        mtk_handle_irq_tx(irq, _eth);
2167        }
2168
2169        return IRQ_HANDLED;
2170}
2171
2172#ifdef CONFIG_NET_POLL_CONTROLLER
2173static void mtk_poll_controller(struct net_device *dev)
2174{
2175        struct mtk_mac *mac = netdev_priv(dev);
2176        struct mtk_eth *eth = mac->hw;
2177
2178        mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2179        mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
2180        mtk_handle_irq_rx(eth->irq[2], dev);
2181        mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2182        mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
2183}
2184#endif
2185
2186static int mtk_start_dma(struct mtk_eth *eth)
2187{
2188        u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
2189        int err;
2190
2191        err = mtk_dma_init(eth);
2192        if (err) {
2193                mtk_dma_free(eth);
2194                return err;
2195        }
2196
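            /* enable TX/RX DMA: QDMA-capable SoCs program both the QDMA and
             * PDMA blocks, while PDMA-only SoCs (MT7628 class) use PDMA alone
             */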
2197        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2198                mtk_w32(eth,
2199                        MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
2200                        MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
2201                        MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
2202                        MTK_RX_BT_32DWORDS,
2203                        MTK_QDMA_GLO_CFG);
2204
2205                mtk_w32(eth,
2206                        MTK_RX_DMA_EN | rx_2b_offset |
2207                        MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
2208                        MTK_PDMA_GLO_CFG);
2209        } else {
2210                mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
2211                        MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
2212                        MTK_PDMA_GLO_CFG);
2213        }
2214
2215        return 0;
2216}
2217
2218static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
2219{
2220        int i;
2221
2222        if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
2223                return;
2224
2225        for (i = 0; i < MTK_MAC_COUNT; i++) {
2226                u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
2227
2228                /* by default, set the forward port to send frames to the PDMA */
2229                val &= ~0xffff;
2230
2231                /* Enable RX checksum */
2232                val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
2233
2234                val |= config;
2235
2236                mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
2237        }
2238        /* Reset and enable PSE */
2239        mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
2240        mtk_w32(eth, 0, MTK_RST_GL);
2241}
2242
2243static int mtk_open(struct net_device *dev)
2244{
2245        struct mtk_mac *mac = netdev_priv(dev);
2246        struct mtk_eth *eth = mac->hw;
2247        int err;
2248
2249        err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
2250        if (err) {
2251                netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
2252                           err);
2253                return err;
2254        }
2255
2256        /* we run 2 netdevs on the same dma ring so we only bring it up once */
2257        if (!refcount_read(&eth->dma_refcnt)) {
2258                int err = mtk_start_dma(eth);
2259
2260                if (err)
2261                        return err;
2262
2263                mtk_gdm_config(eth, MTK_GDMA_TO_PDMA);
2264
2265                napi_enable(&eth->tx_napi);
2266                napi_enable(&eth->rx_napi);
2267                mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
2268                mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
2269                refcount_set(&eth->dma_refcnt, 1);
2270        } else {
2271                refcount_inc(&eth->dma_refcnt);
2272        }
2273
2274        phylink_start(mac->phylink);
2275        netif_start_queue(dev);
2276        return 0;
2277}
2278
2279static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
2280{
2281        u32 val;
2282        int i;
2283
2284        /* stop the dma engine */
2285        spin_lock_bh(&eth->page_lock);
2286        val = mtk_r32(eth, glo_cfg);
2287        mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
2288                glo_cfg);
2289        spin_unlock_bh(&eth->page_lock);
2290
2291        /* wait for dma stop */
2292        for (i = 0; i < 10; i++) {
2293                val = mtk_r32(eth, glo_cfg);
2294                if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
2295                        msleep(20);
2296                        continue;
2297                }
2298                break;
2299        }
2300}
2301
2302static int mtk_stop(struct net_device *dev)
2303{
2304        struct mtk_mac *mac = netdev_priv(dev);
2305        struct mtk_eth *eth = mac->hw;
2306
2307        phylink_stop(mac->phylink);
2308
2309        netif_tx_disable(dev);
2310
2311        phylink_disconnect_phy(mac->phylink);
2312
2313        /* only shutdown DMA if this is the last user */
2314        if (!refcount_dec_and_test(&eth->dma_refcnt))
2315                return 0;
2316
2317        mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
2318
2319        mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
2320        mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
2321        napi_disable(&eth->tx_napi);
2322        napi_disable(&eth->rx_napi);
2323
2324        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
2325                mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
2326        mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
2327
2328        mtk_dma_free(eth);
2329
2330        return 0;
2331}
2332
2333static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
2334{
2335        regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2336                           reset_bits,
2337                           reset_bits);
2338
2339        usleep_range(1000, 1100);
2340        regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
2341                           reset_bits,
2342                           ~reset_bits);
2343        mdelay(10);
2344}
2345
2346static void mtk_clk_disable(struct mtk_eth *eth)
2347{
2348        int clk;
2349
2350        for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
2351                clk_disable_unprepare(eth->clks[clk]);
2352}
2353
2354static int mtk_clk_enable(struct mtk_eth *eth)
2355{
2356        int clk, ret;
2357
2358        for (clk = 0; clk < MTK_CLK_MAX; clk++) {
2359                ret = clk_prepare_enable(eth->clks[clk]);
2360                if (ret)
2361                        goto err_disable_clks;
2362        }
2363
2364        return 0;
2365
2366err_disable_clks:
2367        while (--clk >= 0)
2368                clk_disable_unprepare(eth->clks[clk]);
2369
2370        return ret;
2371}
2372
2373static int mtk_hw_init(struct mtk_eth *eth)
2374{
2375        int i, val, ret;
2376
2377        if (test_and_set_bit(MTK_HW_INIT, &eth->state))
2378                return 0;
2379
2380        pm_runtime_enable(eth->dev);
2381        pm_runtime_get_sync(eth->dev);
2382
2383        ret = mtk_clk_enable(eth);
2384        if (ret)
2385                goto err_disable_pm;
2386
2387        if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2388                ret = device_reset(eth->dev);
2389                if (ret) {
2390                        dev_err(eth->dev, "MAC reset failed!\n");
2391                        goto err_disable_pm;
2392                }
2393
2394                /* enable interrupt delay for RX */
2395                mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
2396
2397                /* disable delay and normal interrupt */
2398                mtk_tx_irq_disable(eth, ~0);
2399                mtk_rx_irq_disable(eth, ~0);
2400
2401                return 0;
2402        }
2403
2404        /* Non-MT7628 handling... */
2405        ethsys_reset(eth, RSTCTRL_FE);
2406        ethsys_reset(eth, RSTCTRL_PPE);
2407
2408        if (eth->pctl) {
2409                /* Set GE2 driving and slew rate */
2410                regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
2411
2412                /* set GE2 TDSEL */
2413                regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
2414
2415                /* set GE2 TUNE */
2416                regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
2417        }
2418
2419        /* Set link down as the default for each GMAC. Each GMAC's own MCR
2420         * is set up with the appropriate value once mtk_mac_config() is
2421         * invoked.
2422         */
2423        for (i = 0; i < MTK_MAC_COUNT; i++)
2424                mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
2425
2426        /* Tell the CDM to parse the MTK special tag on frames coming from
2427         * the CPU; this also works for untagged packets.
2428         */
2429        val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
2430        mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
2431
2432        /* Enable RX VLAN offloading */
2433        mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
2434
2435        /* enable interrupt delay for RX */
2436        mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);
2437
2438        /* disable delay and normal interrupt */
2439        mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
2440        mtk_tx_irq_disable(eth, ~0);
2441        mtk_rx_irq_disable(eth, ~0);
2442
2443        /* FE int grouping */
2444        mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
2445        mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
2446        mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
2447        mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
2448        mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
2449
2450        return 0;
2451
2452err_disable_pm:
2453        pm_runtime_put_sync(eth->dev);
2454        pm_runtime_disable(eth->dev);
2455
2456        return ret;
2457}
2458
2459static int mtk_hw_deinit(struct mtk_eth *eth)
2460{
2461        if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
2462                return 0;
2463
2464        mtk_clk_disable(eth);
2465
2466        pm_runtime_put_sync(eth->dev);
2467        pm_runtime_disable(eth->dev);
2468
2469        return 0;
2470}
2471
2472static int __init mtk_init(struct net_device *dev)
2473{
2474        struct mtk_mac *mac = netdev_priv(dev);
2475        struct mtk_eth *eth = mac->hw;
2476        const char *mac_addr;
2477
2478        mac_addr = of_get_mac_address(mac->of_node);
2479        if (!IS_ERR(mac_addr))
2480                ether_addr_copy(dev->dev_addr, mac_addr);
2481
2482        /* If the MAC address is invalid, use a random MAC address */
2483        if (!is_valid_ether_addr(dev->dev_addr)) {
2484                eth_hw_addr_random(dev);
2485                dev_err(eth->dev, "generated random MAC address %pM\n",
2486                        dev->dev_addr);
2487        }
2488
2489        return 0;
2490}
2491
2492static void mtk_uninit(struct net_device *dev)
2493{
2494        struct mtk_mac *mac = netdev_priv(dev);
2495        struct mtk_eth *eth = mac->hw;
2496
2497        phylink_disconnect_phy(mac->phylink);
2498        mtk_tx_irq_disable(eth, ~0);
2499        mtk_rx_irq_disable(eth, ~0);
2500}
2501
2502static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2503{
2504        struct mtk_mac *mac = netdev_priv(dev);
2505
2506        switch (cmd) {
2507        case SIOCGMIIPHY:
2508        case SIOCGMIIREG:
2509        case SIOCSMIIREG:
2510                return phylink_mii_ioctl(mac->phylink, ifr, cmd);
2511        default:
2512                break;
2513        }
2514
2515        return -EOPNOTSUPP;
2516}
2517
2518static void mtk_pending_work(struct work_struct *work)
2519{
2520        struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
2521        int err, i;
2522        unsigned long restart = 0;
2523
2524        rtnl_lock();
2525
2526        dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
2527
2528        while (test_and_set_bit_lock(MTK_RESETTING, &eth->state))
2529                cpu_relax();
2530
2531        dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
2532        /* stop all devices to make sure that dma is properly shut down */
2533        for (i = 0; i < MTK_MAC_COUNT; i++) {
2534                if (!eth->netdev[i])
2535                        continue;
2536                mtk_stop(eth->netdev[i]);
2537                __set_bit(i, &restart);
2538        }
2539        dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
2540
2541        /* restart underlying hardware such as power, clock, pin mux
2542         * and the connected phy
2543         */
2544        mtk_hw_deinit(eth);
2545
2546        if (eth->dev->pins)
2547                pinctrl_select_state(eth->dev->pins->p,
2548                                     eth->dev->pins->default_state);
2549        mtk_hw_init(eth);
2550
2551        /* restart DMA and enable IRQs */
2552        for (i = 0; i < MTK_MAC_COUNT; i++) {
2553                if (!test_bit(i, &restart))
2554                        continue;
2555                err = mtk_open(eth->netdev[i]);
2556                if (err) {
2557                        netif_alert(eth, ifup, eth->netdev[i],
2558                              "Driver up/down cycle failed, closing device.\n");
2559                        dev_close(eth->netdev[i]);
2560                }
2561        }
2562
2563        dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
2564
2565        clear_bit_unlock(MTK_RESETTING, &eth->state);
2566
2567        rtnl_unlock();
2568}
2569
2570static int mtk_free_dev(struct mtk_eth *eth)
2571{
2572        int i;
2573
2574        for (i = 0; i < MTK_MAC_COUNT; i++) {
2575                if (!eth->netdev[i])
2576                        continue;
2577                free_netdev(eth->netdev[i]);
2578        }
2579
2580        return 0;
2581}
2582
2583static int mtk_unreg_dev(struct mtk_eth *eth)
2584{
2585        int i;
2586
2587        for (i = 0; i < MTK_MAC_COUNT; i++) {
2588                if (!eth->netdev[i])
2589                        continue;
2590                unregister_netdev(eth->netdev[i]);
2591        }
2592
2593        return 0;
2594}
2595
2596static int mtk_cleanup(struct mtk_eth *eth)
2597{
2598        mtk_unreg_dev(eth);
2599        mtk_free_dev(eth);
2600        cancel_work_sync(&eth->pending_work);
2601
2602        return 0;
2603}
2604
2605static int mtk_get_link_ksettings(struct net_device *ndev,
2606                                  struct ethtool_link_ksettings *cmd)
2607{
2608        struct mtk_mac *mac = netdev_priv(ndev);
2609
2610        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2611                return -EBUSY;
2612
2613        return phylink_ethtool_ksettings_get(mac->phylink, cmd);
2614}
2615
2616static int mtk_set_link_ksettings(struct net_device *ndev,
2617                                  const struct ethtool_link_ksettings *cmd)
2618{
2619        struct mtk_mac *mac = netdev_priv(ndev);
2620
2621        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2622                return -EBUSY;
2623
2624        return phylink_ethtool_ksettings_set(mac->phylink, cmd);
2625}
2626
2627static void mtk_get_drvinfo(struct net_device *dev,
2628                            struct ethtool_drvinfo *info)
2629{
2630        struct mtk_mac *mac = netdev_priv(dev);
2631
2632        strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
2633        strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
2634        info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
2635}
2636
2637static u32 mtk_get_msglevel(struct net_device *dev)
2638{
2639        struct mtk_mac *mac = netdev_priv(dev);
2640
2641        return mac->hw->msg_enable;
2642}
2643
2644static void mtk_set_msglevel(struct net_device *dev, u32 value)
2645{
2646        struct mtk_mac *mac = netdev_priv(dev);
2647
2648        mac->hw->msg_enable = value;
2649}
2650
2651static int mtk_nway_reset(struct net_device *dev)
2652{
2653        struct mtk_mac *mac = netdev_priv(dev);
2654
2655        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2656                return -EBUSY;
2657
2658        if (!mac->phylink)
2659                return -ENOTSUPP;
2660
2661        return phylink_ethtool_nway_reset(mac->phylink);
2662}
2663
2664static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2665{
2666        int i;
2667
2668        switch (stringset) {
2669        case ETH_SS_STATS:
2670                for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
2671                        memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
2672                        data += ETH_GSTRING_LEN;
2673                }
2674                break;
2675        }
2676}
2677
2678static int mtk_get_sset_count(struct net_device *dev, int sset)
2679{
2680        switch (sset) {
2681        case ETH_SS_STATS:
2682                return ARRAY_SIZE(mtk_ethtool_stats);
2683        default:
2684                return -EOPNOTSUPP;
2685        }
2686}
2687
2688static void mtk_get_ethtool_stats(struct net_device *dev,
2689                                  struct ethtool_stats *stats, u64 *data)
2690{
2691        struct mtk_mac *mac = netdev_priv(dev);
2692        struct mtk_hw_stats *hwstats = mac->hw_stats;
2693        u64 *data_src, *data_dst;
2694        unsigned int start;
2695        int i;
2696
2697        if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
2698                return;
2699
2700        if (netif_running(dev) && netif_device_present(dev)) {
2701                if (spin_trylock_bh(&hwstats->stats_lock)) {
2702                        mtk_stats_update_mac(mac);
2703                        spin_unlock_bh(&hwstats->stats_lock);
2704                }
2705        }
2706
2707        data_src = (u64 *)hwstats;
2708
2709        do {
2710                data_dst = data;
2711                start = u64_stats_fetch_begin_irq(&hwstats->syncp);
2712
2713                for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
2714                        *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
2715        } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
2716}
2717
2718static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
2719                         u32 *rule_locs)
2720{
2721        int ret = -EOPNOTSUPP;
2722
2723        switch (cmd->cmd) {
2724        case ETHTOOL_GRXRINGS:
2725                if (dev->hw_features & NETIF_F_LRO) {
2726                        cmd->data = MTK_MAX_RX_RING_NUM;
2727                        ret = 0;
2728                }
2729                break;
2730        case ETHTOOL_GRXCLSRLCNT:
2731                if (dev->hw_features & NETIF_F_LRO) {
2732                        struct mtk_mac *mac = netdev_priv(dev);
2733
2734                        cmd->rule_cnt = mac->hwlro_ip_cnt;
2735                        ret = 0;
2736                }
2737                break;
2738        case ETHTOOL_GRXCLSRULE:
2739                if (dev->hw_features & NETIF_F_LRO)
2740                        ret = mtk_hwlro_get_fdir_entry(dev, cmd);
2741                break;
2742        case ETHTOOL_GRXCLSRLALL:
2743                if (dev->hw_features & NETIF_F_LRO)
2744                        ret = mtk_hwlro_get_fdir_all(dev, cmd,
2745                                                     rule_locs);
2746                break;
2747        default:
2748                break;
2749        }
2750
2751        return ret;
2752}
2753
2754static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
2755{
2756        int ret = -EOPNOTSUPP;
2757
2758        switch (cmd->cmd) {
2759        case ETHTOOL_SRXCLSRLINS:
2760                if (dev->hw_features & NETIF_F_LRO)
2761                        ret = mtk_hwlro_add_ipaddr(dev, cmd);
2762                break;
2763        case ETHTOOL_SRXCLSRLDEL:
2764                if (dev->hw_features & NETIF_F_LRO)
2765                        ret = mtk_hwlro_del_ipaddr(dev, cmd);
2766                break;
2767        default:
2768                break;
2769        }
2770
2771        return ret;
2772}
2773
2774static const struct ethtool_ops mtk_ethtool_ops = {
2775        .get_link_ksettings     = mtk_get_link_ksettings,
2776        .set_link_ksettings     = mtk_set_link_ksettings,
2777        .get_drvinfo            = mtk_get_drvinfo,
2778        .get_msglevel           = mtk_get_msglevel,
2779        .set_msglevel           = mtk_set_msglevel,
2780        .nway_reset             = mtk_nway_reset,
2781        .get_link               = ethtool_op_get_link,
2782        .get_strings            = mtk_get_strings,
2783        .get_sset_count         = mtk_get_sset_count,
2784        .get_ethtool_stats      = mtk_get_ethtool_stats,
2785        .get_rxnfc              = mtk_get_rxnfc,
2786        .set_rxnfc              = mtk_set_rxnfc,
2787};
2788
2789static const struct net_device_ops mtk_netdev_ops = {
2790        .ndo_init               = mtk_init,
2791        .ndo_uninit             = mtk_uninit,
2792        .ndo_open               = mtk_open,
2793        .ndo_stop               = mtk_stop,
2794        .ndo_start_xmit         = mtk_start_xmit,
2795        .ndo_set_mac_address    = mtk_set_mac_address,
2796        .ndo_validate_addr      = eth_validate_addr,
2797        .ndo_do_ioctl           = mtk_do_ioctl,
2798        .ndo_tx_timeout         = mtk_tx_timeout,
2799        .ndo_get_stats64        = mtk_get_stats64,
2800        .ndo_fix_features       = mtk_fix_features,
2801        .ndo_set_features       = mtk_set_features,
2802#ifdef CONFIG_NET_POLL_CONTROLLER
2803        .ndo_poll_controller    = mtk_poll_controller,
2804#endif
2805};
2806
2807static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
2808{
2809        const __be32 *_id = of_get_property(np, "reg", NULL);
2810        phy_interface_t phy_mode;
2811        struct phylink *phylink;
2812        struct mtk_mac *mac;
2813        int id, err;
2814
2815        if (!_id) {
2816                dev_err(eth->dev, "missing mac id\n");
2817                return -EINVAL;
2818        }
2819
2820        id = be32_to_cpup(_id);
2821        if (id >= MTK_MAC_COUNT) {
2822                dev_err(eth->dev, "%d is not a valid mac id\n", id);
2823                return -EINVAL;
2824        }
2825
2826        if (eth->netdev[id]) {
2827                dev_err(eth->dev, "duplicate mac id found: %d\n", id);
2828                return -EINVAL;
2829        }
2830
2831        eth->netdev[id] = alloc_etherdev(sizeof(*mac));
2832        if (!eth->netdev[id]) {
2833                dev_err(eth->dev, "alloc_etherdev failed\n");
2834                return -ENOMEM;
2835        }
2836        mac = netdev_priv(eth->netdev[id]);
2837        eth->mac[id] = mac;
2838        mac->id = id;
2839        mac->hw = eth;
2840        mac->of_node = np;
2841
2842        memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
2843        mac->hwlro_ip_cnt = 0;
2844
2845        mac->hw_stats = devm_kzalloc(eth->dev,
2846                                     sizeof(*mac->hw_stats),
2847                                     GFP_KERNEL);
2848        if (!mac->hw_stats) {
2849                dev_err(eth->dev, "failed to allocate counter memory\n");
2850                err = -ENOMEM;
2851                goto free_netdev;
2852        }
2853        spin_lock_init(&mac->hw_stats->stats_lock);
2854        u64_stats_init(&mac->hw_stats->syncp);
2855        mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
2856
2857        /* phylink create */
2858        err = of_get_phy_mode(np, &phy_mode);
2859        if (err) {
2860                dev_err(eth->dev, "incorrect phy-mode\n");
2861                goto free_netdev;
2862        }
2863
2864        /* mac config is not set */
2865        mac->interface = PHY_INTERFACE_MODE_NA;
2866        mac->mode = MLO_AN_PHY;
2867        mac->speed = SPEED_UNKNOWN;
2868
2869        mac->phylink_config.dev = &eth->netdev[id]->dev;
2870        mac->phylink_config.type = PHYLINK_NETDEV;
2871
2872        phylink = phylink_create(&mac->phylink_config,
2873                                 of_fwnode_handle(mac->of_node),
2874                                 phy_mode, &mtk_phylink_ops);
2875        if (IS_ERR(phylink)) {
2876                err = PTR_ERR(phylink);
2877                goto free_netdev;
2878        }
2879
2880        mac->phylink = phylink;
2881
2882        SET_NETDEV_DEV(eth->netdev[id], eth->dev);
2883        eth->netdev[id]->watchdog_timeo = 5 * HZ;
2884        eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
2885        eth->netdev[id]->base_addr = (unsigned long)eth->base;
2886
2887        eth->netdev[id]->hw_features = eth->soc->hw_features;
2888        if (eth->hwlro)
2889                eth->netdev[id]->hw_features |= NETIF_F_LRO;
2890
2891        eth->netdev[id]->vlan_features = eth->soc->hw_features &
2892                ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
2893        eth->netdev[id]->features |= eth->soc->hw_features;
2894        eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
2895
2896        eth->netdev[id]->irq = eth->irq[0];
2897        eth->netdev[id]->dev.of_node = np;
2898
2899        eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
2900
2901        return 0;
2902
2903free_netdev:
2904        free_netdev(eth->netdev[id]);
2905        return err;
2906}
2907
2908static int mtk_probe(struct platform_device *pdev)
2909{
2910        struct device_node *mac_np;
2911        struct mtk_eth *eth;
2912        int err, i;
2913
2914        eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
2915        if (!eth)
2916                return -ENOMEM;
2917
2918        eth->soc = of_device_get_match_data(&pdev->dev);
2919
2920        eth->dev = &pdev->dev;
2921        eth->base = devm_platform_ioremap_resource(pdev, 0);
2922        if (IS_ERR(eth->base))
2923                return PTR_ERR(eth->base);
2924
2925        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
2926                eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
2927                eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
2928        } else {
2929                eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
2930                eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
2931        }
2932
2933        if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2934                eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
2935                eth->ip_align = NET_IP_ALIGN;
2936        } else {
2937                eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
2938        }
2939
2940        spin_lock_init(&eth->page_lock);
2941        spin_lock_init(&eth->tx_irq_lock);
2942        spin_lock_init(&eth->rx_irq_lock);
2943
2944        if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
2945                eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2946                                                              "mediatek,ethsys");
2947                if (IS_ERR(eth->ethsys)) {
2948                        dev_err(&pdev->dev, "no ethsys regmap found\n");
2949                        return PTR_ERR(eth->ethsys);
2950                }
2951        }
2952
2953        if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
2954                eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2955                                                             "mediatek,infracfg");
2956                if (IS_ERR(eth->infra)) {
2957                        dev_err(&pdev->dev, "no infracfg regmap found\n");
2958                        return PTR_ERR(eth->infra);
2959                }
2960        }
2961
2962        if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
2963                eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
2964                                          GFP_KERNEL);
2965                if (!eth->sgmii)
2966                        return -ENOMEM;
2967
2968                err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node,
2969                                     eth->soc->ana_rgc3);
2970
2971                if (err)
2972                        return err;
2973        }
2974
2975        if (eth->soc->required_pctl) {
2976                eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2977                                                            "mediatek,pctl");
2978                if (IS_ERR(eth->pctl)) {
2979                        dev_err(&pdev->dev, "no pctl regmap found\n");
2980                        return PTR_ERR(eth->pctl);
2981                }
2982        }
2983
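            /* grab the three frame engine interrupt lines; SoCs with a shared
             * interrupt reuse IRQ 0 for all of them
             */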
2984        for (i = 0; i < 3; i++) {
2985                if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
2986                        eth->irq[i] = eth->irq[0];
2987                else
2988                        eth->irq[i] = platform_get_irq(pdev, i);
2989                if (eth->irq[i] < 0) {
2990                        dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
2991                        return -ENXIO;
2992                }
2993        }
2994        for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
2995                eth->clks[i] = devm_clk_get(eth->dev,
2996                                            mtk_clks_source_name[i]);
2997                if (IS_ERR(eth->clks[i])) {
2998                        if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
2999                                return -EPROBE_DEFER;
3000                        if (eth->soc->required_clks & BIT(i)) {
3001                                dev_err(&pdev->dev, "clock %s not found\n",
3002                                        mtk_clks_source_name[i]);
3003                                return -EINVAL;
3004                        }
3005                        eth->clks[i] = NULL;
3006                }
3007        }
3008
3009        eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
3010        INIT_WORK(&eth->pending_work, mtk_pending_work);
3011
3012        err = mtk_hw_init(eth);
3013        if (err)
3014                return err;
3015
3016        eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
3017
3018        for_each_child_of_node(pdev->dev.of_node, mac_np) {
3019                if (!of_device_is_compatible(mac_np,
3020                                             "mediatek,eth-mac"))
3021                        continue;
3022
3023                if (!of_device_is_available(mac_np))
3024                        continue;
3025
3026                err = mtk_add_mac(eth, mac_np);
3027                if (err) {
3028                        of_node_put(mac_np);
3029                        goto err_deinit_hw;
3030                }
3031        }
3032
3033        if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
3034                err = devm_request_irq(eth->dev, eth->irq[0],
3035                                       mtk_handle_irq, 0,
3036                                       dev_name(eth->dev), eth);
3037        } else {
3038                err = devm_request_irq(eth->dev, eth->irq[1],
3039                                       mtk_handle_irq_tx, 0,
3040                                       dev_name(eth->dev), eth);
3041                if (err)
3042                        goto err_free_dev;
3043
3044                err = devm_request_irq(eth->dev, eth->irq[2],
3045                                       mtk_handle_irq_rx, 0,
3046                                       dev_name(eth->dev), eth);
3047        }
3048        if (err)
3049                goto err_free_dev;
3050
 3051        /* The MDIO bus is not supported on MT7628/88 yet */
3052        if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
3053                err = mtk_mdio_init(eth);
3054                if (err)
3055                        goto err_free_dev;
3056        }
3057
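            /* Register a net_device for every MAC created above. */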
3058        for (i = 0; i < MTK_MAX_DEVS; i++) {
3059                if (!eth->netdev[i])
3060                        continue;
3061
3062                err = register_netdev(eth->netdev[i]);
3063                if (err) {
3064                        dev_err(eth->dev, "error bringing up device\n");
3065                        goto err_deinit_mdio;
 3066                }
 3067                netif_info(eth, probe, eth->netdev[i],
 3068                           "mediatek frame engine at 0x%08lx, irq %d\n",
 3069                           eth->netdev[i]->base_addr, eth->irq[0]);
3070        }
3071
 3072        /* We run two devices on the same DMA ring, so we need a dummy
 3073         * net_device for NAPI to work.
 3074         */
3075        init_dummy_netdev(&eth->dummy_dev);
3076        netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
3077                       MTK_NAPI_WEIGHT);
3078        netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
3079                       MTK_NAPI_WEIGHT);
3080
3081        platform_set_drvdata(pdev, eth);
3082
3083        return 0;
3084
3085err_deinit_mdio:
3086        mtk_mdio_cleanup(eth);
3087err_free_dev:
3088        mtk_free_dev(eth);
3089err_deinit_hw:
3090        mtk_hw_deinit(eth);
3091
3092        return err;
3093}
3094
3095static int mtk_remove(struct platform_device *pdev)
3096{
3097        struct mtk_eth *eth = platform_get_drvdata(pdev);
3098        struct mtk_mac *mac;
3099        int i;
3100
 3101        /* Stop all devices to make sure that DMA is properly shut down */
3102        for (i = 0; i < MTK_MAC_COUNT; i++) {
3103                if (!eth->netdev[i])
3104                        continue;
3105                mtk_stop(eth->netdev[i]);
3106                mac = netdev_priv(eth->netdev[i]);
3107                phylink_disconnect_phy(mac->phylink);
3108        }
3109
3110        mtk_hw_deinit(eth);
3111
3112        netif_napi_del(&eth->tx_napi);
3113        netif_napi_del(&eth->rx_napi);
3114        mtk_cleanup(eth);
3115        mtk_mdio_cleanup(eth);
3116
3117        return 0;
3118}
3119
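    /* Per-SoC configuration: capability flags, hardware features, the clocks
     * that must be present and whether the "mediatek,pctl" syscon is required
     * (see mtk_probe above).
     */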
3120static const struct mtk_soc_data mt2701_data = {
3121        .caps = MT7623_CAPS | MTK_HWLRO,
3122        .hw_features = MTK_HW_FEATURES,
3123        .required_clks = MT7623_CLKS_BITMAP,
3124        .required_pctl = true,
3125};
3126
3127static const struct mtk_soc_data mt7621_data = {
3128        .caps = MT7621_CAPS,
3129        .hw_features = MTK_HW_FEATURES,
3130        .required_clks = MT7621_CLKS_BITMAP,
3131        .required_pctl = false,
3132};
3133
3134static const struct mtk_soc_data mt7622_data = {
3135        .ana_rgc3 = 0x2028,
3136        .caps = MT7622_CAPS | MTK_HWLRO,
3137        .hw_features = MTK_HW_FEATURES,
3138        .required_clks = MT7622_CLKS_BITMAP,
3139        .required_pctl = false,
3140};
3141
3142static const struct mtk_soc_data mt7623_data = {
3143        .caps = MT7623_CAPS | MTK_HWLRO,
3144        .hw_features = MTK_HW_FEATURES,
3145        .required_clks = MT7623_CLKS_BITMAP,
3146        .required_pctl = true,
3147};
3148
3149static const struct mtk_soc_data mt7629_data = {
3150        .ana_rgc3 = 0x128,
3151        .caps = MT7629_CAPS | MTK_HWLRO,
3152        .hw_features = MTK_HW_FEATURES,
3153        .required_clks = MT7629_CLKS_BITMAP,
3154        .required_pctl = false,
3155};
3156
3157static const struct mtk_soc_data rt5350_data = {
3158        .caps = MT7628_CAPS,
3159        .hw_features = MTK_HW_FEATURES_MT7628,
3160        .required_clks = MT7628_CLKS_BITMAP,
3161        .required_pctl = false,
3162};
3163
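    /* Match device-tree compatibles to the per-SoC data above. */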
3164const struct of_device_id of_mtk_match[] = {
3165        { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
3166        { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data},
3167        { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
3168        { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
3169        { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
3170        { .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
3171        {},
3172};
3173MODULE_DEVICE_TABLE(of, of_mtk_match);
3174
3175static struct platform_driver mtk_driver = {
3176        .probe = mtk_probe,
3177        .remove = mtk_remove,
3178        .driver = {
3179                .name = "mtk_soc_eth",
3180                .of_match_table = of_mtk_match,
3181        },
3182};
3183
3184module_platform_driver(mtk_driver);
3185
3186MODULE_LICENSE("GPL");
3187MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
3188MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
3189