/*   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; version 2 of the License
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
 *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
 *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
 */

#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/if_vlan.h>
#include <linux/reset.h>
#include <linux/tcp.h>
#include <linux/interrupt.h>
#include <linux/pinctrl/devinfo.h>

#include "mtk_eth_soc.h"

static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");

#define MTK_ETHTOOL_STAT(x) { #x, \
			    offsetof(struct mtk_hw_stats, x) / sizeof(u64) }

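/* strings used by ethtool */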
static const struct mtk_ethtool_stats {
	char str[ETH_GSTRING_LEN];
	u32 offset;
} mtk_ethtool_stats[] = {
	MTK_ETHTOOL_STAT(tx_bytes),
	MTK_ETHTOOL_STAT(tx_packets),
	MTK_ETHTOOL_STAT(tx_skip),
	MTK_ETHTOOL_STAT(tx_collisions),
	MTK_ETHTOOL_STAT(rx_bytes),
	MTK_ETHTOOL_STAT(rx_packets),
	MTK_ETHTOOL_STAT(rx_overflow),
	MTK_ETHTOOL_STAT(rx_fcs_errors),
	MTK_ETHTOOL_STAT(rx_short_errors),
	MTK_ETHTOOL_STAT(rx_long_errors),
	MTK_ETHTOOL_STAT(rx_checksum_errors),
	MTK_ETHTOOL_STAT(rx_flow_control_packets),
};

static const char * const mtk_clks_source_name[] = {
	"ethif", "esw", "gp0", "gp1", "gp2", "trgpll", "sgmii_tx250m",
	"sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll"
};

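/* All MAC, MDIO and DMA registers of the frame engine live in one MMIO
 * window at eth->base; the two helpers below are the only register
 * accessors used by the rest of the driver. The __raw variants skip the
 * barriers of readl()/writel(), which is presumably safe here because
 * ordering against DMA is handled explicitly with wmb() at the points
 * where descriptors are handed to the hardware.
 */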
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
}

u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
{
	return __raw_readl(eth->base + reg);
}

static int mtk_mdio_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
			return 0;
		if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
			break;
		usleep_range(10, 20);
	}

	dev_err(eth->dev, "mdio: MDIO timeout\n");
	return -1;
}

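/* MDIO transactions go through the PHY Indirect Access Control (IAC)
 * register: wait until the PHY_IAC_ACCESS busy bit clears, write one
 * command word encoding opcode, PHY address, register number and (for
 * writes) the 16-bit payload, then wait for the busy bit to clear again
 * before reading back any result.
 */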
static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
			   u32 phy_register, u32 write_data)
{
	if (mtk_mdio_busy_wait(eth))
		return -1;

	write_data &= 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
		(phy_register << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return -1;

	return 0;
}

static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
	u32 d;

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
		(phy_reg << PHY_IAC_REG_SHIFT) |
		(phy_addr << PHY_IAC_ADDR_SHIFT),
		MTK_PHY_IAC);

	if (mtk_mdio_busy_wait(eth))
		return 0xffff;

	d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;

	return d;
}

static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
			  int phy_reg, u16 val)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
}

static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct mtk_eth *eth = bus->priv;

	return _mtk_mdio_read(eth, phy_addr, phy_reg);
}

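/* GMAC0 can run in TRGMII mode, where the MAC-side interface clocking has
 * to track the negotiated link speed: the trgpll clock rate, the TRGMII
 * RX/TX clock controls and the interface mode register are all
 * reprogrammed whenever the PHY reports a speed change.
 */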
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
{
	u32 val;
	int ret;

	val = (speed == SPEED_1000) ?
		INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
	mtk_w32(eth, val, INTF_MODE);

	regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
			   ETHSYS_TRGMII_CLK_SEL362_5,
			   ETHSYS_TRGMII_CLK_SEL362_5);

	val = (speed == SPEED_1000) ? 250000000 : 500000000;
	ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
	if (ret)
		dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);

	val = (speed == SPEED_1000) ?
		RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_RCK_CTRL);

	val = (speed == SPEED_1000) ?
		TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100;
	mtk_w32(eth, val, TRGMII_TCK_CTRL);
}

static void mtk_gmac_sgmii_hw_setup(struct mtk_eth *eth, int mac_id)
{
	u32 val;

	/* Setup the link timer and QPHY power up inside SGMIISYS */
	regmap_write(eth->sgmiisys, SGMSYS_PCS_LINK_TIMER,
		     SGMII_LINK_TIMER_DEFAULT);

	regmap_read(eth->sgmiisys, SGMSYS_SGMII_MODE, &val);
	val |= SGMII_REMOTE_FAULT_DIS;
	regmap_write(eth->sgmiisys, SGMSYS_SGMII_MODE, val);

	regmap_read(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, &val);
	val |= SGMII_AN_RESTART;
	regmap_write(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, val);

	regmap_read(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, &val);
	val &= ~SGMII_PHYA_PWD;
	regmap_write(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, val);

	/* Determine MUX for the SGMII when both GMACs share one SGMII unit */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_DUAL_GMAC_SHARED_SGMII)) {
		regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
		val &= ~SYSCFG0_SGMII_MASK;
		val |= !mac_id ? SYSCFG0_SGMII_GMAC1 : SYSCFG0_SGMII_GMAC2;
		regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

		dev_info(eth->dev, "setup shared sgmii for gmac=%d\n",
			 mac_id);
	}

	/* Setup the GMAC1 going through SGMII path when SoC also supports
	 * ESW on GMAC1
	 */
	if (MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_ESW | MTK_GMAC1_SGMII) &&
	    !mac_id) {
		mtk_w32(eth, 0, MTK_MAC_MISC);
		dev_info(eth->dev, "setup gmac1 going through sgmii\n");
	}
}

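/* Called by phylib whenever the PHY state changes: rebuilds the MAC
 * control register (MCR) from the negotiated speed, duplex and pause
 * parameters, forces the new mode into the hardware, and mirrors the PHY
 * link state into the carrier state of the net_device.
 */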
static void mtk_phy_link_adjust(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	u16 lcl_adv = 0, rmt_adv = 0;
	u8 flowctrl;
	u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
		  MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
		  MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
		  MAC_MCR_BACKPR_EN;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	switch (dev->phydev->speed) {
	case SPEED_1000:
		mcr |= MAC_MCR_SPEED_1000;
		break;
	case SPEED_100:
		mcr |= MAC_MCR_SPEED_100;
		break;
	}

	if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
	    !mac->id && !mac->trgmii)
		mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed);

	if (dev->phydev->link)
		mcr |= MAC_MCR_FORCE_LINK;

	if (dev->phydev->duplex) {
		mcr |= MAC_MCR_FORCE_DPX;

		if (dev->phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (dev->phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = linkmode_adv_to_lcl_adv_t(dev->phydev->advertising);
		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

		if (flowctrl & FLOW_CTRL_TX)
			mcr |= MAC_MCR_FORCE_TX_FC;
		if (flowctrl & FLOW_CTRL_RX)
			mcr |= MAC_MCR_FORCE_RX_FC;

		netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
			  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
			  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
	}

	mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));

	if (dev->phydev->link)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	if (!of_phy_is_fixed_link(mac->of_node))
		phy_print_status(dev->phydev);
}

static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac,
				struct device_node *phy_node)
{
	struct phy_device *phydev;
	int phy_mode;

	phy_mode = of_get_phy_mode(phy_node);
	if (phy_mode < 0) {
		dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
		return -EINVAL;
	}

	phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
				mtk_phy_link_adjust, 0, phy_mode);
	if (!phydev) {
		dev_err(eth->dev, "could not connect to PHY\n");
		return -ENODEV;
	}

	dev_info(eth->dev,
		 "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n",
		 mac->id, phydev_name(phydev), phydev->phy_id,
		 phydev->drv->name);

	return 0;
}

static int mtk_phy_connect(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth;
	struct device_node *np;
	u32 val;

	eth = mac->hw;
	np = of_parse_phandle(mac->of_node, "phy-handle", 0);
	if (!np && of_phy_is_fixed_link(mac->of_node))
		if (!of_phy_register_fixed_link(mac->of_node))
			np = of_node_get(mac->of_node);
	if (!np)
		return -ENODEV;

	mac->ge_mode = 0;
	switch (of_get_phy_mode(np)) {
	case PHY_INTERFACE_MODE_TRGMII:
		mac->trgmii = true;
		/* fall through */
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII:
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII))
			mtk_gmac_sgmii_hw_setup(eth, mac->id);
		break;
	case PHY_INTERFACE_MODE_MII:
		mac->ge_mode = 1;
		break;
	case PHY_INTERFACE_MODE_REVMII:
		mac->ge_mode = 2;
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!mac->id)
			goto err_phy;
		mac->ge_mode = 3;
		break;
	default:
		goto err_phy;
	}

	/* put the gmac into the right mode */
	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
	val |= SYSCFG0_GE_MODE(mac->ge_mode, mac->id);
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	/* couple phydev to net_device */
	if (mtk_phy_connect_node(eth, mac, np))
		goto err_phy;

	dev->phydev->autoneg = AUTONEG_ENABLE;
	dev->phydev->speed = 0;
	dev->phydev->duplex = 0;

	phy_set_max_speed(dev->phydev, SPEED_1000);
	phy_support_asym_pause(dev->phydev);
	linkmode_copy(dev->phydev->advertising, dev->phydev->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
			 dev->phydev->advertising);
	phy_start_aneg(dev->phydev);

	of_node_put(np);

	return 0;

err_phy:
	if (of_phy_is_fixed_link(mac->of_node))
		of_phy_deregister_fixed_link(mac->of_node);
	of_node_put(np);
	dev_err(eth->dev, "%s: invalid phy\n", __func__);
	return -EINVAL;
}

static int mtk_mdio_init(struct mtk_eth *eth)
{
	struct device_node *mii_np;
	int ret;

	mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
	if (!mii_np) {
		dev_err(eth->dev, "no %s child node found", "mdio-bus");
		return -ENODEV;
	}

	if (!of_device_is_available(mii_np)) {
		ret = -ENODEV;
		goto err_put_node;
	}

	eth->mii_bus = devm_mdiobus_alloc(eth->dev);
	if (!eth->mii_bus) {
		ret = -ENOMEM;
		goto err_put_node;
	}

	eth->mii_bus->name = "mdio";
	eth->mii_bus->read = mtk_mdio_read;
	eth->mii_bus->write = mtk_mdio_write;
	eth->mii_bus->priv = eth;
	eth->mii_bus->parent = eth->dev;

	snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
	ret = of_mdiobus_register(eth->mii_bus, mii_np);

err_put_node:
	of_node_put(mii_np);
	return ret;
}

static void mtk_mdio_cleanup(struct mtk_eth *eth)
{
	if (!eth->mii_bus)
		return;

	mdiobus_unregister(eth->mii_bus);
}

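/* TX completion interrupts are signalled through the QDMA interrupt mask
 * register, RX interrupts through the PDMA one; each mask has its own
 * spinlock, taken with irqsave so the helpers below can be used from both
 * hard-IRQ and process context.
 */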
static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->tx_irq_lock, flags);
	val = mtk_r32(eth, MTK_QDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}

static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&eth->rx_irq_lock, flags);
	val = mtk_r32(eth, MTK_PDMA_INT_MASK);
	mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
	spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}

static int mtk_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	struct mtk_mac *mac = netdev_priv(dev);
	const char *macaddr = dev->dev_addr;

	if (ret)
		return ret;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	spin_lock_bh(&mac->hw->page_lock);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
		MTK_GDMA_MAC_ADRH(mac->id));
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA_MAC_ADRL(mac->id));
	spin_unlock_bh(&mac->hw->page_lock);

	return 0;
}

void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = MTK_GDM1_TX_GBCNT;
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	hw_stats->rx_bytes += mtk_r32(mac->hw, base);
	stats = mtk_r32(mac->hw, base + 0x04);
	if (stats)
		hw_stats->rx_bytes += (stats << 32);
	hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
	hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
	hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
	hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
	hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
	hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
	hw_stats->rx_flow_control_packets +=
					mtk_r32(mac->hw, base + 0x24);
	hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
	hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
	hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
	stats = mtk_r32(mac->hw, base + 0x34);
	if (stats)
		hw_stats->tx_bytes += (stats << 32);
	hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	u64_stats_update_end(&hw_stats->syncp);
}

static void mtk_stats_update(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i] || !eth->mac[i]->hw_stats)
			continue;
		if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
			mtk_stats_update_mac(eth->mac[i]);
			spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
		}
	}
}

static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int start;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}

static inline int mtk_max_frag_size(int mtu)
{
	/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
	if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
		mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

	return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

static inline int mtk_max_buf_size(int frag_size)
{
	int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	WARN_ON(buf_size < MTK_MAX_RX_LENGTH);

	return buf_size;
}

static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
				   struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}

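/* the qdma core needs scratch memory to be setup: a ring of descriptors
 * (the "free queue") is pre-populated to point into one large scratch
 * buffer, and the head/tail of that ring is handed to the hardware.
 */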
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
	dma_addr_t phy_ring_tail;
	int cnt = MTK_DMA_SIZE;
	dma_addr_t dma_addr;
	int i;

	eth->scratch_ring = dma_alloc_coherent(eth->dev,
					       cnt * sizeof(struct mtk_tx_dma),
					       &eth->phy_scratch_ring,
					       GFP_ATOMIC | __GFP_ZERO);
	if (unlikely(!eth->scratch_ring))
		return -ENOMEM;

	eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
				    GFP_KERNEL);
	if (unlikely(!eth->scratch_head))
		return -ENOMEM;

	dma_addr = dma_map_single(eth->dev,
				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
		return -ENOMEM;

	memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
	phy_ring_tail = eth->phy_scratch_ring +
			(sizeof(struct mtk_tx_dma) * (cnt - 1));

	for (i = 0; i < cnt; i++) {
		eth->scratch_ring[i].txd1 =
					(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
		if (i < cnt - 1)
			eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
				((i + 1) * sizeof(struct mtk_tx_dma)));
		eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
	}

	mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
	mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
	mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
	mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);

	return 0;
}

static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
	void *ret = ring->dma;

	return ret + (desc - ring->phys);
}

static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
						    struct mtk_tx_dma *txd)
{
	int idx = txd - ring->dma;

	return &ring->buf[idx];
}

static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
		dma_unmap_single(eth->dev,
				 dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
		dma_unmap_page(eth->dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
	}
	tx_buf->flags = 0;
	if (tx_buf->skb &&
	    (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}

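/* Map an skb onto a chain of QDMA TX descriptors: the linear head goes
 * into the first descriptor, each page fragment is split into chunks of
 * at most MTK_TX_DMA_BUF_LEN, and the skb pointer is only attached to the
 * first buffer (fragments carry a dummy marker) so that reclaim can tell
 * which descriptor completes the packet.
 */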
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
		      int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_buf *itx_buf, *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = 0, fport;

	itxd = ring->next_free;
	if (itxd == ring->last_free)
		return -ENOMEM;

	/* set the forward port */
	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
	txd4 |= fport;

	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(itx_buf, 0, sizeof(*itx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(eth->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
			  MTK_TX_FLAGS_FPORT1;
	dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	txd = itxd;
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;

			txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
			if (txd == ring->last_free)
				goto err_dma;

			n_desc++;
			frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0));
			WRITE_ONCE(txd->txd4, fport);

			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			memset(tx_buf, 0, sizeof(*tx_buf));
			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
					 MTK_TX_FLAGS_FPORT1;

			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	itx_buf->skb = skb;

	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
	atomic_sub(n_desc, &ring->free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
	    !netdev_xmit_more())
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;

err_dma:
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

		/* unmap dma */
		mtk_tx_unmap(eth, tx_buf);

		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
	} while (itxd != txd);

	return -ENOMEM;
}

static inline int mtk_cal_txd_req(struct sk_buff *skb)
{
	int i, nfrags;
	struct skb_frag_struct *frag;

	nfrags = 1;
	if (skb_is_gso(skb)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN);
		}
	} else {
		nfrags += skb_shinfo(skb)->nr_frags;
	}

	return nfrags;
}

static int mtk_queue_stopped(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		if (netif_queue_stopped(eth->netdev[i]))
			return 1;
	}

	return 0;
}

static void mtk_wake_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_wake_queue(eth->netdev[i]);
	}
}

static void mtk_stop_queue(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		netif_stop_queue(eth->netdev[i]);
	}
}

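/* Both GMACs transmit through one shared descriptor ring, so the xmit
 * path serializes on eth->page_lock and the queues of all MACs are
 * stopped and woken together whenever the shared ring fills up or
 * drains below the threshold.
 */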
static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	bool gso = false;
	int tx_num;

	/* normally we can rely on the stack not calling this more than once,
	 * however we have 2 queues running on the same ring so we need to lock
	 * the ring access
	 */
	spin_lock(&eth->page_lock);

	if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
		goto drop;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
		mtk_stop_queue(eth);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		spin_unlock(&eth->page_lock);
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
				(SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
		mtk_stop_queue(eth);

	spin_unlock(&eth->page_lock);

	return NETDEV_TX_OK;

drop:
	spin_unlock(&eth->page_lock);
	stats->tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
{
	int i;
	struct mtk_rx_ring *ring;
	int idx;

	if (!eth->hwlro)
		return &eth->rx_ring[0];

	for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
		ring = &eth->rx_ring[i];
		idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
		if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
			ring->calc_idx_update = true;
			return ring;
		}
	}

	return NULL;
}

static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int i;

	if (!eth->hwlro) {
		ring = &eth->rx_ring[0];
		mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
	} else {
		for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
			ring = &eth->rx_ring[i];
			if (ring->calc_idx_update) {
				ring->calc_idx_update = false;
				mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
			}
		}
	}
}

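/* NAPI RX poll loop: for each completed descriptor a replacement page
 * fragment is allocated and mapped first, the received buffer is then
 * turned into an skb with build_skb() and handed up via
 * napi_gro_receive(), and the descriptor is finally recycled with the new
 * buffer. If the refill allocation fails, the old buffer is kept and the
 * packet is dropped instead.
 */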
static int mtk_poll_rx(struct napi_struct *napi, int budget,
		       struct mtk_eth *eth)
{
	struct mtk_rx_ring *ring;
	int idx;
	struct sk_buff *skb;
	u8 *data, *new_data;
	struct mtk_rx_dma *rxd, trxd;
	int done = 0;

	while (done < budget) {
		struct net_device *netdev;
		unsigned int pktlen;
		dma_addr_t dma_addr;
		int mac = 0;

		ring = mtk_get_rx_ring(eth);
		if (unlikely(!ring))
			goto rx_done;

		idx = NEXT_RX_DESP_IDX(ring->calc_idx, ring->dma_size);
		rxd = &ring->dma[idx];
		data = ring->data[idx];

		mtk_rx_get_desc(&trxd, rxd);
		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;

		/* find out which mac the packet come from. values start at 1 */
		mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
		      RX_DMA_FPORT_MASK;
		mac--;

		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
			     !eth->netdev[mac]))
			goto release_desc;

		netdev = eth->netdev[mac];

		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;

		/* alloc new buffer */
		new_data = napi_alloc_frag(ring->frag_size);
		if (unlikely(!new_data)) {
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		dma_addr = dma_map_single(eth->dev,
					  new_data + NET_SKB_PAD,
					  ring->buf_size,
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}

		/* receive data */
		skb = build_skb(data, ring->frag_size);
		if (unlikely(!skb)) {
			skb_free_frag(new_data);
			netdev->stats.rx_dropped++;
			goto release_desc;
		}
		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

		dma_unmap_single(eth->dev, trxd.rxd1,
				 ring->buf_size, DMA_FROM_DEVICE);
		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
		skb->dev = netdev;
		skb_put(skb, pktlen);
		if (trxd.rxd4 & RX_DMA_L4_VALID)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, netdev);

		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
		    RX_DMA_VID(trxd.rxd3))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       RX_DMA_VID(trxd.rxd3));
		skb_record_rx_queue(skb, 0);
		napi_gro_receive(napi, skb);

		ring->data[idx] = new_data;
		rxd->rxd1 = (unsigned int)dma_addr;

release_desc:
		rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);

		ring->calc_idx = idx;

		done++;
	}

rx_done:
	if (done) {
		/* make sure that all changes to the dma ring are flushed before
		 * we continue
		 */
		wmb();
		mtk_update_rx_cpu_idx(eth);
	}

	return done;
}

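/* TX reclaim walks the ring from the last CPU release pointer towards the
 * hardware DMA pointer, unmapping buffers and completing skbs that the
 * hardware has handed back to the CPU, then republishes the CPU pointer
 * and wakes the queues once enough descriptors are free again.
 */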
static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct mtk_tx_dma *desc;
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	unsigned int done[MTK_MAX_DEVS];
	unsigned int bytes[MTK_MAX_DEVS];
	u32 cpu, dma;
	int total = 0, i;

	memset(done, 0, sizeof(done));
	memset(bytes, 0, sizeof(bytes));

	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);

	desc = mtk_qdma_phys_to_virt(ring, cpu);

	while ((cpu != dma) && budget) {
		u32 next_cpu = desc->txd2;
		int mac = 0;

		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
			break;

		tx_buf = mtk_desc_to_tx_buf(ring, desc);
		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
			mac = 1;

		skb = tx_buf->skb;
		if (!skb)
			break;

		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
			bytes[mac] += skb->len;
			done[mac]++;
			budget--;
		}
		mtk_tx_unmap(eth, tx_buf);

		ring->last_free = desc;
		atomic_inc(&ring->free_count);

		cpu = next_cpu;
	}

	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);

	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i] || !done[i])
			continue;
		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
		total += done[i];
	}

	if (mtk_queue_stopped(eth) &&
	    (atomic_read(&ring->free_count) > ring->thresh))
		mtk_wake_queue(eth);

	return total;
}

static void mtk_handle_status_irq(struct mtk_eth *eth)
{
	u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);

	if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
		mtk_stats_update(eth);
		mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
			MTK_INT_STATUS2);
	}
}

static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
	u32 status, mask;
	int tx_done = 0;

	mtk_handle_status_irq(eth);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
	tx_done = mtk_poll_tx(eth, budget);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
		mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
		dev_info(eth->dev,
			 "done tx %d, intr 0x%08x/0x%x\n",
			 tx_done, status, mask);
	}

	if (tx_done == budget)
		return budget;

	status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
	if (status & MTK_TX_DONE_INT)
		return budget;

	napi_complete(napi);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);

	return tx_done;
}

static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mask;
	int rx_done = 0;
	int remain_budget = budget;

	mtk_handle_status_irq(eth);

poll_again:
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
	rx_done = mtk_poll_rx(napi, remain_budget, eth);

	if (unlikely(netif_msg_intr(eth))) {
		status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
		mask = mtk_r32(eth, MTK_PDMA_INT_MASK);
		dev_info(eth->dev,
			 "done rx %d, intr 0x%08x/0x%x\n",
			 rx_done, status, mask);
	}
	if (rx_done == remain_budget)
		return budget;

	status = mtk_r32(eth, MTK_PDMA_INT_STATUS);
	if (status & MTK_RX_DONE_INT) {
		remain_budget -= rx_done;
		goto poll_again;
	}
	napi_complete(napi);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);

	return rx_done + budget - remain_budget;
}

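/* The TX ring is one coherent DMA block linked into a circular list
 * through txd2; next_free/last_free track the producer and consumer
 * positions and the ring counts as full when the two meet, which is
 * presumably why two descriptors are kept out of free_count as a gap.
 */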
static int mtk_tx_alloc(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->dma);

	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
			    GFP_KERNEL);
	if (!ring->buf)
		goto no_tx_mem;

	ring->dma = dma_alloc_coherent(eth->dev,
				       MTK_DMA_SIZE * sz,
				       &ring->phys,
				       GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		goto no_tx_mem;

	memset(ring->dma, 0, MTK_DMA_SIZE * sz);
	for (i = 0; i < MTK_DMA_SIZE; i++) {
		int next = (i + 1) % MTK_DMA_SIZE;
		u32 next_ptr = ring->phys + next * sz;

		ring->dma[i].txd2 = next_ptr;
		ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
	}

	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
	ring->next_free = &ring->dma[0];
	ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
	ring->thresh = MAX_SKB_FRAGS;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
	mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_CRX_PTR);
	mtk_w32(eth,
		ring->phys + ((MTK_DMA_SIZE - 1) * sz),
		MTK_QTX_DRX_PTR);
	mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));

	return 0;

no_tx_mem:
	return -ENOMEM;
}

static void mtk_tx_clean(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i;

	if (ring->buf) {
		for (i = 0; i < MTK_DMA_SIZE; i++)
			mtk_tx_unmap(eth, &ring->buf[i]);
		kfree(ring->buf);
		ring->buf = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

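/* RX rings come in three flavours: the normal PDMA ring, an extra ring
 * fed by the QDMA engine (its registers sit at a 0x1000 offset), and the
 * HW LRO rings, which use larger buffers sized for aggregated TCP
 * payloads.
 */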
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
	struct mtk_rx_ring *ring;
	int rx_data_len, rx_dma_size;
	int i;
	u32 offset = 0;

	if (rx_flag == MTK_RX_FLAGS_QDMA) {
		if (ring_no)
			return -EINVAL;
		ring = &eth->rx_ring_qdma;
		offset = 0x1000;
	} else {
		ring = &eth->rx_ring[ring_no];
	}

	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
	} else {
		rx_data_len = ETH_DATA_LEN;
		rx_dma_size = MTK_DMA_SIZE;
	}

	ring->frag_size = mtk_max_frag_size(rx_data_len);
	ring->buf_size = mtk_max_buf_size(ring->frag_size);
	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
			     GFP_KERNEL);
	if (!ring->data)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		ring->data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->data[i])
			return -ENOMEM;
	}

	ring->dma = dma_alloc_coherent(eth->dev,
				       rx_dma_size * sizeof(*ring->dma),
				       &ring->phys,
				       GFP_ATOMIC | __GFP_ZERO);
	if (!ring->dma)
		return -ENOMEM;

	for (i = 0; i < rx_dma_size; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->data[i] + NET_SKB_PAD,
				ring->buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			return -ENOMEM;
		ring->dma[i].rxd1 = (unsigned int)dma_addr;

		ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
	}
	ring->dma_size = rx_dma_size;
	ring->calc_idx_update = false;
	ring->calc_idx = rx_dma_size - 1;
	ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);

	return 0;
}

static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i;

	if (ring->data && ring->dma) {
		for (i = 0; i < ring->dma_size; i++) {
			if (!ring->data[i])
				continue;
			if (!ring->dma[i].rxd1)
				continue;
			dma_unmap_single(eth->dev,
					 ring->dma[i].rxd1,
					 ring->buf_size,
					 DMA_FROM_DEVICE);
			skb_free_frag(ring->data[i]);
		}
		kfree(ring->data);
		ring->data = NULL;
	}

	if (ring->dma) {
		dma_free_coherent(eth->dev,
				  ring->dma_size * sizeof(*ring->dma),
				  ring->dma,
				  ring->phys);
		ring->dma = NULL;
	}
}

static int mtk_hwlro_rx_init(struct mtk_eth *eth)
{
	int i;
	u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
	u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;

	/* set LRO rings to auto-learn modes */
	ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;

	/* validate LRO ring */
	ring_ctrl_dw2 |= MTK_RING_VLD;

	/* set AGE timer */
	ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
	ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;

	/* set max AGG timer */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;

	/* set max LRO AGG count */
	ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
	ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;

	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
		mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
		mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
		mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
	}

	/* IPv4 checksum update enable */
	lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;

	/* switch priority comparison to packet count mode */
	lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;

	/* bandwidth threshold setting */
	mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);

	/* auto-learn score delta setting */
	mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);

	/* set refresh timer for altering flows */
	mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
		MTK_PDMA_LRO_ALT_REFRESH_TIMER);

	/* set HW LRO mode & the max aggregation count for rx packets */
	lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);

	/* the minimal remaining room of SDL0 in RXD for lro aggregation */
	lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;

	/* enable HW LRO */
	lro_ctrl_dw0 |= MTK_LRO_EN;

	mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
	mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);

	return 0;
}

static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
{
	int i;
	u32 val;

	/* relinquish lro rings, flush aggregated packets */
	mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);

	/* wait for relinquishments done */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
		if (val & MTK_LRO_RING_RELINQUISH_DONE) {
			msleep(20);
			continue;
		}
		break;
	}

	/* invalidate lro rings */
	for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
		mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));

	/* disable HW LRO */
	mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
}

static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));

	/* validate the IP setting */
	mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
}

static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
{
	u32 reg_val;

	reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));

	/* invalidate the IP setting */
	mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));

	mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
}

static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
{
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i])
			cnt++;
	}

	return cnt;
}

static int mtk_hwlro_add_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if ((fsp->flow_type != TCP_V4_FLOW) ||
	    (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
	    (fsp->location > 1))
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);

	return 0;
}

static int mtk_hwlro_del_ipaddr(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int hwlro_idx;

	if (fsp->location > 1)
		return -EINVAL;

	mac->hwlro_ip[fsp->location] = 0;
	hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;

	mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);

	mtk_hwlro_inval_ipaddr(eth, hwlro_idx);

	return 0;
}

static void mtk_hwlro_netdev_disable(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int i, hwlro_idx;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		mac->hwlro_ip[i] = 0;
		hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;

		mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
	}

	mac->hwlro_ip_cnt = 0;
}

static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
				    struct ethtool_rxnfc *cmd)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;

	/* only tcp dst ipv4 is meaningful, others are meaningless */
	fsp->flow_type = TCP_V4_FLOW;
	fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
	fsp->m_u.tcp_ip4_spec.ip4dst = 0;

	fsp->h_u.tcp_ip4_spec.ip4src = 0;
	fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
	fsp->h_u.tcp_ip4_spec.psrc = 0;
	fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
	fsp->h_u.tcp_ip4_spec.pdst = 0;
	fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
	fsp->h_u.tcp_ip4_spec.tos = 0;
	fsp->m_u.tcp_ip4_spec.tos = 0xff;

	return 0;
}

static int mtk_hwlro_get_fdir_all(struct net_device *dev,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int cnt = 0;
	int i;

	for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
		if (mac->hwlro_ip[i]) {
			rule_locs[cnt] = i;
			cnt++;
		}
	}

	cmd->rule_cnt = cnt;

	return 0;
}

static netdev_features_t mtk_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	if (!(features & NETIF_F_LRO)) {
		struct mtk_mac *mac = netdev_priv(dev);
		int ip_cnt = mtk_hwlro_get_ip_cnt(mac);

		if (ip_cnt) {
			netdev_info(dev, "RX flow is programmed, LRO should keep on\n");

			features |= NETIF_F_LRO;
		}
	}

	return features;
}

static int mtk_set_features(struct net_device *dev, netdev_features_t features)
{
	int err = 0;

	if (!((dev->features ^ features) & NETIF_F_LRO))
		return 0;

	if (!(features & NETIF_F_LRO))
		mtk_hwlro_netdev_disable(dev);

	return err;
}

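/* wait for DMA to finish whatever it is doing before we start using it again */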
static int mtk_dma_busy_wait(struct mtk_eth *eth)
{
	unsigned long t_start = jiffies;

	while (1) {
		if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) &
		      (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
			return 0;
		if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT))
			break;
	}

	dev_err(eth->dev, "DMA init timeout\n");
	return -1;
}

static int mtk_dma_init(struct mtk_eth *eth)
{
	int err;
	u32 i;

	if (mtk_dma_busy_wait(eth))
		return -EBUSY;

	/* QDMA needs scratch memory for internal reordering of the
	 * descriptors
	 */
	err = mtk_init_fq_dma(eth);
	if (err)
		return err;

	err = mtk_tx_alloc(eth);
	if (err)
		return err;

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
	if (err)
		return err;

	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
	if (err)
		return err;

	if (eth->hwlro) {
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
			err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
			if (err)
				return err;
		}
		err = mtk_hwlro_rx_init(eth);
		if (err)
			return err;
	}

	/* Enable random early drop and set drop threshold automatically */
	mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
		MTK_QDMA_FC_THRES);
	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);

	return 0;
}

static void mtk_dma_free(struct mtk_eth *eth)
{
	int i;

	for (i = 0; i < MTK_MAC_COUNT; i++)
		if (eth->netdev[i])
			netdev_reset_queue(eth->netdev[i]);
	if (eth->scratch_ring) {
		dma_free_coherent(eth->dev,
				  MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
				  eth->scratch_ring,
				  eth->phy_scratch_ring);
		eth->scratch_ring = NULL;
		eth->phy_scratch_ring = 0;
	}
	mtk_tx_clean(eth);
	mtk_rx_clean(eth, &eth->rx_ring[0]);
	mtk_rx_clean(eth, &eth->rx_ring_qdma);

	if (eth->hwlro) {
		mtk_hwlro_rx_uninit(eth);
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
			mtk_rx_clean(eth, &eth->rx_ring[i]);
	}

	kfree(eth->scratch_head);
}

static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	schedule_work(&eth->pending_work);
}

static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		__napi_schedule(&eth->rx_napi);
		mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	}

	return IRQ_HANDLED;
}

static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;

	if (likely(napi_schedule_prep(&eth->tx_napi))) {
		__napi_schedule(&eth->tx_napi);
		mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	}

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	mtk_handle_irq_rx(eth->irq[2], dev);
	mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
}
#endif

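/* Bring up both DMA engines: the QDMA block drives TX (and one extra RX
 * ring), the PDMA block drives the normal RX rings; MTK_MULTI_EN is
 * presumably what lets PDMA serve more than one RX ring for HW LRO.
 */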
static int mtk_start_dma(struct mtk_eth *eth)
{
	int err;

	err = mtk_dma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	mtk_w32(eth,
		MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
		MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO |
		MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
		MTK_RX_BT_32DWORDS,
		MTK_QDMA_GLO_CFG);

	mtk_w32(eth,
		MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
		MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
		MTK_PDMA_GLO_CFG);

	return 0;
}

static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	/* we run 2 netdevs on the same dma ring so we only bring it up once */
	if (!refcount_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		napi_enable(&eth->tx_napi);
		napi_enable(&eth->rx_napi);
		mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
		mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
		refcount_set(&eth->dma_refcnt, 1);
	} else {
		refcount_inc(&eth->dma_refcnt);
	}

	phy_start(dev->phydev);
	netif_start_queue(dev);

	return 0;
}

static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_bh(&eth->page_lock);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_bh(&eth->page_lock);

	/* wait for dma stop */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}

static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	netif_tx_disable(dev);
	phy_stop(dev->phydev);

	/* only shutdown DMA if this is the last user */
	if (!refcount_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
	mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
	napi_disable(&eth->tx_napi);
	napi_disable(&eth->rx_napi);

	mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
	mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}

static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   reset_bits);

	usleep_range(1000, 1100);
	regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
			   reset_bits,
			   ~reset_bits);
	mdelay(10);
}

static void mtk_clk_disable(struct mtk_eth *eth)
{
	int clk;

	for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
		clk_disable_unprepare(eth->clks[clk]);
}

static int mtk_clk_enable(struct mtk_eth *eth)
{
	int clk, ret;

	for (clk = 0; clk < MTK_CLK_MAX; clk++) {
		ret = clk_prepare_enable(eth->clks[clk]);
		if (ret)
			goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	while (--clk >= 0)
		clk_disable_unprepare(eth->clks[clk]);

	return ret;
}

static int mtk_hw_init(struct mtk_eth *eth)
{
	int i, val, ret;

	if (test_and_set_bit(MTK_HW_INIT, &eth->state))
		return 0;

	pm_runtime_enable(eth->dev);
	pm_runtime_get_sync(eth->dev);

	ret = mtk_clk_enable(eth);
	if (ret)
		goto err_disable_pm;

	ethsys_reset(eth, RSTCTRL_FE);
	ethsys_reset(eth, RSTCTRL_PPE);

	regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->mac[i])
			continue;
		val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, eth->mac[i]->id);
		val |= SYSCFG0_GE_MODE(eth->mac[i]->ge_mode, eth->mac[i]->id);
	}
	regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);

	if (eth->pctl) {
		/* Set GE2 driving and slew rate */
		regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);

		/* set GE2 TDSEL */
		regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);

		/* set GE2 TUNE */
		regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
	}

	/* Set linkdown as the default for each GMAC. Its own MCR would be set
	 * up with the more appropriate value when mtk_phy_link_adjust call is
	 * being invoked.
	 */
	for (i = 0; i < MTK_MAC_COUNT; i++)
		mtk_w32(eth, 0, MTK_MAC_MCR(i));

	/* Indicates CDM to parse the MTK special tag from CPU
	 * which also is working out for untag packets.
	 */
	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);

	/* Enable RX VLan Offloading */
	mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);

	/* enable interrupt delay for RX */
	mtk_w32(eth, MTK_PDMA_DELAY_RX_DELAY, MTK_PDMA_DELAY_INT);

	/* disable delay and normal interrupt */
	mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
	mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
	mtk_w32(eth, 0, MTK_RST_GL);

	/* FE int grouping */
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
	mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
	mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
	mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);

	for (i = 0; i < 2; i++) {
		u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));

		/* setup the forward port to send frame to PDMA */
		val &= ~0xffff;

		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;

		/* setup the mac dma */
		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
	}

	return 0;

err_disable_pm:
	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return ret;
}

static int mtk_hw_deinit(struct mtk_eth *eth)
{
	if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
		return 0;

	mtk_clk_disable(eth);

	pm_runtime_put_sync(eth->dev);
	pm_runtime_disable(eth->dev);

	return 0;
}

static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	const char *mac_addr;

	mac_addr = of_get_mac_address(mac->of_node);
	if (mac_addr)
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}

	return mtk_phy_connect(dev);
}

static void mtk_uninit(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	phy_disconnect(dev->phydev);
	if (of_phy_is_fixed_link(mac->of_node))
		of_phy_deregister_fixed_link(mac->of_node);
	mtk_tx_irq_disable(eth, ~0);
	mtk_rx_irq_disable(eth, ~0);
}

static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return phy_mii_ioctl(dev->phydev, ifr, cmd);
	default:
		break;
	}

	return -EOPNOTSUPP;
}

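/* The reset worker tears the whole datapath down and brings it back up:
 * it stops every running netdev, re-runs the hardware init (including the
 * pinctrl default state and PHY re-init), and reopens the devices that
 * were up when the reset was requested, all under the RTNL lock with
 * MTK_RESETTING set to fence off the datapath.
 */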
2075static void mtk_pending_work(struct work_struct *work)
2076{
2077 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
2078 int err, i;
2079 unsigned long restart = 0;
2080
2081 rtnl_lock();
2082
2083 dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
2084
2085 while (test_and_set_bit_lock(MTK_RESETTING, ð->state))
2086 cpu_relax();
2087
2088 dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__);
2089
2090 for (i = 0; i < MTK_MAC_COUNT; i++) {
2091 if (!eth->netdev[i])
2092 continue;
2093 mtk_stop(eth->netdev[i]);
2094 __set_bit(i, &restart);
2095 }
2096 dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
2097
2098
2099
2100
2101 mtk_hw_deinit(eth);
2102
2103 if (eth->dev->pins)
2104 pinctrl_select_state(eth->dev->pins->p,
2105 eth->dev->pins->default_state);
2106 mtk_hw_init(eth);
2107
2108 for (i = 0; i < MTK_MAC_COUNT; i++) {
2109 if (!eth->mac[i] ||
2110 of_phy_is_fixed_link(eth->mac[i]->of_node))
2111 continue;
2112 err = phy_init_hw(eth->netdev[i]->phydev);
2113 if (err)
2114 dev_err(eth->dev, "%s: PHY init failed.\n",
2115 eth->netdev[i]->name);
2116 }
2117
2118
2119 for (i = 0; i < MTK_MAC_COUNT; i++) {
2120 if (!test_bit(i, &restart))
2121 continue;
2122 err = mtk_open(eth->netdev[i]);
2123 if (err) {
2124 netif_alert(eth, ifup, eth->netdev[i],
2125 "Driver up/down cycle failed, closing device.\n");
2126 dev_close(eth->netdev[i]);
2127 }
2128 }
2129
2130 dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
2131
2132 clear_bit_unlock(MTK_RESETTING, ð->state);
2133
2134 rtnl_unlock();
2135}
2136
2137static int mtk_free_dev(struct mtk_eth *eth)
2138{
2139 int i;
2140
2141 for (i = 0; i < MTK_MAC_COUNT; i++) {
2142 if (!eth->netdev[i])
2143 continue;
2144 free_netdev(eth->netdev[i]);
2145 }
2146
2147 return 0;
2148}
2149
2150static int mtk_unreg_dev(struct mtk_eth *eth)
2151{
2152 int i;
2153
2154 for (i = 0; i < MTK_MAC_COUNT; i++) {
2155 if (!eth->netdev[i])
2156 continue;
2157 unregister_netdev(eth->netdev[i]);
2158 }
2159
2160 return 0;
2161}
2162
2163static int mtk_cleanup(struct mtk_eth *eth)
2164{
2165 mtk_unreg_dev(eth);
2166 mtk_free_dev(eth);
2167 cancel_work_sync(ð->pending_work);
2168
2169 return 0;
2170}
2171
static int mtk_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	phy_ethtool_ksettings_get(ndev->phydev, cmd);

	return 0;
}

static int mtk_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mtk_mac *mac = netdev_priv(ndev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return phy_ethtool_ksettings_set(ndev->phydev, cmd);
}

static void mtk_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct mtk_mac *mac = netdev_priv(dev);

	strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
	strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
	info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}

static u32 mtk_get_msglevel(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	return mac->hw->msg_enable;
}

static void mtk_set_msglevel(struct net_device *dev, u32 value)
{
	struct mtk_mac *mac = netdev_priv(dev);

	mac->hw->msg_enable = value;
}

static int mtk_nway_reset(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	return genphy_restart_aneg(dev->phydev);
}

static u32 mtk_get_link(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	int err;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return -EBUSY;

	err = genphy_update_link(dev->phydev);
	if (err)
		return ethtool_op_get_link(dev);

	return dev->phydev->link;
}

static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
			memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int mtk_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(mtk_ethtool_stats);
	default:
		return -EOPNOTSUPP;
	}
}

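/* copy the counters out under the u64_stats seqcount so userspace sees a
 * consistent 64-bit snapshot even on 32-bit hosts; the trylock lets the
 * read path skip a MIB refresh rather than block on the updater
 */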
static void mtk_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hwstats = mac->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	int i;

	if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
		return;

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock_bh(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock_bh(&hwstats->stats_lock);
		}
	}

	data_src = (u64 *)hwstats;

	do {
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstats->syncp);

		for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
			*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
	} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}

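/* the RX flow classification ioctls are backed by the hardware LRO engine,
 * so they are only honoured while NETIF_F_LRO is set on the device
 */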
static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		if (dev->features & NETIF_F_LRO) {
			cmd->data = MTK_MAX_RX_RING_NUM;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRLCNT:
		if (dev->features & NETIF_F_LRO) {
			struct mtk_mac *mac = netdev_priv(dev);

			cmd->rule_cnt = mac->hwlro_ip_cnt;
			ret = 0;
		}
		break;
	case ETHTOOL_GRXCLSRULE:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_entry(dev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_get_fdir_all(dev, cmd,
						     rule_locs);
		break;
	default:
		break;
	}

	return ret;
}

static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_add_ipaddr(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		if (dev->features & NETIF_F_LRO)
			ret = mtk_hwlro_del_ipaddr(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct ethtool_ops mtk_ethtool_ops = {
	.get_link_ksettings	= mtk_get_link_ksettings,
	.set_link_ksettings	= mtk_set_link_ksettings,
	.get_drvinfo		= mtk_get_drvinfo,
	.get_msglevel		= mtk_get_msglevel,
	.set_msglevel		= mtk_set_msglevel,
	.nway_reset		= mtk_nway_reset,
	.get_link		= mtk_get_link,
	.get_strings		= mtk_get_strings,
	.get_sset_count		= mtk_get_sset_count,
	.get_ethtool_stats	= mtk_get_ethtool_stats,
	.get_rxnfc		= mtk_get_rxnfc,
	.set_rxnfc		= mtk_set_rxnfc,
};

static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init		= mtk_init,
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mtk_do_ioctl,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64	= mtk_get_stats64,
	.ndo_fix_features	= mtk_fix_features,
	.ndo_set_features	= mtk_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
};

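/* allocate and initialize one net_device for the "mediatek,eth-mac" child
 * node @np; the device itself is registered later from mtk_probe()
 */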
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
{
	struct mtk_mac *mac;
	const __be32 *_id = of_get_property(np, "reg", NULL);
	int id, err;

	if (!_id) {
		dev_err(eth->dev, "missing mac id\n");
		return -EINVAL;
	}

	id = be32_to_cpup(_id);
	if (id >= MTK_MAC_COUNT) {
		dev_err(eth->dev, "%d is not a valid mac id\n", id);
		return -EINVAL;
	}

	if (eth->netdev[id]) {
		dev_err(eth->dev, "duplicate mac id found: %d\n", id);
		return -EINVAL;
	}

	eth->netdev[id] = alloc_etherdev(sizeof(*mac));
	if (!eth->netdev[id]) {
		dev_err(eth->dev, "alloc_etherdev failed\n");
		return -ENOMEM;
	}
	mac = netdev_priv(eth->netdev[id]);
	eth->mac[id] = mac;
	mac->id = id;
	mac->hw = eth;
	mac->of_node = np;

	memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
	mac->hwlro_ip_cnt = 0;

	mac->hw_stats = devm_kzalloc(eth->dev,
				     sizeof(*mac->hw_stats),
				     GFP_KERNEL);
	if (!mac->hw_stats) {
		dev_err(eth->dev, "failed to allocate counter memory\n");
		err = -ENOMEM;
		goto free_netdev;
	}
	spin_lock_init(&mac->hw_stats->stats_lock);
	u64_stats_init(&mac->hw_stats->syncp);
	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;

	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
	eth->netdev[id]->watchdog_timeo = 5 * HZ;
	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
	eth->netdev[id]->base_addr = (unsigned long)eth->base;

	eth->netdev[id]->hw_features = MTK_HW_FEATURES;
	if (eth->hwlro)
		eth->netdev[id]->hw_features |= NETIF_F_LRO;

	eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
		~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
	eth->netdev[id]->features |= MTK_HW_FEATURES;
	eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;

	eth->netdev[id]->irq = eth->irq[0];
	eth->netdev[id]->dev.of_node = np;

	return 0;

free_netdev:
	free_netdev(eth->netdev[id]);
	return err;
}

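/* the chip id is stored as four ASCII digits spread over two ETHSYS
 * registers; decode them into a plain decimal number
 */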
static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id)
{
	u32 val[2], id[4];

	regmap_read(eth->ethsys, ETHSYS_CHIPID0_3, &val[0]);
	regmap_read(eth->ethsys, ETHSYS_CHIPID4_7, &val[1]);

	id[3] = ((val[0] >> 16) & 0xff) - '0';
	id[2] = ((val[0] >> 24) & 0xff) - '0';
	id[1] = (val[1] & 0xff) - '0';
	id[0] = ((val[1] >> 8) & 0xff) - '0';

	*chip_id = (id[3] * 1000) + (id[2] * 100) +
		   (id[1] * 10) + id[0];

	if (!(*chip_id)) {
		dev_err(eth->dev, "failed to get chip id\n");
		return -ENODEV;
	}

	dev_info(eth->dev, "chip id = %d\n", *chip_id);

	return 0;
}

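/* treat hardware LRO as available only on the MT7622 and MT7623 engines */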
static bool mtk_is_hwlro_supported(struct mtk_eth *eth)
{
	switch (eth->chip_id) {
	case MT7622_ETH:
	case MT7623_ETH:
		return true;
	}

	return false;
}

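/* probe: map the frame engine registers, look up the companion syscons,
 * clocks and IRQs, then create and register one netdev per MAC node
 */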
static int mtk_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct device_node *mac_np;
	struct mtk_eth *eth;
	int err;
	int i;

	eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
	if (!eth)
		return -ENOMEM;

	eth->soc = of_device_get_match_data(&pdev->dev);

	eth->dev = &pdev->dev;
	eth->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(eth->base))
		return PTR_ERR(eth->base);

	spin_lock_init(&eth->page_lock);
	spin_lock_init(&eth->tx_irq_lock);
	spin_lock_init(&eth->rx_irq_lock);

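	/* the ethsys syscon is always required; sgmiisys and pctl are only
	 * looked up on SoCs whose soc data advertises them
	 */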
	eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "mediatek,ethsys");
	if (IS_ERR(eth->ethsys)) {
		dev_err(&pdev->dev, "no ethsys regmap found\n");
		return PTR_ERR(eth->ethsys);
	}

	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
		eth->sgmiisys =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"mediatek,sgmiisys");
		if (IS_ERR(eth->sgmiisys)) {
			dev_err(&pdev->dev, "no sgmiisys regmap found\n");
			return PTR_ERR(eth->sgmiisys);
		}
	}

	if (eth->soc->required_pctl) {
		eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							    "mediatek,pctl");
		if (IS_ERR(eth->pctl)) {
			dev_err(&pdev->dev, "no pctl regmap found\n");
			return PTR_ERR(eth->pctl);
		}
	}

	for (i = 0; i < 3; i++) {
		eth->irq[i] = platform_get_irq(pdev, i);
		if (eth->irq[i] < 0) {
			dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
			return -ENXIO;
		}
	}
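	/* a missing clock is only fatal when the SoC marks it in
	 * required_clks; otherwise fall back to a NULL clk
	 */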
	for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
		eth->clks[i] = devm_clk_get(eth->dev,
					    mtk_clks_source_name[i]);
		if (IS_ERR(eth->clks[i])) {
			if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			if (eth->soc->required_clks & BIT(i)) {
				dev_err(&pdev->dev, "clock %s not found\n",
					mtk_clks_source_name[i]);
				return -EINVAL;
			}
			eth->clks[i] = NULL;
		}
	}

	eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
	INIT_WORK(&eth->pending_work, mtk_pending_work);

	err = mtk_hw_init(eth);
	if (err)
		return err;

	err = mtk_get_chip_id(eth, &eth->chip_id);
	if (err)
		return err;

	eth->hwlro = mtk_is_hwlro_supported(eth);

	for_each_child_of_node(pdev->dev.of_node, mac_np) {
		if (!of_device_is_compatible(mac_np,
					     "mediatek,eth-mac"))
			continue;

		if (!of_device_is_available(mac_np))
			continue;

		err = mtk_add_mac(eth, mac_np);
		if (err)
			goto err_deinit_hw;
	}

	err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		goto err_free_dev;

	err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
			       dev_name(eth->dev), eth);
	if (err)
		goto err_free_dev;

	err = mtk_mdio_init(eth);
	if (err)
		goto err_free_dev;

	for (i = 0; i < MTK_MAX_DEVS; i++) {
		if (!eth->netdev[i])
			continue;

		err = register_netdev(eth->netdev[i]);
		if (err) {
			dev_err(eth->dev, "error bringing up device\n");
			goto err_deinit_mdio;
		}
		netif_info(eth, probe, eth->netdev[i],
			   "mediatek frame engine at 0x%08lx, irq %d\n",
			   eth->netdev[i]->base_addr, eth->irq[0]);
	}

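	/* the TX and RX NAPI contexts service a shared DMA engine, so attach
	 * them to a dummy net_device instead of any single MAC's netdev
	 */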
	init_dummy_netdev(&eth->dummy_dev);
	netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
		       MTK_NAPI_WEIGHT);
	netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
		       MTK_NAPI_WEIGHT);

	platform_set_drvdata(pdev, eth);

	return 0;

err_deinit_mdio:
	mtk_mdio_cleanup(eth);
err_free_dev:
	mtk_free_dev(eth);
err_deinit_hw:
	mtk_hw_deinit(eth);

	return err;
}

static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);
	int i;

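	/* stop all devices to make sure that DMA is properly shut down */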
	for (i = 0; i < MTK_MAC_COUNT; i++) {
		if (!eth->netdev[i])
			continue;
		mtk_stop(eth->netdev[i]);
	}

	mtk_hw_deinit(eth);

	netif_napi_del(&eth->tx_napi);
	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	mtk_mdio_cleanup(eth);

	return 0;
}

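/* per-SoC capability flags, clock bitmaps and pctl requirements referenced
 * from the OF match table below
 */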
static const struct mtk_soc_data mt2701_data = {
	.caps = MTK_GMAC1_TRGMII,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

static const struct mtk_soc_data mt7622_data = {
	.caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW,
	.required_clks = MT7622_CLKS_BITMAP,
	.required_pctl = false,
};

static const struct mtk_soc_data mt7623_data = {
	.caps = MTK_GMAC1_TRGMII,
	.required_clks = MT7623_CLKS_BITMAP,
	.required_pctl = true,
};

const struct of_device_id of_mtk_match[] = {
	{ .compatible = "mediatek,mt2701-eth", .data = &mt2701_data},
	{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
	{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
	{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);

static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");