// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include "macb.h"

struct sifive_fu540_macb_mgmt {
	void __iomem *reg;
	unsigned long rate;
	struct clk_hw hw;
};

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)	\
					| MACB_BIT(ISR_RLE)	\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
					| MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
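/* Worked example for the two masks above -- a sketch, assuming the field
 * widths defined in macb.h (MACB_TX_FRMLEN_SIZE = 11, GEM_TX_FRMLEN_SIZE = 14):
 *
 *   MACB: ((1 << 11) - 1) & ~7 = 2047 & ~7 = 2040 bytes per TX buffer
 *   GEM:  ((1 << 14) - 1) & ~7 = 16383 & ~7 = 16376 bytes per TX buffer
 *
 * i.e. the largest value the frame-length field can hold, rounded down to
 * the required MACB_TX_LEN_ALIGN multiple.
 */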

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230
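/* Sanity check on the 1230 figure -- an illustration, not driver logic: a
 * maximum untagged Ethernet frame is 1518 octets, plus 8 octets of
 * preamble/SFD and 12 octets of inter-frame gap, i.e. 1538 octet times.
 * At 10 Mbit/s one octet takes 0.8 us, so one frame time is
 * 1538 * 0.8 = 1230.4 us.
 */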

#define MACB_PM_TIMEOUT	100 /* ms */

#define MACB_MDIO_TIMEOUT	1000000 /* in usecs */

/* DMA buffer descriptor size depends on the hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}
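/* Example of the index scaling above -- a sketch, with entry sizes taken
 * from macb_dma_desc_get_size(): with HW_DMA_CAP_64B (or HW_DMA_CAP_PTP)
 * each ring entry spans two base-sized descriptor slots, so logical entry
 * 5 starts at ring[10]; with HW_DMA_CAP_64B_PTP it spans three slots, so
 * entry 5 starts at ring[15].  The rings themselves remain arrays of the
 * base struct macb_dma_desc type.
 */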

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	return (struct macb_dma_desc_64 *)((void *)desc
		+ sizeof(struct macb_dma_desc));
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
			macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of the NCR register.
 * When the CPU is in big endian we need to program swapped mode for
 * management descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		addr[0] = bottom & 0xff;
		addr[1] = (bottom >> 8) & 0xff;
		addr[2] = (bottom >> 16) & 0xff;
		addr[3] = (bottom >> 24) & 0xff;
		addr[4] = top & 0xff;
		addr[5] = (top >> 8) & 0xff;

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

static int macb_mdio_wait_for_idle(struct macb *bp)
{
	u32 val;

	return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
				  1, MACB_MDIO_TIMEOUT);
}

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0)
		goto mdio_pm_exit;

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));

mdio_read_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0)
		goto mdio_pm_exit;

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

mdio_write_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static void macb_init_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH,
				     upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH,
				     upper_32_bits(queue->tx_ring_dma));
#endif
	}
}

/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk:	Pointer to the clock to change
 * @speed:	Link speed (SPEED_10/100/1000) selecting the target frequency
 * @dev:	Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}
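/* Numeric sketch of the check above -- illustrative values only: ferr is
 * the frequency error counted in steps of rate / 100000, i.e. 10 ppm per
 * step, so "ferr > 5" warns once the rounded rate is more than 50 ppm off,
 * the tolerance RGMII allows.  For SPEED_1000, rate = 125 MHz and one step
 * is 1250 Hz: a closest achievable rate of 124.99 MHz is 10 kHz (80 ppm)
 * off, giving ferr = 8 and a warning, while 124.999 MHz (8 ppm off) passes
 * silently.
 */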

static void macb_validate(struct phylink_config *config,
			  unsigned long *supported,
			  struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct macb *bp = netdev_priv(ndev);

	/* We only support MII, RMII, GMII, RGMII & SGMII. */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_RMII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_SGMII &&
	    !phy_interface_mode_is_rgmii(state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	if (!macb_is_gem(bp) &&
	    (state->interface == PHY_INTERFACE_MODE_GMII ||
	     phy_interface_mode_is_rgmii(state->interface))) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);
	phylink_set(mask, Asym_Pause);

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
	    (state->interface == PHY_INTERFACE_MODE_NA ||
	     state->interface == PHY_INTERFACE_MODE_GMII ||
	     state->interface == PHY_INTERFACE_MODE_SGMII ||
	     phy_interface_mode_is_rgmii(state->interface))) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);

		if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
			phylink_set(mask, 1000baseT_Half);
	}

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static void macb_mac_pcs_get_state(struct phylink_config *config,
				   struct phylink_link_state *state)
{
	state->link = 0;
}

static void macb_mac_an_restart(struct phylink_config *config)
{
	/* Not supported */
}

static void macb_mac_config(struct phylink_config *config, unsigned int mode,
			    const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	unsigned long flags;
	u32 old_ctrl, ctrl;

	spin_lock_irqsave(&bp->lock, flags);

	old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);

	/* Clear all the bits we might set later */
	ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE) |
		  GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));

	if (state->speed == SPEED_1000)
		ctrl |= GEM_BIT(GBE);
	else if (state->speed == SPEED_100)
		ctrl |= MACB_BIT(SPD);

	if (state->duplex)
		ctrl |= MACB_BIT(FD);

	/* We do not support MLO_PAUSE_RX yet */
	if (state->pause & MLO_PAUSE_TX)
		ctrl |= MACB_BIT(PAE);

	if (state->interface == PHY_INTERFACE_MODE_SGMII)
		ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);

	/* Apply the new configuration, if any */
	if (old_ctrl ^ ctrl)
		macb_or_gem_writel(bp, NCFGR, ctrl);

	bp->speed = state->speed;

	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
			       phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		queue_writel(queue, IDR,
			     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));

	/* Disable Rx and Tx */
	ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(ndev);
}

static void macb_mac_link_up(struct phylink_config *config, unsigned int mode,
			     phy_interface_t interface, struct phy_device *phy)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned int q;

	macb_set_tx_clk(bp->tx_clk, bp->speed, ndev);

	/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
	 * cleared the pipeline and control registers.
	 */
	bp->macbgem_ops.mog_init_rings(bp);
	macb_init_buffers(bp);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		queue_writel(queue, IER,
			     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));

	/* Enable Rx and Tx */
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));

	netif_tx_wake_all_queues(ndev);
}

static const struct phylink_mac_ops macb_phylink_ops = {
	.validate = macb_validate,
	.mac_pcs_get_state = macb_mac_pcs_get_state,
	.mac_an_restart = macb_mac_an_restart,
	.mac_config = macb_mac_config,
	.mac_link_down = macb_mac_link_down,
	.mac_link_up = macb_mac_link_up,
};

static bool macb_phy_handle_exists(struct device_node *dn)
{
	dn = of_parse_phandle(dn, "phy-handle", 0);
	of_node_put(dn);
	return dn != NULL;
}

static int macb_phylink_connect(struct macb *bp)
{
	struct device_node *dn = bp->pdev->dev.of_node;
	struct net_device *dev = bp->dev;
	struct phy_device *phydev;
	int ret;

	if (dn)
		ret = phylink_of_phy_connect(bp->phylink, dn, 0);

	if (!dn || (ret && !macb_phy_handle_exists(dn))) {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		/* attach the mac to the phy */
		ret = phylink_connect_phy(bp->phylink, phydev);
	}

	if (ret) {
		netdev_err(dev, "Could not attach PHY (%d)\n", ret);
		return ret;
	}

	phylink_start(bp->phylink);

	return 0;
}

static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);

	bp->phylink_config.dev = &dev->dev;
	bp->phylink_config.type = PHYLINK_NETDEV;

	bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
				     bp->phy_interface, &macb_phylink_ops);
	if (IS_ERR(bp->phylink)) {
		netdev_err(dev, "Could not create a phylink instance (%ld)\n",
			   PTR_ERR(bp->phylink));
		return PTR_ERR(bp->phylink);
	}

	return 0;
}

static int macb_mdiobus_register(struct macb *bp)
{
	struct device_node *child, *np = bp->pdev->dev.of_node;

	/* Only register against the device tree if at least one PHY child is
	 * described there; otherwise scan the whole MDIO bus, which keeps old
	 * device trees without PHY nodes working.
	 */
	for_each_available_child_of_node(np, child)
		if (of_mdiobus_child_is_phy(child)) {
			/* The loop increments the child refcount,
			 * decrement it before returning.
			 */
			of_node_put(child);

			return of_mdiobus_register(bp->mii_bus, np);
		}

	return mdiobus_register(bp->mii_bus);
}

static int macb_mii_init(struct macb *bp)
{
	int err = -ENXIO;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	err = macb_mdiobus_register(bp);
	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}

static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}

static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		udelay(250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
		/* The low dword of desc->addr contains the RX_USED bit,
		 * clearing of which allows packet RX. Make sure the high
		 * dword is written before the low one re-arms the descriptor.
		 */
		dma_wmb();
	}
#endif
	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}
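/* Example of the 64-bit split handled above -- a sketch: for a DMA address
 * of 0x1_2345_6780, macb_set_addr() stores 0x00000001 in the extended
 * descriptor's addrh word and 0x23456780 in desc->addr, with dma_wmb() in
 * between so the hardware never pairs the new low word (whose RX_USED bit
 * re-arms an RX descriptor) with a stale high word; macb_get_addr()
 * reassembles the same value.
 */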

static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue *queue = container_of(work, struct macb_queue,
						tx_error_task);
	struct macb *bp = queue->bp;
	struct macb_tx_skb *tx_skb;
	struct macb_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int tail;
	unsigned long flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * netdev_tx_t functions of the queue's status.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb *tx_skb;
		struct sk_buff *skb;
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (unlikely(skb_shinfo(skb)->tx_flags &
					     SKBTX_HW_TSTAMP) &&
				    gem_ptp_do_txstamp(queue, skb, desc) == 0) {
					/* skb now belongs to timestamp buffer
					 * and will be removed later
					 */
					tx_skb->skb = NULL;
				}
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
}
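/* Ring accounting used here and in macb_start_xmit() -- an illustrative
 * note, not driver logic: with a power-of-two ring, tx_head and tx_tail
 * are free-running counters, and CIRC_CNT(head, tail, size) =
 * (head - tail) & (size - 1) counts in-flight descriptors.  With the
 * default tx_ring_size of 512, a stopped subqueue is woken once no more
 * than MACB_TX_WAKEUP_THRESH = 3 * 512 / 4 = 384 descriptors remain
 * pending.
 */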

static void gem_rx_refill(struct macb_queue *queue)
{
	unsigned int entry;
	struct sk_buff *skb;
	dma_addr_t paddr;
	struct macb *bp = queue->bp;
	struct macb_dma_desc *desc;

	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		queue->rx_prepared_head++;
		desc = macb_rx_desc(queue, entry);

		if (!queue->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			queue->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			desc->ctrl = 0;
			/* Setting addr clears RX_USED and allows reception,
			 * make sure ctrl is cleared first to avoid a race.
			 */
			dma_wmb();
			macb_set_addr(bp, desc, paddr);

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->ctrl = 0;
			dma_wmb();
			desc->addr &= ~MACB_BIT(RX_USED);
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
		    queue, queue->rx_prepared_head, queue->rx_tail);
}

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this are updated, so we don't have to record
	 * anything.
	 */
}

static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
		  int budget)
{
	struct macb *bp = queue->bp;
	unsigned int len;
	unsigned int entry;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	int count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
		desc = macb_rx_desc(queue, entry);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);

		if (!rxused)
			break;

		/* Ensure ctrl is at least as up-to-date as rxused */
		dma_rmb();

		ctrl = desc->ctrl;

		queue->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		skb = queue->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}

		queue->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->dev->stats.rx_packets++;
		queue->stats.rx_packets++;
		bp->dev->stats.rx_bytes += skb->len;
		queue->stats.rx_bytes += skb->len;

		gem_ptp_do_rxstamp(bp, skb, desc);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		napi_gro_receive(napi, skb);
	}

	gem_rx_refill(queue);

	return count;
}

static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
			 unsigned int first_frag, unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	struct macb *bp = queue->bp;

	desc = macb_rx_desc(queue, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(bp, first_frag),
		    macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->dev->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(queue, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(queue, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(queue, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->dev->stats.rx_packets++;
	bp->dev->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	napi_gro_receive(napi, skb);

	return 0;
}

static inline void macb_init_rx_ring(struct macb_queue *queue)
{
	struct macb *bp = queue->bp;
	dma_addr_t addr;
	struct macb_dma_desc *desc = NULL;
	int i;

	addr = queue->rx_buffers_dma;
	for (i = 0; i < bp->rx_ring_size; i++) {
		desc = macb_rx_desc(queue, i);
		macb_set_addr(bp, desc, addr);
		desc->ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	desc->addr |= MACB_BIT(RX_WRAP);
	queue->rx_tail = 0;
}

static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
		   int budget)
{
	struct macb *bp = queue->bp;
	bool reset_rx_queue = false;
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = queue->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
		u32 ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		if (!(desc->addr & MACB_BIT(RX_USED)))
			break;

		/* Ensure ctrl is at least as up-to-date as addr */
		dma_rmb();

		ctrl = desc->ctrl;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(queue, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			if (unlikely(first_frag == -1)) {
				reset_rx_queue = true;
				continue;
			}

			dropped = macb_rx_frame(queue, napi, first_frag, tail);
			first_frag = -1;
			if (unlikely(dropped < 0)) {
				reset_rx_queue = true;
				continue;
			}
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (unlikely(reset_rx_queue)) {
		unsigned long flags;
		u32 ctrl;

		netdev_err(bp->dev, "RX queue corruption: reset it\n");

		spin_lock_irqsave(&bp->lock, flags);

		ctrl = macb_readl(bp, NCR);
		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

		macb_init_rx_ring(queue);
		queue_writel(queue, RBQP, queue->rx_ring_dma);

		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

		spin_unlock_irqrestore(&bp->lock, flags);
		return received;
	}

	if (first_frag != -1)
		queue->rx_tail = first_frag;
	else
		queue->rx_tail = tail;

	return received;
}

static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
	struct macb *bp = queue->bp;
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			queue_writel(queue, IER, bp->rx_intr_mask);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}

static void macb_hresp_error_task(unsigned long data)
{
	struct macb *bp = (struct macb *)data;
	struct net_device *dev = bp->dev;
	struct macb_queue *queue = bp->queues;
	unsigned int q;
	u32 ctrl;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, bp->rx_intr_mask |
					 MACB_TX_INT_FLAGS |
					 MACB_BIT(HRESP));
	}
	ctrl = macb_readl(bp, NCR);
	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	bp->macbgem_ops.mog_init_rings(bp);

	/* Initialize TX and RX buffers */
	macb_init_buffers(bp);

	/* Enable interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		queue_writel(queue, IER,
			     bp->rx_intr_mask |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));

	ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
	macb_writel(bp, NCR, ctrl);

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
}

static void macb_tx_restart(struct macb_queue *queue)
{
	unsigned int head = queue->tx_head;
	unsigned int tail = queue->tx_tail;
	struct macb *bp = queue->bp;

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TXUBR));

	if (head == tail)
		return;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status, ctrl;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & bp->rx_intr_mask) {
			/* There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, bp->rx_intr_mask);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&queue->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&queue->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		if (status & MACB_BIT(TXUBR))
			macb_tx_restart(queue);

		/* Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		/* There is a hardware issue under heavy load where DMA can
		 * stop, this causes endless "used buffer descriptor read"
		 * interrupts but it can be cleared by re-enabling RX. See
		 * the at91rm9200 manual, section 41.3.1 or the Zynq manual
		 * section 16.7.4 for details. RXUBR is only enabled for
		 * these two versions.
		 */
		if (status & MACB_BIT(RXUBR)) {
			ctrl = macb_readl(bp, NCR);
			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
			wmb();
			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RXUBR));
		}

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			tasklet_schedule(&bp->hresp_err_tasklet);
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}
		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif

static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb,
				unsigned int hdrlen)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1, mss_mfs = 0;
	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;

	/* LSO */
	if (skb_shinfo(skb)->gso_size != 0) {
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* UDP - UFO */
			lso_ctrl = MACB_LSO_UFO_ENABLE;
		else
			/* TCP - TSO */
			lso_ctrl = MACB_LSO_TSO_ENABLE;
	}

	/* First, map non-paged data */
	len = skb_headlen(skb);

	/* first buffer length */
	size = hdrlen;

	offset = 0;
	while (len) {
		entry = macb_tx_ring_wrap(bp, tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;

		size = min(len, bp->max_tx_length);
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(bp, tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(!tx_skb)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(bp, i);
	ctrl = MACB_BIT(TX_USED);
	desc = macb_tx_desc(queue, entry);
	desc->ctrl = ctrl;

	if (lso_ctrl) {
		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
			/* include header and FCS in value given to h/w */
			mss_mfs = skb_shinfo(skb)->gso_size +
				  skb_transport_offset(skb) +
				  ETH_FCS_LEN;
		else /* TSO */ {
			mss_mfs = skb_shinfo(skb)->gso_size;
			/* TCP Sequence Number Source Select
			 * can be set only for TSO
			 */
			seq_ctrl = 0;
		}
	}

	do {
		i--;
		entry = macb_tx_ring_wrap(bp, i);
		tx_skb = &queue->tx_skb[entry];
		desc = macb_tx_desc(queue, entry);

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (bp->tx_ring_size - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* First descriptor is header descriptor */
		if (i == queue->tx_head) {
			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
			if ((bp->dev->features & NETIF_F_HW_CSUM) &&
			    skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
				ctrl |= MACB_BIT(TX_NOCRC);
		} else
			/* Only set MSS/MFS on payload descriptors
			 * (second or later descriptor)
			 */
			ctrl |= MACB_BF(MSS_MFS, mss_mfs);

		/* Set TX buffer descriptor */
		macb_set_addr(bp, desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}

static netdev_features_t macb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	unsigned int nr_frags, f;
	unsigned int hdrlen;

	/* Validate LSO compatibility */

	/* there is only one buffer */
	if (!skb_is_nonlinear(skb))
		return features;

	/* length of header */
	hdrlen = skb_transport_offset(skb);
	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
		hdrlen += tcp_hdrlen(skb);

	/* For LSO:
	 * When software supplies two or more payload buffers all payload buffers
	 * apart from the last must be a multiple of 8 bytes in size.
	 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
			return features & ~MACB_NETIF_LSO;
	}
	return features;
}
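/* Example of the restriction checked above -- illustrative sizes only: a
 * TSO skb with payload fragments of 1448, 1448 and 1000 bytes keeps
 * NETIF_F_TSO, since every fragment but the last is a multiple of
 * MACB_TX_LEN_ALIGN (8).  Were a middle fragment 1450 bytes, LSO would be
 * dropped for this skb and it would be segmented in software instead.
 */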

static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* initialize checksum field
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <= 2
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}

static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
	int padlen = ETH_ZLEN - (*skb)->len;
	int headroom = skb_headroom(*skb);
	int tailroom = skb_tailroom(*skb);
	struct sk_buff *nskb;
	u32 fcs;

	if (!(ndev->features & NETIF_F_HW_CSUM) ||
	    !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
	    skb_shinfo(*skb)->gso_size)
		return 0;

	if (padlen <= 0) {
		/* FCS could be appended to tailroom. */
		if (tailroom >= ETH_FCS_LEN)
			goto add_fcs;
		/* FCS could be appended by moving data to headroom. */
		else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
			padlen = 0;
		/* No room for FCS, need to reallocate skb. */
		else
			padlen = ETH_FCS_LEN;
	} else {
		/* Add room for FCS. */
		padlen += ETH_FCS_LEN;
	}

	if (!cloned && headroom + tailroom >= padlen) {
		(*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
		skb_set_tail_pointer(*skb, (*skb)->len);
	} else {
		nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		dev_consume_skb_any(*skb);
		*skb = nskb;
	}

	if (padlen > ETH_FCS_LEN)
		skb_put_zero(*skb, padlen - ETH_FCS_LEN);

add_fcs:
	/* set FCS to packet */
	fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
	fcs = ~fcs;

	skb_put_u8(*skb, fcs & 0xff);
	skb_put_u8(*skb, (fcs >> 8) & 0xff);
	skb_put_u8(*skb, (fcs >> 16) & 0xff);
	skb_put_u8(*skb, (fcs >> 24) & 0xff);

	return 0;
}
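/* Worked example for the pad-and-FCS path above -- illustrative numbers,
 * not driver logic: a 42-byte ARP request is below ETH_ZLEN (60), so
 * padlen = 60 - 42 = 18 and then grows by ETH_FCS_LEN to 22.  18 zero
 * bytes are appended, the CRC is computed over the padded 60 bytes with
 * crc32_le(~0, data, len), bit-inverted, and appended LSB first.  The path
 * only runs when NETIF_F_HW_CSUM is set but this skb needs neither
 * checksum offload nor GSO; macb_tx_map() then sets TX_NOCRC so the MAC
 * does not append a second FCS.
 */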

static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int desc_cnt, nr_frags, frag_size, f;
	unsigned int hdrlen;
	bool is_lso, is_udp = 0;
	netdev_tx_t ret = NETDEV_TX_OK;

	if (macb_clear_csum(skb)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	if (macb_pad_and_fcs(&skb, dev)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	is_lso = (skb_shinfo(skb)->gso_size != 0);

	if (is_lso) {
		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);

		/* only queue eth + ip headers separately for UDP */
		if (is_udp)
			hdrlen = skb_transport_offset(skb);
		else
			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb_headlen(skb) < hdrlen) {
			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
			/* if this is required, would need to copy to single buffer */
			return NETDEV_TX_BUSY;
		}
	} else
		hdrlen = min(skb_headlen(skb), bp->max_tx_length);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	if (is_lso && (skb_headlen(skb) > hdrlen))
		/* extra header descriptor if also payload in first buffer */
		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
	else
		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
		       bp->tx_ring_size) < desc_cnt) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();
	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return ret;
}

static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}

static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	struct macb_queue *queue;
	dma_addr_t addr;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		if (!queue->rx_skbuff)
			continue;

		for (i = 0; i < bp->rx_ring_size; i++) {
			skb = queue->rx_skbuff[i];

			if (!skb)
				continue;

			desc = macb_rx_desc(queue, i);
			addr = macb_get_addr(bp, desc);

			dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			skb = NULL;
		}

		kfree(queue->rx_skbuff);
		queue->rx_skbuff = NULL;
	}
}

static void macb_free_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue = &bp->queues[0];

	if (queue->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  bp->rx_ring_size * bp->rx_buffer_size,
				  queue->rx_buffers, queue->rx_buffers_dma);
		queue->rx_buffers = NULL;
	}
}

static void macb_free_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	bp->macbgem_ops.mog_free_rx_buffers(bp);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		kfree(queue->tx_skb);
		queue->tx_skb = NULL;
		if (queue->tx_ring) {
			size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
			dma_free_coherent(&bp->pdev->dev, size,
					  queue->tx_ring, queue->tx_ring_dma);
			queue->tx_ring = NULL;
		}
		if (queue->rx_ring) {
			size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
			dma_free_coherent(&bp->pdev->dev, size,
					  queue->rx_ring, queue->rx_ring_dma);
			queue->rx_ring = NULL;
		}
	}
}

static int gem_alloc_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = bp->rx_ring_size * sizeof(struct sk_buff *);
		queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
		if (!queue->rx_skbuff)
			return -ENOMEM;
		else
			netdev_dbg(bp->dev,
				   "Allocated %d RX struct sk_buff entries at %p\n",
				   bp->rx_ring_size, queue->rx_skbuff);
	}
	return 0;
}

static int macb_alloc_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue = &bp->queues[0];
	int size;

	size = bp->rx_ring_size * bp->rx_buffer_size;
	queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					       &queue->rx_buffers_dma, GFP_KERNEL);
	if (!queue->rx_buffers)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
	return 0;
}

static int macb_alloc_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->tx_ring_dma,
						    GFP_KERNEL);
		if (!queue->tx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
			   q, size, (unsigned long)queue->tx_ring_dma,
			   queue->tx_ring);

		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
		queue->tx_skb = kmalloc(size, GFP_KERNEL);
		if (!queue->tx_skb)
			goto out_err;

		size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->rx_ring_dma, GFP_KERNEL);
		if (!queue->rx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
	}
	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}

static void gem_init_rings(struct macb *bp)
{
	struct macb_queue *queue;
	struct macb_dma_desc *desc = NULL;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		for (i = 0; i < bp->tx_ring_size; i++) {
			desc = macb_tx_desc(queue, i);
			macb_set_addr(bp, desc, 0);
			desc->ctrl = MACB_BIT(TX_USED);
		}
		desc->ctrl |= MACB_BIT(TX_WRAP);
		queue->tx_head = 0;
		queue->tx_tail = 0;

		queue->rx_tail = 0;
		queue->rx_prepared_head = 0;

		gem_rx_refill(queue);
	}
}

static void macb_init_rings(struct macb *bp)
{
	int i;
	struct macb_dma_desc *desc = NULL;

	macb_init_rx_ring(&bp->queues[0]);

	for (i = 0; i < bp->tx_ring_size; i++) {
		desc = macb_tx_desc(&bp->queues[0], i);
		macb_set_addr(bp, desc, 0);
		desc->ctrl = MACB_BIT(TX_USED);
	}
	bp->queues[0].tx_head = 0;
	bp->queues[0].tx_tail = 0;
	desc->ctrl |= MACB_BIT(TX_WRAP);
}

static void macb_reset_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl = macb_readl(bp, NCR);

	/* Disable RX and TX (XXX: Should we halt the transmission
	 * first?)
	 */
	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));

	/* Clear the stats registers (XXX: Update stats first?) */
	ctrl |= MACB_BIT(CLRSTAT);

	macb_writel(bp, NCR, ctrl);

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable all interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, -1);
		queue_readl(queue, ISR);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, -1);
	}
}

static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}
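/* Divisor sanity check -- an illustration, not driver logic: IEEE 802.3
 * clause 22 caps MDC at 2.5 MHz.  With a 100 MHz pclk the GEM table above
 * picks GEM_CLK_DIV48, giving roughly 2.08 MHz, while the 10/100 MACB
 * table picks MACB_CLK_DIV64, giving roughly 1.56 MHz; both stay under
 * the limit.
 */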

/* Get the DMA bus width field of the network configuration register that we
 * should program.  We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
	struct macb_queue *queue;
	u32 buffer_size;
	unsigned int q;
	u32 dmacfg;

	buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			if (q)
				queue_writel(queue, RBQS, buffer_size);
			else
				dmacfg |= GEM_BF(RXBS, buffer_size);
		}
		if (bp->dma_burst_length)
			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA_PKT);

		if (bp->native_io)
			dmacfg &= ~GEM_BIT(ENDIA_DESC);
		else
			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */

		if (bp->dev->features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);

		dmacfg &= ~GEM_BIT(ADDR64);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			dmacfg |= GEM_BIT(ADDR64);
#endif
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
			dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
#endif
		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
		gem_writel(bp, DMACFG, dmacfg);
	}
}

static void macb_init_hw(struct macb *bp)
{
	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	if (bp->caps & MACB_CAPS_JUMBO)
		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
	else
		config |= MACB_BIT(BIG);	/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
		config |= GEM_BIT(RXCOEN);
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		gem_writel(bp, JML, bp->jumbo_max_len);
	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
	if (bp->caps & MACB_CAPS_JUMBO)
		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;

	macb_configure_dma(bp);
}

/* The hash address register is 64 bits long and takes up two
 * locations in the memory map.  The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function.  The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received.  If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast.  A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register.  A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register.  To
 * receive all multicast frames, the hash register should be set with
 * all ones and the multicast hash enable bit should be set in the
 * network configuration register.
 */
static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

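/* Return the hash index value for the specified address. */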
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}

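/* Add multicast addresses to the internal multicast-hash table. */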
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = 0;
	mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}

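/* Enable/Disable promiscuous and multicast modes. */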
static void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC) {
		cfg |= MACB_BIT(CAF);

		if (macb_is_gem(bp))
			cfg &= ~GEM_BIT(RXCOEN);
	} else {
		cfg &= ~MACB_BIT(CAF);

		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
			cfg |= GEM_BIT(RXCOEN);
	}

	if (dev->flags & IFF_ALLMULTI) {
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}

static int macb_open(struct net_device *dev)
{
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned int q;
	int err;

	netdev_dbg(bp->dev, "open\n");

	err = pm_runtime_get_sync(&bp->pdev->dev);
	if (err < 0)
		goto pm_exit;

	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		goto pm_exit;
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_enable(&queue->napi);

	macb_init_hw(bp);

	err = macb_phylink_connect(bp);
	if (err)
		goto pm_exit;

	netif_tx_start_all_queues(dev);

	if (bp->ptp_info)
		bp->ptp_info->ptp_init(dev);

pm_exit:
	if (err) {
		pm_runtime_put_sync(&bp->pdev->dev);
		return err;
	}
	return 0;
}

static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	netif_tx_stop_all_queues(dev);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_disable(&queue->napi);

	phylink_stop(bp->phylink);
	phylink_disconnect_phy(bp->phylink);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(dev);

	pm_runtime_put(&bp->pdev->dev);

	return 0;
}

static int macb_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;

	return 0;
}

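/* Fold the current hardware statistics counters into the driver's running
 * totals.  The tx/rx octet counters are 64 bits wide and split across two
 * 32-bit registers, so for GEM_OCTTXL/GEM_OCTRXL the high word is read from
 * offset + 4 and merged in separately.
 */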
static void gem_update_stats(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int i, q, idx;
	unsigned long *stat;

	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;

	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
		u32 offset = gem_statistics[i].offset;
		u64 val = bp->macb_reg_readl(bp, offset);

		bp->ethtool_stats[i] += val;
		*p += val;

		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
			val = bp->macb_reg_readl(bp, offset + 4);
			bp->ethtool_stats[i] += ((u64)val) << 32;
			*(++p) += val;
		}
	}

	idx = GEM_STATS_LEN;
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
			bp->ethtool_stats[idx++] = *stat;
}

static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->dev->stats;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}

static void gem_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct macb *bp;

	bp = netdev_priv(dev);
	gem_update_stats(bp);
	memcpy(data, &bp->ethtool_stats, sizeof(u64)
			* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
}

static int gem_get_sset_count(struct net_device *dev, int sset)
{
	struct macb *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
	char stat_string[ETH_GSTRING_LEN];
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned int i;
	unsigned int q;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
			memcpy(p, gem_statistics[i].stat_string,
			       ETH_GSTRING_LEN);

		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
				snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
					 q, queue_statistics[i].stat_string);
				memcpy(p, stat_string, ETH_GSTRING_LEN);
			}
		}
		break;
	}
}

static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->dev->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

	macb_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors +
			    hwstat->sqe_test_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;

	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;

	return nstat;
}

static int macb_get_regs_len(struct net_device *netdev)
{
	return MACB_GREGS_NBR * sizeof(u32);
}

static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);

	regs_buff[0] = macb_readl(bp, NCR);
	regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2] = macb_readl(bp, NSR);
	regs_buff[3] = macb_readl(bp, TSR);
	regs_buff[4] = macb_readl(bp, RBQP);
	regs_buff[5] = macb_readl(bp, TBQP);
	regs_buff[6] = macb_readl(bp, RSR);
	regs_buff[7] = macb_readl(bp, IMR);

	regs_buff[8] = tail;
	regs_buff[9] = head;
	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
	if (macb_is_gem(bp))
		regs_buff[13] = gem_readl(bp, DMACFG);
}

static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET)
		phylink_ethtool_get_wol(bp->phylink, wol);
}

static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);
	int ret;

	ret = phylink_ethtool_set_wol(bp->phylink, wol);
	if (!ret)
		return 0;

	if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
	    (wol->wolopts & ~WAKE_MAGIC))
		return -EOPNOTSUPP;

	if (wol->wolopts & WAKE_MAGIC)
		bp->wol |= MACB_WOL_ENABLED;
	else
		bp->wol &= ~MACB_WOL_ENABLED;

	device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);

	return 0;
}

static int macb_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *kset)
{
	struct macb *bp = netdev_priv(netdev);

	return phylink_ethtool_ksettings_get(bp->phylink, kset);
}

static int macb_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *kset)
{
	struct macb *bp = netdev_priv(netdev);

	return phylink_ethtool_ksettings_set(bp->phylink, kset);
}

static void macb_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);

	ring->rx_max_pending = MAX_RX_RING_SIZE;
	ring->tx_max_pending = MAX_TX_RING_SIZE;

	ring->rx_pending = bp->rx_ring_size;
	ring->tx_pending = bp->tx_ring_size;
}

static int macb_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);
	u32 new_rx_size, new_tx_size;
	unsigned int reset = 0;

	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
		return -EINVAL;

	new_rx_size = clamp_t(u32, ring->rx_pending,
			      MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
	new_rx_size = roundup_pow_of_two(new_rx_size);

	new_tx_size = clamp_t(u32, ring->tx_pending,
			      MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
	new_tx_size = roundup_pow_of_two(new_tx_size);

	if ((new_tx_size == bp->tx_ring_size) &&
	    (new_rx_size == bp->rx_ring_size)) {
		return 0;
	}

	if (netif_running(bp->dev)) {
		reset = 1;
		macb_close(bp->dev);
	}

	bp->rx_ring_size = new_rx_size;
	bp->tx_ring_size = new_tx_size;

	if (reset)
		macb_open(bp->dev);

	return 0;
}

#ifdef CONFIG_MACB_USE_HWSTAMP
static unsigned int gem_get_tsu_rate(struct macb *bp)
{
	struct clk *tsu_clk;
	unsigned int tsu_rate;

	tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
	if (!IS_ERR(tsu_clk))
		tsu_rate = clk_get_rate(tsu_clk);
	else if (!IS_ERR(bp->pclk)) {
		tsu_clk = bp->pclk;
		tsu_rate = clk_get_rate(tsu_clk);
	} else
		return -ENOTSUPP;
	return tsu_rate;
}

static s32 gem_get_ptp_max_adj(void)
{
	return 64000000;
}

static int gem_get_ts_info(struct net_device *dev,
			   struct ethtool_ts_info *info)
{
	struct macb *bp = netdev_priv(dev);

	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
		ethtool_op_get_ts_info(dev, info);
		return 0;
	}

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types =
		(1 << HWTSTAMP_TX_ONESTEP_SYNC) |
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_ALL);

	info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;

	return 0;
}

static struct macb_ptp_info gem_ptp_info = {
	.ptp_init	 = gem_ptp_init,
	.ptp_remove	 = gem_ptp_remove,
	.get_ptp_max_adj = gem_get_ptp_max_adj,
	.get_tsu_rate	 = gem_get_tsu_rate,
	.get_ts_info	 = gem_get_ts_info,
	.get_hwtst	 = gem_get_hwtst,
	.set_hwtst	 = gem_set_hwtst,
};
#endif

static int macb_get_ts_info(struct net_device *netdev,
			    struct ethtool_ts_info *info)
{
	struct macb *bp = netdev_priv(netdev);

	if (bp->ptp_info)
		return bp->ptp_info->get_ts_info(netdev, info);

	return ethtool_op_get_ts_info(netdev, info);
}

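/* Enable or disable the already-programmed type 2 screener (flow filter)
 * entries: each rule keeps its compare registers, only the enable bits in
 * the corresponding screening register are rewritten here.
 */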
static void gem_enable_flow_filters(struct macb *bp, bool enable)
{
	struct net_device *netdev = bp->dev;
	struct ethtool_rx_fs_item *item;
	u32 t2_scr;
	int num_t2_scr;

	if (!(netdev->features & NETIF_F_NTUPLE))
		return;

	num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		struct ethtool_rx_flow_spec *fs = &item->fs;
		struct ethtool_tcpip4_spec *tp4sp_m;

		if (fs->location >= num_t2_scr)
			continue;

		t2_scr = gem_readl_n(bp, SCRT2, fs->location);

		t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);

		tp4sp_m = &(fs->m_u.tcp_ip4_spec);

		if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
			t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);

		if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
			t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);

		if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
			t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);

		gem_writel_n(bp, SCRT2, fs->location, t2_scr);
	}
}

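/* Program the type 2 compare registers for one flow rule: compare A matches
 * the IPv4 source address, compare B the IPv4 destination address, and
 * compare C the TCP/UDP source and/or destination port.
 */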
static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
	uint16_t index = fs->location;
	u32 w0, w1, t2_scr;
	bool cmp_a = false;
	bool cmp_b = false;
	bool cmp_c = false;

	tp4sp_v = &(fs->h_u.tcp_ip4_spec);
	tp4sp_m = &(fs->m_u.tcp_ip4_spec);

	if (tp4sp_m->ip4src == 0xFFFFFFFF) {
		w0 = 0;
		w1 = 0;
		w0 = tp4sp_v->ip4src;
		w1 = GEM_BFINS(T2DISMSK, 1, w1);
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
		w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
		cmp_a = true;
	}

	if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
		w0 = 0;
		w1 = 0;
		w0 = tp4sp_v->ip4dst;
		w1 = GEM_BFINS(T2DISMSK, 1, w1);
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
		w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
		cmp_b = true;
	}

	if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
		w0 = 0;
		w1 = 0;
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
		if (tp4sp_m->psrc == tp4sp_m->pdst) {
			w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
			w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
			w1 = GEM_BFINS(T2DISMSK, 1, w1);
			w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
		} else {
			w1 = GEM_BFINS(T2DISMSK, 0, w1);
			w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
			if (tp4sp_m->psrc == 0xFFFF) {
				w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
				w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
			} else {
				w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
				w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
			}
		}
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
		cmp_c = true;
	}

	t2_scr = 0;
	t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
	t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
	if (cmp_a)
		t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
	if (cmp_b)
		t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
	if (cmp_c)
		t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
	gem_writel_n(bp, SCRT2, index, t2_scr);
}

static int gem_add_flow_filter(struct net_device *netdev,
			       struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct ethtool_rx_fs_item *item, *newfs;
	unsigned long flags;
	int ret = -EINVAL;
	bool added = false;

	newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
	if (!newfs)
		return -ENOMEM;
	memcpy(&newfs->fs, fs, sizeof(newfs->fs));

	netdev_dbg(netdev,
		   "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
		   fs->flow_type, (int)fs->ring_cookie, fs->location,
		   htonl(fs->h_u.tcp_ip4_spec.ip4src),
		   htonl(fs->h_u.tcp_ip4_spec.ip4dst),
		   htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));

	spin_lock_irqsave(&bp->rx_fs_lock, flags);

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location > newfs->fs.location) {
			list_add_tail(&newfs->list, &item->list);
			added = true;
			break;
		} else if (item->fs.location == fs->location) {
			netdev_err(netdev, "Rule not added: location %d not free!\n",
				   fs->location);
			ret = -EBUSY;
			goto err;
		}
	}
	if (!added)
		list_add_tail(&newfs->list, &bp->rx_fs_list.list);

	gem_prog_cmp_regs(bp, fs);
	bp->rx_fs_list.count++;

	gem_enable_flow_filters(bp, 1);

	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	return 0;

err:
	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	kfree(newfs);
	return ret;
}

static int gem_del_flow_filter(struct net_device *netdev,
			       struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;
	struct ethtool_rx_flow_spec *fs;
	unsigned long flags;

	spin_lock_irqsave(&bp->rx_fs_lock, flags);

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location == cmd->fs.location) {
			fs = &(item->fs);
			netdev_dbg(netdev,
				   "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
				   fs->flow_type, (int)fs->ring_cookie, fs->location,
				   htonl(fs->h_u.tcp_ip4_spec.ip4src),
				   htonl(fs->h_u.tcp_ip4_spec.ip4dst),
				   htons(fs->h_u.tcp_ip4_spec.psrc),
				   htons(fs->h_u.tcp_ip4_spec.pdst));

			gem_writel_n(bp, SCRT2, fs->location, 0);

			list_del(&item->list);
			bp->rx_fs_list.count--;
			spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
			kfree(item);
			return 0;
		}
	}

	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	return -EINVAL;
}

static int gem_get_flow_entry(struct net_device *netdev,
			      struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location == cmd->fs.location) {
			memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
			return 0;
		}
	}
	return -EINVAL;
}

static int gem_get_all_flow_entries(struct net_device *netdev,
				    struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;
	uint32_t cnt = 0;

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = item->fs.location;
		cnt++;
	}
	cmd->data = bp->max_tuples;
	cmd->rule_cnt = cnt;

	return 0;
}

static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			 u32 *rule_locs)
{
	struct macb *bp = netdev_priv(netdev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->num_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->rx_fs_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gem_get_flow_entry(netdev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
		break;
	default:
		netdev_err(netdev,
			   "Command parameter %d is not supported\n", cmd->cmd);
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	int ret;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.location >= bp->max_tuples) ||
		    (cmd->fs.ring_cookie >= bp->num_queues)) {
			ret = -EINVAL;
			break;
		}
		ret = gem_add_flow_filter(netdev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gem_del_flow_filter(netdev, cmd);
		break;
	default:
		netdev_err(netdev,
			   "Command parameter %d is not supported\n", cmd->cmd);
		ret = -EOPNOTSUPP;
	}

	return ret;
}

static const struct ethtool_ops macb_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_wol		= macb_get_wol,
	.set_wol		= macb_set_wol,
	.get_link_ksettings	= macb_get_link_ksettings,
	.set_link_ksettings	= macb_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
};

static const struct ethtool_ops gem_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= macb_get_ts_info,
	.get_ethtool_stats	= gem_get_ethtool_stats,
	.get_strings		= gem_get_ethtool_strings,
	.get_sset_count		= gem_get_sset_count,
	.get_link_ksettings	= macb_get_link_ksettings,
	.set_link_ksettings	= macb_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
	.get_rxnfc		= gem_get_rxnfc,
	.set_rxnfc		= gem_set_rxnfc,
};

static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct macb *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (bp->ptp_info) {
		switch (cmd) {
		case SIOCSHWTSTAMP:
			return bp->ptp_info->set_hwtst(dev, rq, cmd);
		case SIOCGHWTSTAMP:
			return bp->ptp_info->get_hwtst(dev, rq);
		}
	}

	return phylink_mii_ioctl(bp->phylink, rq, cmd);
}

static inline void macb_set_txcsum_feature(struct macb *bp,
					   netdev_features_t features)
{
	u32 val;

	if (!macb_is_gem(bp))
		return;

	val = gem_readl(bp, DMACFG);
	if (features & NETIF_F_HW_CSUM)
		val |= GEM_BIT(TXCOEN);
	else
		val &= ~GEM_BIT(TXCOEN);

	gem_writel(bp, DMACFG, val);
}

static inline void macb_set_rxcsum_feature(struct macb *bp,
					   netdev_features_t features)
{
	struct net_device *netdev = bp->dev;
	u32 val;

	if (!macb_is_gem(bp))
		return;

	val = gem_readl(bp, NCFGR);
	if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC))
		val |= GEM_BIT(RXCOEN);
	else
		val &= ~GEM_BIT(RXCOEN);

	gem_writel(bp, NCFGR, val);
}

static inline void macb_set_rxflow_feature(struct macb *bp,
					   netdev_features_t features)
{
	if (!macb_is_gem(bp))
		return;

	gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
}

static int macb_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct macb *bp = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_CSUM)
		macb_set_txcsum_feature(bp, features);

	if (changed & NETIF_F_RXCSUM)
		macb_set_rxcsum_feature(bp, features);

	if (changed & NETIF_F_NTUPLE)
		macb_set_rxflow_feature(bp, features);

	return 0;
}

static void macb_restore_features(struct macb *bp)
{
	struct net_device *netdev = bp->dev;
	netdev_features_t features = netdev->features;

	macb_set_txcsum_feature(bp, features);
	macb_set_rxcsum_feature(bp, features);
	macb_set_rxflow_feature(bp, features);
}

static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= macb_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
	.ndo_set_features	= macb_set_features,
	.ndo_features_check	= macb_features_check,
};

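/* Configure peripheral capabilities: start from the (optional) device-tree
 * configuration, then detect GEM-specific features from the design
 * configuration registers.
 */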
static void macb_configure_caps(struct macb *bp,
				const struct macb_config *dt_conf)
{
	u32 dcfg;

	if (dt_conf)
		bp->caps = dt_conf->caps;

	if (hw_is_gem(bp->regs, bp->native_io)) {
		bp->caps |= MACB_CAPS_MACB_IS_GEM;

		dcfg = gem_readl(bp, DCFG1);
		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
		dcfg = gem_readl(bp, DCFG2);
		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
			bp->caps |= MACB_CAPS_FIFO_MODE;
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (gem_has_ptp(bp)) {
			if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
				dev_err(&bp->pdev->dev,
					"GEM doesn't support hardware ptp.\n");
			else {
				bp->hw_dma_cap |= HW_DMA_CAP_PTP;
				bp->ptp_info = &gem_ptp_info;
			}
		}
#endif
	}

	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}

static void macb_probe_queues(void __iomem *mem,
			      bool native_io,
			      unsigned int *queue_mask,
			      unsigned int *num_queues)
{
	unsigned int hw_q;

	*queue_mask = 0x1;
	*num_queues = 1;

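	/* Is this a macb or a gem?  Read the hardware directly: this runs
	 * early in the probe process, before MACB_CAPS_MACB_IS_GEM has been
	 * determined.
	 */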
	if (!hw_is_gem(mem, native_io))
		return;

	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;

	*queue_mask |= 0x1;

	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
		if (*queue_mask & (1 << hw_q))
			(*num_queues)++;
}

static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
			 struct clk **hclk, struct clk **tx_clk,
			 struct clk **rx_clk, struct clk **tsu_clk)
{
	struct macb_platform_data *pdata;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		*pclk = pdata->pclk;
		*hclk = pdata->hclk;
	} else {
		*pclk = devm_clk_get(&pdev->dev, "pclk");
		*hclk = devm_clk_get(&pdev->dev, "hclk");
	}

	if (IS_ERR_OR_NULL(*pclk)) {
		err = PTR_ERR(*pclk);
		if (!err)
			err = -ENODEV;

		dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
		return err;
	}

	if (IS_ERR_OR_NULL(*hclk)) {
		err = PTR_ERR(*hclk);
		if (!err)
			err = -ENODEV;

		dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		return PTR_ERR(*tx_clk);

	*rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
	if (IS_ERR(*rx_clk))
		return PTR_ERR(*rx_clk);

	*tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
	if (IS_ERR(*tsu_clk))
		return PTR_ERR(*tsu_clk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*hclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
		goto err_disable_pclk;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_hclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*tsu_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);

err_disable_txclk:
	clk_disable_unprepare(*tx_clk);

err_disable_hclk:
	clk_disable_unprepare(*hclk);

err_disable_pclk:
	clk_disable_unprepare(*pclk);

	return err;
}

static int macb_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	unsigned int hw_q, q;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	int err;
	u32 val, reg;

	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;

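	/* Set the queue register offsets once for all: queue 0 uses the
	 * legacy MACB register layout, while the higher queues use the GEM
	 * per-queue registers, so resolving this here avoids testing the
	 * queue index on every access.
	 */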
	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
		if (!(bp->queue_mask & (1 << hw_q)))
			continue;

		queue = &bp->queues[q];
		queue->bp = bp;
		netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
		if (hw_q) {
			queue->ISR  = GEM_ISR(hw_q - 1);
			queue->IER  = GEM_IER(hw_q - 1);
			queue->IDR  = GEM_IDR(hw_q - 1);
			queue->IMR  = GEM_IMR(hw_q - 1);
			queue->TBQP = GEM_TBQP(hw_q - 1);
			queue->RBQP = GEM_RBQP(hw_q - 1);
			queue->RBQS = GEM_RBQS(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
				queue->TBQPH = GEM_TBQPH(hw_q - 1);
				queue->RBQPH = GEM_RBQPH(hw_q - 1);
			}
#endif
		} else {
			queue->ISR  = MACB_ISR;
			queue->IER  = MACB_IER;
			queue->IDR  = MACB_IDR;
			queue->IMR  = MACB_IMR;
			queue->TBQP = MACB_TBQP;
			queue->RBQP = MACB_RBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
				queue->TBQPH = MACB_TBQPH;
				queue->RBQPH = MACB_RBQPH;
			}
#endif
		}

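		/* Request the IRQ using the Linux queue index rather than the
		 * hardware queue index: the interrupt lines in the device
		 * tree are assumed to be listed without the gaps that may
		 * exist in the hardware queue mask.
		 */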
		queue->irq = platform_get_irq(pdev, q);
		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
				       IRQF_SHARED, dev->name, queue);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to request IRQ %d (error %d)\n",
				queue->irq, err);
			return err;
		}

		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
		q++;
	}

	dev->netdev_ops = &macb_netdev_ops;

	if (macb_is_gem(bp)) {
		bp->max_tx_length = GEM_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = gem_init_rings;
		bp->macbgem_ops.mog_rx = gem_rx;
		dev->ethtool_ops = &gem_ethtool_ops;
	} else {
		bp->max_tx_length = MACB_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = macb_init_rings;
		bp->macbgem_ops.mog_rx = macb_rx;
		dev->ethtool_ops = &macb_ethtool_ops;
	}

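	/* Set up the feature flags: start from scatter/gather, add LSO and
	 * checksum offload when the hardware advertises them, and drop
	 * NETIF_F_SG again on integrations where it is known not to work.
	 */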
	dev->hw_features = NETIF_F_SG;

	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
		dev->hw_features |= MACB_NETIF_LSO;

	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	if (bp->caps & MACB_CAPS_SG_DISABLED)
		dev->hw_features &= ~NETIF_F_SG;
	dev->features = dev->hw_features;

	reg = gem_readl(bp, DCFG8);
	bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
			     GEM_BFEXT(T2SCR, reg));
	if (bp->max_tuples > 0) {
		if (GEM_BFEXT(SCR2ETH, reg) > 0) {
			reg = 0;
			reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
			gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);

			dev->hw_features |= NETIF_F_NTUPLE;

			INIT_LIST_HEAD(&bp->rx_fs_list.list);
			bp->rx_fs_list.count = 0;
			spin_lock_init(&bp->rx_fs_lock);
		} else
			bp->max_tuples = 0;
	}

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
		val = 0;
		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
			val = GEM_BIT(RGMII);
		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(RMII);
		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(MII);

		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
			val |= MACB_BIT(CLKEN);

		macb_or_gem_writel(bp, USRIO, val);
	}

	val = macb_mdc_clk_div(bp);
	val |= macb_dbw(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	macb_writel(bp, NCFGR, val);

	return 0;
}

#if defined(CONFIG_OF)

#define AT91ETHER_MAX_RBUFF_SZ	0x600
#define AT91ETHER_MAX_RX_DESCR	9

static struct sifive_fu540_macb_mgmt *mgmt;

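/* Initialize the receive descriptor ring and buffers, then enable the
 * receiver and transmitter of the EMAC.
 */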
static int at91ether_start(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_queue *q = &lp->queues[0];
	struct macb_dma_desc *desc;
	dma_addr_t addr;
	u32 ctl;
	int i;

	q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
					(AT91ETHER_MAX_RX_DESCR *
					 macb_dma_desc_get_size(lp)),
					&q->rx_ring_dma, GFP_KERNEL);
	if (!q->rx_ring)
		return -ENOMEM;

	q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
					   AT91ETHER_MAX_RX_DESCR *
					   AT91ETHER_MAX_RBUFF_SZ,
					   &q->rx_buffers_dma, GFP_KERNEL);
	if (!q->rx_buffers) {
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
				  macb_dma_desc_get_size(lp),
				  q->rx_ring, q->rx_ring_dma);
		q->rx_ring = NULL;
		return -ENOMEM;
	}

	addr = q->rx_buffers_dma;
	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
		desc = macb_rx_desc(q, i);
		macb_set_addr(lp, desc, addr);
		desc->ctrl = 0;
		addr += AT91ETHER_MAX_RBUFF_SZ;
	}

	desc->addr |= MACB_BIT(RX_WRAP);

	q->rx_tail = 0;

	macb_writel(lp, RBQP, q->rx_ring_dma);

	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));

	return 0;
}

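/* Open the ethernet interface */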
static int at91ether_open(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;
	int ret;

	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));

	macb_set_hwaddr(lp);

	ret = at91ether_start(dev);
	if (ret)
		return ret;

	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	ret = macb_phylink_connect(lp);
	if (ret)
		return ret;

	netif_start_queue(dev);

	return 0;
}

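/* Close the interface */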
static int at91ether_close(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_queue *q = &lp->queues[0];
	u32 ctl;

	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));

	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	netif_stop_queue(dev);

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR *
			  macb_dma_desc_get_size(lp),
			  q->rx_ring, q->rx_ring_dma);
	q->rx_ring = NULL;

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
			  q->rx_buffers, q->rx_buffers_dma);
	q->rx_buffers = NULL;

	return 0;
}

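/* Transmit a packet: the driver keeps a single in-flight skb, so the queue
 * is stopped until the transmit-complete interrupt releases it.
 */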
static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);

	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
		netif_stop_queue(dev);

		lp->skb = skb;
		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
						  skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			netdev_err(dev, "%s: DMA mapping error\n", __func__);
			return NETDEV_TX_OK;
		}

		macb_writel(lp, TAR, lp->skb_physaddr);
		macb_writel(lp, TCR, skb->len);
	} else {
		netdev_err(dev, "%s called, but device is busy!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

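/* Extract received frames from the buffer descriptors and hand them to the
 * upper layers (called from interrupt context).
 */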
static void at91ether_rx(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_queue *q = &lp->queues[0];
	struct macb_dma_desc *desc;
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;

	desc = macb_rx_desc(q, q->rx_tail);
	while (desc->addr & MACB_BIT(RX_USED)) {
		p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
			skb_reserve(skb, 2);
			skb_put_data(skb, p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(skb);
		} else {
			dev->stats.rx_dropped++;
		}

		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
			dev->stats.multicast++;

		desc->addr &= ~MACB_BIT(RX_USED);

		if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
			q->rx_tail = 0;
		else
			q->rx_tail++;

		desc = macb_rx_desc(q, q->rx_tail);
	}
}

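/* MAC interrupt handler */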
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *lp = netdev_priv(dev);
	u32 intstatus, ctl;

	intstatus = macb_readl(lp, ISR);

	if (intstatus & MACB_BIT(RCOMP))
		at91ether_rx(dev);

	if (intstatus & MACB_BIT(TCOMP)) {
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
			dev->stats.tx_errors++;

		if (lp->skb) {
			dev_consume_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += lp->skb_length;
		}
		netif_wake_queue(dev);
	}

	if (intstatus & MACB_BIT(RXUBR)) {
		ctl = macb_readl(lp, NCR);
		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
		wmb();
		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
	}

	if (intstatus & MACB_BIT(ISR_ROVR))
		netdev_err(dev, "ROVR error\n");

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif

static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= macb_get_stats,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};

static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
			      struct clk **hclk, struct clk **tx_clk,
			      struct clk **rx_clk, struct clk **tsu_clk)
{
	int err;

	*hclk = NULL;
	*tx_clk = NULL;
	*rx_clk = NULL;
	*tsu_clk = NULL;

	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
		return err;
	}

	return 0;
}

static int at91ether_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(dev);
	int err;
	u32 reg;

	bp->queues[0].bp = bp;

	dev->netdev_ops = &at91ether_netdev_ops;
	dev->ethtool_ops = &macb_ethtool_ops;

	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
			       0, dev->name, dev);
	if (err)
		return err;

	macb_writel(bp, NCR, 0);

	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
		reg |= MACB_BIT(RM9200_RMII);

	macb_writel(bp, NCFGR, reg);

	return 0;
}

static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	return mgmt->rate;
}

static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long *parent_rate)
{
	if (WARN_ON(rate < 2500000))
		return 2500000;
	else if (rate == 2500000)
		return 2500000;
	else if (WARN_ON(rate < 13750000))
		return 2500000;
	else if (WARN_ON(rate < 25000000))
		return 25000000;
	else if (rate == 25000000)
		return 25000000;
	else if (WARN_ON(rate < 75000000))
		return 25000000;
	else if (WARN_ON(rate < 125000000))
		return 125000000;
	else if (rate == 125000000)
		return 125000000;

	WARN_ON(rate > 125000000);

	return 125000000;
}

static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
	if (rate != 125000000)
		iowrite32(1, mgmt->reg);
	else
		iowrite32(0, mgmt->reg);
	mgmt->rate = rate;

	return 0;
}

static const struct clk_ops fu540_c000_ops = {
	.recalc_rate = fu540_macb_tx_recalc_rate,
	.round_rate = fu540_macb_tx_round_rate,
	.set_rate = fu540_macb_tx_set_rate,
};

static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
			       struct clk **hclk, struct clk **tx_clk,
			       struct clk **rx_clk, struct clk **tsu_clk)
{
	struct clk_init_data init;
	int err = 0;

	err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
	if (err)
		return err;

	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
	if (!mgmt)
		return -ENOMEM;

	init.name = "sifive-gemgxl-mgmt";
	init.ops = &fu540_c000_ops;
	init.flags = 0;
	init.num_parents = 0;

	mgmt->rate = 0;
	mgmt->hw.init = &init;

	*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
	if (IS_ERR(*tx_clk))
		return PTR_ERR(*tx_clk);

	err = clk_prepare_enable(*tx_clk);
	if (err)
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
4102 else
4103 dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
4104
4105 return 0;
4106}
4107
4108static int fu540_c000_init(struct platform_device *pdev)
4109{
4110 struct resource *res;
4111
4112 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
4113 if (!res)
4114 return -ENODEV;
4115
4116 mgmt->reg = ioremap(res->start, resource_size(res));
4117 if (!mgmt->reg)
4118 return -ENOMEM;
4119
4120 return macb_init(pdev);
4121}
4122
4123static const struct macb_config fu540_c000_config = {
4124 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
4125 MACB_CAPS_GEM_HAS_PTP,
4126 .dma_burst_length = 16,
4127 .clk_init = fu540_c000_clk_init,
4128 .init = fu540_c000_init,
4129 .jumbo_max_len = 10240,
4130};
4131
4132static const struct macb_config at91sam9260_config = {
4133 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4134 .clk_init = macb_clk_init,
4135 .init = macb_init,
4136};
4137
4138static const struct macb_config sama5d3macb_config = {
4139 .caps = MACB_CAPS_SG_DISABLED
4140 | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4141 .clk_init = macb_clk_init,
4142 .init = macb_init,
4143};
4144
4145static const struct macb_config pc302gem_config = {
4146 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
4147 .dma_burst_length = 16,
4148 .clk_init = macb_clk_init,
4149 .init = macb_init,
4150};
4151
4152static const struct macb_config sama5d2_config = {
4153 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4154 .dma_burst_length = 16,
4155 .clk_init = macb_clk_init,
4156 .init = macb_init,
4157};
4158
4159static const struct macb_config sama5d3_config = {
4160 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
4161 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
4162 .dma_burst_length = 16,
4163 .clk_init = macb_clk_init,
4164 .init = macb_init,
4165 .jumbo_max_len = 10240,
4166};
4167
4168static const struct macb_config sama5d4_config = {
4169 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4170 .dma_burst_length = 4,
4171 .clk_init = macb_clk_init,
4172 .init = macb_init,
4173};
4174
4175static const struct macb_config emac_config = {
4176 .caps = MACB_CAPS_NEEDS_RSTONUBR,
4177 .clk_init = at91ether_clk_init,
4178 .init = at91ether_init,
4179};
4180
4181static const struct macb_config np4_config = {
4182 .caps = MACB_CAPS_USRIO_DISABLED,
4183 .clk_init = macb_clk_init,
4184 .init = macb_init,
4185};
4186
4187static const struct macb_config zynqmp_config = {
4188 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4189 MACB_CAPS_JUMBO |
4190 MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
4191 .dma_burst_length = 16,
4192 .clk_init = macb_clk_init,
4193 .init = macb_init,
4194 .jumbo_max_len = 10240,
4195};
4196
4197static const struct macb_config zynq_config = {
4198 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
4199 MACB_CAPS_NEEDS_RSTONUBR,
4200 .dma_burst_length = 16,
4201 .clk_init = macb_clk_init,
4202 .init = macb_init,
4203};
4204
4205static const struct of_device_id macb_dt_ids[] = {
4206 { .compatible = "cdns,at32ap7000-macb" },
4207 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
4208 { .compatible = "cdns,macb" },
4209 { .compatible = "cdns,np4-macb", .data = &np4_config },
4210 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
4211 { .compatible = "cdns,gem", .data = &pc302gem_config },
4212 { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
4213 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
4214 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
4215 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
4216 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
4217 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
4218 { .compatible = "cdns,emac", .data = &emac_config },
4219 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
4220 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
4221 { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
4222 { }
4223};
4224MODULE_DEVICE_TABLE(of, macb_dt_ids);
4225#endif
4226
4227static const struct macb_config default_gem_config = {
4228 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4229 MACB_CAPS_JUMBO |
4230 MACB_CAPS_GEM_HAS_PTP,
4231 .dma_burst_length = 16,
4232 .clk_init = macb_clk_init,
4233 .init = macb_init,
4234 .jumbo_max_len = 10240,
4235};
4236
4237static int macb_probe(struct platform_device *pdev)
4238{
4239 const struct macb_config *macb_config = &default_gem_config;
4240 int (*clk_init)(struct platform_device *, struct clk **,
4241 struct clk **, struct clk **, struct clk **,
4242 struct clk **) = macb_config->clk_init;
4243 int (*init)(struct platform_device *) = macb_config->init;
4244 struct device_node *np = pdev->dev.of_node;
4245 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
4246 struct clk *tsu_clk = NULL;
4247 unsigned int queue_mask, num_queues;
4248 bool native_io;
4249 phy_interface_t interface;
4250 struct net_device *dev;
4251 struct resource *regs;
4252 void __iomem *mem;
4253 const char *mac;
4254 struct macb *bp;
4255 int err, val;
4256
4257 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4258 mem = devm_ioremap_resource(&pdev->dev, regs);
4259 if (IS_ERR(mem))
4260 return PTR_ERR(mem);
4261
4262 if (np) {
4263 const struct of_device_id *match;
4264
4265 match = of_match_node(macb_dt_ids, np);
4266 if (match && match->data) {
4267 macb_config = match->data;
4268 clk_init = macb_config->clk_init;
4269 init = macb_config->init;
4270 }
4271 }
4272
4273 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
4274 if (err)
4275 return err;
4276
4277 pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
4278 pm_runtime_use_autosuspend(&pdev->dev);
4279 pm_runtime_get_noresume(&pdev->dev);
4280 pm_runtime_set_active(&pdev->dev);
4281 pm_runtime_enable(&pdev->dev);
4282 native_io = hw_is_native_io(mem);
4283
4284 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
4285 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
4286 if (!dev) {
4287 err = -ENOMEM;
4288 goto err_disable_clocks;
4289 }
4290
4291 dev->base_addr = regs->start;
4292
4293 SET_NETDEV_DEV(dev, &pdev->dev);
4294
4295 bp = netdev_priv(dev);
4296 bp->pdev = pdev;
4297 bp->dev = dev;
4298 bp->regs = mem;
4299 bp->native_io = native_io;
4300 if (native_io) {
4301 bp->macb_reg_readl = hw_readl_native;
4302 bp->macb_reg_writel = hw_writel_native;
4303 } else {
4304 bp->macb_reg_readl = hw_readl;
4305 bp->macb_reg_writel = hw_writel;
4306 }
4307 bp->num_queues = num_queues;
4308 bp->queue_mask = queue_mask;
4309 if (macb_config)
4310 bp->dma_burst_length = macb_config->dma_burst_length;
4311 bp->pclk = pclk;
4312 bp->hclk = hclk;
4313 bp->tx_clk = tx_clk;
4314 bp->rx_clk = rx_clk;
4315 bp->tsu_clk = tsu_clk;
4316 if (macb_config)
4317 bp->jumbo_max_len = macb_config->jumbo_max_len;
4318
4319 bp->wol = 0;
4320 if (of_get_property(np, "magic-packet", NULL))
4321 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
4322 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
4323
4324 spin_lock_init(&bp->lock);
4325
4326
4327 macb_configure_caps(bp, macb_config);
4328
4329#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
4330 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
4331 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
4332 bp->hw_dma_cap |= HW_DMA_CAP_64B;
4333 }
4334#endif
4335 platform_set_drvdata(pdev, dev);
4336
4337 dev->irq = platform_get_irq(pdev, 0);
4338 if (dev->irq < 0) {
4339 err = dev->irq;
4340 goto err_out_free_netdev;
4341 }
4342
4343
4344 dev->min_mtu = GEM_MTU_MIN_SIZE;
4345 if (bp->caps & MACB_CAPS_JUMBO)
4346 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
4347 else
4348 dev->max_mtu = ETH_DATA_LEN;
4349
4350 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
4351 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
4352 if (val)
4353 bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
4354 macb_dma_desc_get_size(bp);
4355
4356 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
4357 if (val)
4358 bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
4359 macb_dma_desc_get_size(bp);
4360 }
4361
4362 bp->rx_intr_mask = MACB_RX_INT_FLAGS;
4363 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
4364 bp->rx_intr_mask |= MACB_BIT(RXUBR);
4365
4366 mac = of_get_mac_address(np);
4367 if (PTR_ERR(mac) == -EPROBE_DEFER) {
4368 err = -EPROBE_DEFER;
4369 goto err_out_free_netdev;
4370 } else if (!IS_ERR_OR_NULL(mac)) {
4371 ether_addr_copy(bp->dev->dev_addr, mac);
4372 } else {
4373 macb_get_hwaddr(bp);
4374 }
4375
4376 err = of_get_phy_mode(np, &interface);
4377 if (err)
4378
4379 bp->phy_interface = PHY_INTERFACE_MODE_MII;
4380 else
4381 bp->phy_interface = interface;
4382
4383 bp->speed = SPEED_UNKNOWN;
4384
4385
4386 err = init(pdev);
4387 if (err)
4388 goto err_out_free_netdev;
4389
4390 err = macb_mii_init(bp);
4391 if (err)
4392 goto err_out_free_netdev;
4393
4394 netif_carrier_off(dev);
4395
4396 err = register_netdev(dev);
4397 if (err) {
4398 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
4399 goto err_out_unregister_mdio;
4400 }
4401
4402 tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
4403 (unsigned long)bp);
4404
4405 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
4406 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
4407 dev->base_addr, dev->irq, dev->dev_addr);
4408
4409 pm_runtime_mark_last_busy(&bp->pdev->dev);
4410 pm_runtime_put_autosuspend(&bp->pdev->dev);
4411
4412 return 0;
4413
4414err_out_unregister_mdio:
4415 mdiobus_unregister(bp->mii_bus);
4416 mdiobus_free(bp->mii_bus);
4417
4418err_out_free_netdev:
4419 free_netdev(dev);
4420
4421err_disable_clocks:
4422 clk_disable_unprepare(tx_clk);
4423 clk_disable_unprepare(hclk);
4424 clk_disable_unprepare(pclk);
4425 clk_disable_unprepare(rx_clk);
4426 clk_disable_unprepare(tsu_clk);
4427 pm_runtime_disable(&pdev->dev);
4428 pm_runtime_set_suspended(&pdev->dev);
4429 pm_runtime_dont_use_autosuspend(&pdev->dev);
4430
4431 return err;
4432}
4433
static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);

		unregister_netdev(dev);
		tasklet_kill(&bp->hresp_err_tasklet);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		if (!pm_runtime_suspended(&pdev->dev)) {
			clk_disable_unprepare(bp->tx_clk);
			clk_disable_unprepare(bp->hclk);
			clk_disable_unprepare(bp->pclk);
			clk_disable_unprepare(bp->rx_clk);
			clk_disable_unprepare(bp->tsu_clk);
			pm_runtime_set_suspended(&pdev->dev);
		}
		phylink_destroy(bp->phylink);
		free_netdev(dev);
	}

	return 0;
}

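/* System suspend. With Wake-on-LAN enabled, only the magic-packet detector
 * is armed and queue 0's IRQ is marked as a wake source; otherwise NAPI,
 * phylink and the MAC are fully stopped, saving the USRIO (and, with
 * NTUPLE, the screener EtherType) state for resume.
 */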
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned long flags;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
		netif_device_detach(netdev);
	} else {
		netif_device_detach(netdev);
		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_disable(&queue->napi);
		rtnl_lock();
		phylink_stop(bp->phylink);
		rtnl_unlock();
		spin_lock_irqsave(&bp->lock, flags);
		macb_reset_hw(bp);
		spin_unlock_irqrestore(&bp->lock, flags);

		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
			bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);

		if (netdev->hw_features & NETIF_F_NTUPLE)
			bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
	}

	netif_carrier_off(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(netdev);
	pm_runtime_force_suspend(dev);

	return 0;
}

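/* System resume: mirror of macb_suspend(). Either disarm Wake-on-LAN, or
 * re-enable the management port, restore the saved USRIO/screener state and
 * restart NAPI and phylink, before reinitializing the MAC and reattaching
 * the device.
 */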
static int __maybe_unused macb_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	pm_runtime_force_resume(dev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		macb_writel(bp, NCR, MACB_BIT(MPE));

		if (netdev->hw_features & NETIF_F_NTUPLE)
			gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);

		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
			macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);

		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_enable(&queue->napi);
		rtnl_lock();
		phylink_start(bp->phylink);
		rtnl_unlock();
	}

	macb_init_hw(bp);
	macb_set_rx_mode(netdev);
	macb_restore_features(bp);
	netif_device_attach(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_init(netdev);

	return 0;
}

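/* Runtime suspend: gate the clocks while the interface is idle. The TSU
 * clock is always gated; the bus and packet clocks are kept running when
 * the device may wake the system, since wake-up detection needs a clocked
 * MAC.
 */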
static int __maybe_unused macb_runtime_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(&bp->dev->dev))) {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}
	clk_disable_unprepare(bp->tsu_clk);

	return 0;
}

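/* Runtime resume: re-enable the clocks gated above, bus clocks first. */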
static int __maybe_unused macb_runtime_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(&bp->dev->dev))) {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}
	clk_prepare_enable(bp->tsu_clk);

	return 0;
}

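/* Wire both system sleep and runtime PM callbacks into the driver model. */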
static const struct dev_pm_ops macb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
	SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
};

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm		= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");