// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include "macb.h"

struct sifive_fu540_macb_mgmt {
 void __iomem *reg;
 unsigned long rate;
 struct clk_hw hw;
};

#define MACB_RX_BUFFER_SIZE 128
#define RX_BUFFER_MULTIPLE 64 /* bytes */

#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
#define MIN_RX_RING_SIZE 64
#define MAX_RX_RING_SIZE 8192
#define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
#define MIN_TX_RING_SIZE 64
#define MAX_TX_RING_SIZE 4096
#define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
 | MACB_BIT(ISR_RLE) \
 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP) \
 | MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN 8
#define MACB_MAX_TX_LEN ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))

/* Limit maximum TX length as per Cadence TSO errata. This is to avoid a
 * false amba_error in TX path from the DMA assuming there is not enough
 * space in the SRAM (16KB) even when there is.
 */
#define GEM_MAX_TX_LEN (unsigned int)(0x3FC0)

#define GEM_MTU_MIN_SIZE ETH_MIN_MTU
#define MACB_NETIF_LSO NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
#define MACB_WOL_ENABLED (0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT 1230

#define MACB_PM_TIMEOUT 100 /* ms */

#define MACB_MDIO_TIMEOUT 1000000 /* in usecs */

/* DMA buffer descriptor might be different size
 * depends on hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
124static unsigned int macb_dma_desc_get_size(struct macb *bp)
125{
126#ifdef MACB_EXT_DESC
127 unsigned int desc_size;
128
129 switch (bp->hw_dma_cap) {
130 case HW_DMA_CAP_64B:
131 desc_size = sizeof(struct macb_dma_desc)
132 + sizeof(struct macb_dma_desc_64);
133 break;
134 case HW_DMA_CAP_PTP:
135 desc_size = sizeof(struct macb_dma_desc)
136 + sizeof(struct macb_dma_desc_ptp);
137 break;
138 case HW_DMA_CAP_64B_PTP:
139 desc_size = sizeof(struct macb_dma_desc)
140 + sizeof(struct macb_dma_desc_64)
141 + sizeof(struct macb_dma_desc_ptp);
142 break;
143 default:
144 desc_size = sizeof(struct macb_dma_desc);
145 }
146 return desc_size;
147#endif
148 return sizeof(struct macb_dma_desc);
149}
150
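/* When 64-bit addressing and/or PTP descriptor words are in use, each ring
 * slot occupies more than one struct macb_dma_desc, so a logical index must
 * be scaled to find the descriptor's real position in the ring.
 */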
151static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
152{
153#ifdef MACB_EXT_DESC
154 switch (bp->hw_dma_cap) {
155 case HW_DMA_CAP_64B:
156 case HW_DMA_CAP_PTP:
157 desc_idx <<= 1;
158 break;
159 case HW_DMA_CAP_64B_PTP:
160 desc_idx *= 3;
161 break;
162 default:
163 break;
164 }
165#endif
166 return desc_idx;
167}
168
169#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
170static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
171{
172 return (struct macb_dma_desc_64 *)((void *)desc
173 + sizeof(struct macb_dma_desc));
174}
175#endif

/* Ring buffer accessors */
178static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
179{
180 return index & (bp->tx_ring_size - 1);
181}
182
183static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
184 unsigned int index)
185{
186 index = macb_tx_ring_wrap(queue->bp, index);
187 index = macb_adj_dma_desc_idx(queue->bp, index);
188 return &queue->tx_ring[index];
189}
190
191static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
192 unsigned int index)
193{
194 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
195}
196
197static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
198{
199 dma_addr_t offset;
200
201 offset = macb_tx_ring_wrap(queue->bp, index) *
202 macb_dma_desc_get_size(queue->bp);
203
204 return queue->tx_ring_dma + offset;
205}
206
207static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
208{
209 return index & (bp->rx_ring_size - 1);
210}
211
212static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
213{
214 index = macb_rx_ring_wrap(queue->bp, index);
215 index = macb_adj_dma_desc_idx(queue->bp, index);
216 return &queue->rx_ring[index];
217}
218
219static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
220{
221 return queue->rx_buffers + queue->bp->rx_buffer_size *
222 macb_rx_ring_wrap(queue->bp, index);
223}

/* I/O accessors */
226static u32 hw_readl_native(struct macb *bp, int offset)
227{
228 return __raw_readl(bp->regs + offset);
229}
230
231static void hw_writel_native(struct macb *bp, int offset, u32 value)
232{
233 __raw_writel(value, bp->regs + offset);
234}
235
236static u32 hw_readl(struct macb *bp, int offset)
237{
238 return readl_relaxed(bp->regs + offset);
239}
240
241static void hw_writel(struct macb *bp, int offset, u32 value)
242{
243 writel_relaxed(value, bp->regs + offset);
244}

/* Find the CPU endianness by using the loopback bit of NCR register. When the
 * CPU is in big endian we need to program swapped mode for management
 * descriptor access.
 */
250static bool hw_is_native_io(void __iomem *addr)
251{
252 u32 value = MACB_BIT(LLB);
253
254 __raw_writel(value, addr + MACB_NCR);
255 value = __raw_readl(addr + MACB_NCR);

 /* Write 0 back to disable everything */
258 __raw_writel(0, addr + MACB_NCR);
259
260 return value == MACB_BIT(LLB);
261}
262
263static bool hw_is_gem(void __iomem *addr, bool native_io)
264{
265 u32 id;
266
267 if (native_io)
268 id = __raw_readl(addr + MACB_MID);
269 else
270 id = readl_relaxed(addr + MACB_MID);
271
272 return MACB_BFEXT(IDNUM, id) >= 0x2;
273}
274
275static void macb_set_hwaddr(struct macb *bp)
276{
277 u32 bottom;
278 u16 top;
279
280 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
281 macb_or_gem_writel(bp, SA1B, bottom);
282 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
283 macb_or_gem_writel(bp, SA1T, top);

 /* Clear unused address register sets */
286 macb_or_gem_writel(bp, SA2B, 0);
287 macb_or_gem_writel(bp, SA2T, 0);
288 macb_or_gem_writel(bp, SA3B, 0);
289 macb_or_gem_writel(bp, SA3T, 0);
290 macb_or_gem_writel(bp, SA4B, 0);
291 macb_or_gem_writel(bp, SA4T, 0);
292}
293
294static void macb_get_hwaddr(struct macb *bp)
295{
296 u32 bottom;
297 u16 top;
298 u8 addr[6];
299 int i;

 /* Check all 4 address register sets for a valid address */
302 for (i = 0; i < 4; i++) {
303 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
304 top = macb_or_gem_readl(bp, SA1T + i * 8);
305
306 addr[0] = bottom & 0xff;
307 addr[1] = (bottom >> 8) & 0xff;
308 addr[2] = (bottom >> 16) & 0xff;
309 addr[3] = (bottom >> 24) & 0xff;
310 addr[4] = top & 0xff;
311 addr[5] = (top >> 8) & 0xff;
312
313 if (is_valid_ether_addr(addr)) {
314 memcpy(bp->dev->dev_addr, addr, sizeof(addr));
315 return;
316 }
317 }
318
319 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
320 eth_hw_addr_random(bp->dev);
321}
322
323static int macb_mdio_wait_for_idle(struct macb *bp)
324{
325 u32 val;
326
327 return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
328 1, MACB_MDIO_TIMEOUT);
329}
330
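/* Read a PHY register through the MDIO management interface. Clause 45
 * accesses are performed as an address cycle followed by a read cycle,
 * waiting for the interface to go idle between frames.
 */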
331static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
332{
333 struct macb *bp = bus->priv;
334 int status;
335
336 status = pm_runtime_get_sync(&bp->pdev->dev);
337 if (status < 0) {
338 pm_runtime_put_noidle(&bp->pdev->dev);
339 goto mdio_pm_exit;
340 }
341
342 status = macb_mdio_wait_for_idle(bp);
343 if (status < 0)
344 goto mdio_read_exit;
345
346 if (regnum & MII_ADDR_C45) {
347 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
348 | MACB_BF(RW, MACB_MAN_C45_ADDR)
349 | MACB_BF(PHYA, mii_id)
350 | MACB_BF(REGA, (regnum >> 16) & 0x1F)
351 | MACB_BF(DATA, regnum & 0xFFFF)
352 | MACB_BF(CODE, MACB_MAN_C45_CODE)));
353
354 status = macb_mdio_wait_for_idle(bp);
355 if (status < 0)
356 goto mdio_read_exit;
357
358 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
359 | MACB_BF(RW, MACB_MAN_C45_READ)
360 | MACB_BF(PHYA, mii_id)
361 | MACB_BF(REGA, (regnum >> 16) & 0x1F)
362 | MACB_BF(CODE, MACB_MAN_C45_CODE)));
363 } else {
364 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
365 | MACB_BF(RW, MACB_MAN_C22_READ)
366 | MACB_BF(PHYA, mii_id)
367 | MACB_BF(REGA, regnum)
368 | MACB_BF(CODE, MACB_MAN_C22_CODE)));
369 }
370
371 status = macb_mdio_wait_for_idle(bp);
372 if (status < 0)
373 goto mdio_read_exit;
374
375 status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
376
377mdio_read_exit:
378 pm_runtime_mark_last_busy(&bp->pdev->dev);
379 pm_runtime_put_autosuspend(&bp->pdev->dev);
380mdio_pm_exit:
381 return status;
382}
383
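/* Write a PHY register through the MDIO management interface; Clause 45
 * accesses issue an address cycle before the data write.
 */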
384static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
385 u16 value)
386{
387 struct macb *bp = bus->priv;
388 int status;
389
390 status = pm_runtime_get_sync(&bp->pdev->dev);
391 if (status < 0) {
392 pm_runtime_put_noidle(&bp->pdev->dev);
393 goto mdio_pm_exit;
394 }
395
396 status = macb_mdio_wait_for_idle(bp);
397 if (status < 0)
398 goto mdio_write_exit;
399
400 if (regnum & MII_ADDR_C45) {
401 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
402 | MACB_BF(RW, MACB_MAN_C45_ADDR)
403 | MACB_BF(PHYA, mii_id)
404 | MACB_BF(REGA, (regnum >> 16) & 0x1F)
405 | MACB_BF(DATA, regnum & 0xFFFF)
406 | MACB_BF(CODE, MACB_MAN_C45_CODE)));
407
408 status = macb_mdio_wait_for_idle(bp);
409 if (status < 0)
410 goto mdio_write_exit;
411
412 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
413 | MACB_BF(RW, MACB_MAN_C45_WRITE)
414 | MACB_BF(PHYA, mii_id)
415 | MACB_BF(REGA, (regnum >> 16) & 0x1F)
416 | MACB_BF(CODE, MACB_MAN_C45_CODE)
417 | MACB_BF(DATA, value)));
418 } else {
419 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
420 | MACB_BF(RW, MACB_MAN_C22_WRITE)
421 | MACB_BF(PHYA, mii_id)
422 | MACB_BF(REGA, regnum)
423 | MACB_BF(CODE, MACB_MAN_C22_CODE)
424 | MACB_BF(DATA, value)));
425 }
426
427 status = macb_mdio_wait_for_idle(bp);
428 if (status < 0)
429 goto mdio_write_exit;
430
431mdio_write_exit:
432 pm_runtime_mark_last_busy(&bp->pdev->dev);
433 pm_runtime_put_autosuspend(&bp->pdev->dev);
434mdio_pm_exit:
435 return status;
436}
437
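/* Program the RX and TX descriptor ring base addresses for every queue,
 * including the upper 32 address bits when 64-bit DMA descriptors are used.
 */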
438static void macb_init_buffers(struct macb *bp)
439{
440 struct macb_queue *queue;
441 unsigned int q;
442
443 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
444 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
445#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
446 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
447 queue_writel(queue, RBQPH,
448 upper_32_bits(queue->rx_ring_dma));
449#endif
450 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
451#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
452 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
453 queue_writel(queue, TBQPH,
454 upper_32_bits(queue->tx_ring_dma));
455#endif
456 }
457}

/**
 * macb_set_tx_clk() - Set the MAC TX clock to match the link speed
 * @clk:	Pointer to the TX clock to change
 * @speed:	Link speed (SPEED_10/100/1000) that selects the clock rate
 * @dev:	Pointer to the struct net_device
 */
465static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
466{
467 long ferr, rate, rate_rounded;
468
469 if (!clk)
470 return;
471
472 switch (speed) {
473 case SPEED_10:
474 rate = 2500000;
475 break;
476 case SPEED_100:
477 rate = 25000000;
478 break;
479 case SPEED_1000:
480 rate = 125000000;
481 break;
482 default:
483 return;
484 }
485
486 rate_rounded = clk_round_rate(clk, rate);
487 if (rate_rounded < 0)
488 return;

 /* RGMII allows 50 ppm frequency error. Test and warn if this limit
 * is not satisfied.
 */
493 ferr = abs(rate_rounded - rate);
494 ferr = DIV_ROUND_UP(ferr, rate / 100000);
495 if (ferr > 5)
496 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
497 rate);
498
499 if (clk_set_rate(clk, rate_rounded))
500 netdev_err(dev, "adjusting tx_clk failed.\n");
501}
502
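/* phylink .validate callback: reduce the supported/advertised link modes to
 * what the MAC can actually do for the requested PHY interface mode; gigabit
 * modes are only left in for GEM variants with the gigabit capability.
 */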
503static void macb_validate(struct phylink_config *config,
504 unsigned long *supported,
505 struct phylink_link_state *state)
506{
507 struct net_device *ndev = to_net_dev(config->dev);
508 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
509 struct macb *bp = netdev_priv(ndev);

 /* We only support MII, RMII, GMII, RGMII and SGMII. */
512 if (state->interface != PHY_INTERFACE_MODE_NA &&
513 state->interface != PHY_INTERFACE_MODE_MII &&
514 state->interface != PHY_INTERFACE_MODE_RMII &&
515 state->interface != PHY_INTERFACE_MODE_GMII &&
516 state->interface != PHY_INTERFACE_MODE_SGMII &&
517 !phy_interface_mode_is_rgmii(state->interface)) {
518 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
519 return;
520 }
521
522 if (!macb_is_gem(bp) &&
523 (state->interface == PHY_INTERFACE_MODE_GMII ||
524 phy_interface_mode_is_rgmii(state->interface))) {
525 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
526 return;
527 }
528
529 phylink_set_port_modes(mask);
530 phylink_set(mask, Autoneg);
531 phylink_set(mask, Asym_Pause);
532
533 phylink_set(mask, 10baseT_Half);
534 phylink_set(mask, 10baseT_Full);
535 phylink_set(mask, 100baseT_Half);
536 phylink_set(mask, 100baseT_Full);
537
538 if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
539 (state->interface == PHY_INTERFACE_MODE_NA ||
540 state->interface == PHY_INTERFACE_MODE_GMII ||
541 state->interface == PHY_INTERFACE_MODE_SGMII ||
542 phy_interface_mode_is_rgmii(state->interface))) {
543 phylink_set(mask, 1000baseT_Full);
544 phylink_set(mask, 1000baseX_Full);
545
546 if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
547 phylink_set(mask, 1000baseT_Half);
548 }
549
550 bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
551 bitmap_and(state->advertising, state->advertising, mask,
552 __ETHTOOL_LINK_MODE_MASK_NBITS);
553}
554
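/* The MAC does not report in-band link state, so there is nothing to return
 * here and no way to restart in-band autonegotiation.
 */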
555static void macb_mac_pcs_get_state(struct phylink_config *config,
556 struct phylink_link_state *state)
557{
558 state->link = 0;
559}
560
561static void macb_mac_an_restart(struct phylink_config *config)
562{
 /* Not supported */
564}
565
566static void macb_mac_config(struct phylink_config *config, unsigned int mode,
567 const struct phylink_link_state *state)
568{
569 struct net_device *ndev = to_net_dev(config->dev);
570 struct macb *bp = netdev_priv(ndev);
571 unsigned long flags;
572 u32 old_ctrl, ctrl;
573
574 spin_lock_irqsave(&bp->lock, flags);
575
576 old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
577
578 if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
579 if (state->interface == PHY_INTERFACE_MODE_RMII)
580 ctrl |= MACB_BIT(RM9200_RMII);
581 } else {
582 ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
583
584 if (state->interface == PHY_INTERFACE_MODE_SGMII)
585 ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
586 }

 /* Apply the new configuration, if any */
589 if (old_ctrl ^ ctrl)
590 macb_or_gem_writel(bp, NCFGR, ctrl);
591
592 spin_unlock_irqrestore(&bp->lock, flags);
593}
594
595static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
596 phy_interface_t interface)
597{
598 struct net_device *ndev = to_net_dev(config->dev);
599 struct macb *bp = netdev_priv(ndev);
600 struct macb_queue *queue;
601 unsigned int q;
602 u32 ctrl;
603
604 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
605 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
606 queue_writel(queue, IDR,
607 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));

 /* Disable Rx and Tx */
610 ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
611 macb_writel(bp, NCR, ctrl);
612
613 netif_tx_stop_all_queues(ndev);
614}
615
616static void macb_mac_link_up(struct phylink_config *config,
617 struct phy_device *phy,
618 unsigned int mode, phy_interface_t interface,
619 int speed, int duplex,
620 bool tx_pause, bool rx_pause)
621{
622 struct net_device *ndev = to_net_dev(config->dev);
623 struct macb *bp = netdev_priv(ndev);
624 struct macb_queue *queue;
625 unsigned long flags;
626 unsigned int q;
627 u32 ctrl;
628
629 spin_lock_irqsave(&bp->lock, flags);
630
631 ctrl = macb_or_gem_readl(bp, NCFGR);
632
633 ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
634
635 if (speed == SPEED_100)
636 ctrl |= MACB_BIT(SPD);
637
638 if (duplex)
639 ctrl |= MACB_BIT(FD);
640
641 if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
642 ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(PAE));
643
644 if (speed == SPEED_1000)
645 ctrl |= GEM_BIT(GBE);
646
647
648 if (tx_pause)
649 ctrl |= MACB_BIT(PAE);
650
651 macb_set_tx_clk(bp->tx_clk, speed, ndev);

 /* Clearing MACB_BIT(TE) in macb_mac_link_down() also cleared the
 * controller's ring state, so reinitialize rings and buffers before
 * re-enabling RX/TX.
 */
656 bp->macbgem_ops.mog_init_rings(bp);
657 macb_init_buffers(bp);
658
659 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
660 queue_writel(queue, IER,
661 bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
662 }
663
664 macb_or_gem_writel(bp, NCFGR, ctrl);
665
666 spin_unlock_irqrestore(&bp->lock, flags);

 /* Enable Rx and Tx */
669 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
670
671 netif_tx_wake_all_queues(ndev);
672}
673
674static const struct phylink_mac_ops macb_phylink_ops = {
675 .validate = macb_validate,
676 .mac_pcs_get_state = macb_mac_pcs_get_state,
677 .mac_an_restart = macb_mac_an_restart,
678 .mac_config = macb_mac_config,
679 .mac_link_down = macb_mac_link_down,
680 .mac_link_up = macb_mac_link_up,
681};
682
683static bool macb_phy_handle_exists(struct device_node *dn)
684{
685 dn = of_parse_phandle(dn, "phy-handle", 0);
686 of_node_put(dn);
687 return dn != NULL;
688}
689
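/* Attach the PHY named in the device tree, or fall back to the first PHY
 * found on the MDIO bus, then start phylink.
 */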
690static int macb_phylink_connect(struct macb *bp)
691{
692 struct device_node *dn = bp->pdev->dev.of_node;
693 struct net_device *dev = bp->dev;
694 struct phy_device *phydev;
695 int ret;
696
697 if (dn)
698 ret = phylink_of_phy_connect(bp->phylink, dn, 0);
699
700 if (!dn || (ret && !macb_phy_handle_exists(dn))) {
701 phydev = phy_find_first(bp->mii_bus);
702 if (!phydev) {
703 netdev_err(dev, "no PHY found\n");
704 return -ENXIO;
705 }
706
707
708 ret = phylink_connect_phy(bp->phylink, phydev);
709 }
710
711 if (ret) {
712 netdev_err(dev, "Could not attach PHY (%d)\n", ret);
713 return ret;
714 }
715
716 phylink_start(bp->phylink);
717
718 return 0;
719}
720
721
722static int macb_mii_probe(struct net_device *dev)
723{
724 struct macb *bp = netdev_priv(dev);
725
726 bp->phylink_config.dev = &dev->dev;
727 bp->phylink_config.type = PHYLINK_NETDEV;
728
729 bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
730 bp->phy_interface, &macb_phylink_ops);
731 if (IS_ERR(bp->phylink)) {
732 netdev_err(dev, "Could not create a phylink instance (%ld)\n",
733 PTR_ERR(bp->phylink));
734 return PTR_ERR(bp->phylink);
735 }
736
737 return 0;
738}
739
740static int macb_mdiobus_register(struct macb *bp)
741{
742 struct device_node *child, *np = bp->pdev->dev.of_node;
743
744 if (of_phy_is_fixed_link(np))
745 return mdiobus_register(bp->mii_bus);

 /* Only register the bus via the device tree if at least one child node
 * actually describes a PHY; otherwise fall back to a plain mdiobus scan,
 * as needed for DT-less or PHY-less device descriptions.
 */
752 for_each_available_child_of_node(np, child)
753 if (of_mdiobus_child_is_phy(child)) {
 /* The loop increments the child refcount,
 * decrement it before returning.
 */
757 of_node_put(child);
758
759 return of_mdiobus_register(bp->mii_bus, np);
760 }
761
762 return mdiobus_register(bp->mii_bus);
763}
764
765static int macb_mii_init(struct macb *bp)
766{
767 int err = -ENXIO;

 /* Enable management port */
770 macb_writel(bp, NCR, MACB_BIT(MPE));
771
772 bp->mii_bus = mdiobus_alloc();
773 if (!bp->mii_bus) {
774 err = -ENOMEM;
775 goto err_out;
776 }
777
778 bp->mii_bus->name = "MACB_mii_bus";
779 bp->mii_bus->read = &macb_mdio_read;
780 bp->mii_bus->write = &macb_mdio_write;
781 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
782 bp->pdev->name, bp->pdev->id);
783 bp->mii_bus->priv = bp;
784 bp->mii_bus->parent = &bp->pdev->dev;
785
786 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
787
788 err = macb_mdiobus_register(bp);
789 if (err)
790 goto err_out_free_mdiobus;
791
792 err = macb_mii_probe(bp->dev);
793 if (err)
794 goto err_out_unregister_bus;
795
796 return 0;
797
798err_out_unregister_bus:
799 mdiobus_unregister(bp->mii_bus);
800err_out_free_mdiobus:
801 mdiobus_free(bp->mii_bus);
802err_out:
803 return err;
804}
805
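/* Accumulate the MACB hardware statistics registers (PFR through TPF) into
 * the software copy in bp->hw_stats.
 */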
806static void macb_update_stats(struct macb *bp)
807{
808 u32 *p = &bp->hw_stats.macb.rx_pause_frames;
809 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
810 int offset = MACB_PFR;
811
812 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
813
814 for (; p < end; p++, offset += 4)
815 *p += bp->macb_reg_readl(bp, offset);
816}
817
818static int macb_halt_tx(struct macb *bp)
819{
820 unsigned long halt_time, timeout;
821 u32 status;
822
823 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
824
825 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
826 do {
827 halt_time = jiffies;
828 status = macb_readl(bp, TSR);
829 if (!(status & MACB_BIT(TGO)))
830 return 0;
831
832 udelay(250);
833 } while (time_before(halt_time, timeout));
834
835 return -ETIMEDOUT;
836}
837
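/* Release the DMA mapping of a TX buffer and free the attached skb, if any. */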
838static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
839{
840 if (tx_skb->mapping) {
841 if (tx_skb->mapped_as_page)
842 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
843 tx_skb->size, DMA_TO_DEVICE);
844 else
845 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
846 tx_skb->size, DMA_TO_DEVICE);
847 tx_skb->mapping = 0;
848 }
849
850 if (tx_skb->skb) {
851 dev_kfree_skb_any(tx_skb->skb);
852 tx_skb->skb = NULL;
853 }
854}
855
856static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
857{
858#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
859 struct macb_dma_desc_64 *desc_64;
860
861 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
862 desc_64 = macb_64b_desc(bp, desc);
863 desc_64->addrh = upper_32_bits(addr);
864
865
866
867
868 dma_wmb();
869 }
870#endif
871 desc->addr = lower_32_bits(addr);
872}
873
874static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
875{
876 dma_addr_t addr = 0;
877#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
878 struct macb_dma_desc_64 *desc_64;
879
880 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
881 desc_64 = macb_64b_desc(bp, desc);
882 addr = ((u64)(desc_64->addrh) << 32);
883 }
884#endif
885 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
886 return addr;
887}
888
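/* Work item scheduled from the interrupt handler on a TX error: halt
 * transmission, reclaim and free everything still sitting in the TX ring,
 * reset the ring pointers and restart the transmitter.
 */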
889static void macb_tx_error_task(struct work_struct *work)
890{
891 struct macb_queue *queue = container_of(work, struct macb_queue,
892 tx_error_task);
893 struct macb *bp = queue->bp;
894 struct macb_tx_skb *tx_skb;
895 struct macb_dma_desc *desc;
896 struct sk_buff *skb;
897 unsigned int tail;
898 unsigned long flags;
899
900 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
901 (unsigned int)(queue - bp->queues),
902 queue->tx_tail, queue->tx_head);

 /* Stop the queues and take bp->lock so that neither the IRQ handlers
 * nor the xmit path can touch the TX ring while it is rebuilt; the
 * controller must also be halted before TBQP may be rewritten.
 */
910 spin_lock_irqsave(&bp->lock, flags);

 /* Make sure nobody is trying to queue up new packets */
913 netif_tx_stop_all_queues(bp->dev);

 /* Stop transmission now
 * (in case we have just queued new packets)
 * macb/gem must be halted to write TBQP register
 */
919 if (macb_halt_tx(bp))
 /* Just complain for now, reinitializing TX path can be good */
921 netdev_err(bp->dev, "BUG: halt tx timed out\n");

 /* Treat frames in TX queue including the ones that caused the error.
 * Free transmit buffers in upper layer.
 */
926 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
927 u32 ctrl;
928
929 desc = macb_tx_desc(queue, tail);
930 ctrl = desc->ctrl;
931 tx_skb = macb_tx_skb(queue, tail);
932 skb = tx_skb->skb;
933
934 if (ctrl & MACB_BIT(TX_USED)) {
935
936 while (!skb) {
937 macb_tx_unmap(bp, tx_skb);
938 tail++;
939 tx_skb = macb_tx_skb(queue, tail);
940 skb = tx_skb->skb;
941 }
942
943
944
945
946 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
947 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
948 macb_tx_ring_wrap(bp, tail),
949 skb->data);
950 bp->dev->stats.tx_packets++;
951 queue->stats.tx_packets++;
952 bp->dev->stats.tx_bytes += skb->len;
953 queue->stats.tx_bytes += skb->len;
954 }
955 } else {
956
957
958
959
960 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
961 netdev_err(bp->dev,
962 "BUG: TX buffers exhausted mid-frame\n");
963
964 desc->ctrl = ctrl | MACB_BIT(TX_USED);
965 }
966
967 macb_tx_unmap(bp, tx_skb);
968 }
969
970
971 desc = macb_tx_desc(queue, 0);
972 macb_set_addr(bp, desc, 0);
973 desc->ctrl = MACB_BIT(TX_USED);
974
975
976 wmb();
977
978
979 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
980#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
981 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
982 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
983#endif
984
985 queue->tx_head = 0;
986 queue->tx_tail = 0;
987
988
989 macb_writel(bp, TSR, macb_readl(bp, TSR));
990 queue_writel(queue, IER, MACB_TX_INT_FLAGS);
991
992
993 netif_tx_start_all_queues(bp->dev);
994 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
995
996 spin_unlock_irqrestore(&bp->lock, flags);
997}
998
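/* Reclaim TX descriptors that the controller has marked as used, free the
 * corresponding skbs, update statistics and wake the subqueue once enough
 * ring space is available again.
 */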
999static void macb_tx_interrupt(struct macb_queue *queue)
1000{
1001 unsigned int tail;
1002 unsigned int head;
1003 u32 status;
1004 struct macb *bp = queue->bp;
1005 u16 queue_index = queue - bp->queues;
1006
1007 status = macb_readl(bp, TSR);
1008 macb_writel(bp, TSR, status);
1009
1010 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1011 queue_writel(queue, ISR, MACB_BIT(TCOMP));
1012
1013 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
1014 (unsigned long)status);
1015
1016 head = queue->tx_head;
1017 for (tail = queue->tx_tail; tail != head; tail++) {
1018 struct macb_tx_skb *tx_skb;
1019 struct sk_buff *skb;
1020 struct macb_dma_desc *desc;
1021 u32 ctrl;
1022
1023 desc = macb_tx_desc(queue, tail);
1024
1025
1026 rmb();
1027
1028 ctrl = desc->ctrl;
1029
1030
1031
1032
1033 if (!(ctrl & MACB_BIT(TX_USED)))
1034 break;
1035
1036
1037 for (;; tail++) {
1038 tx_skb = macb_tx_skb(queue, tail);
1039 skb = tx_skb->skb;
1040
1041
1042 if (skb) {
1043 if (unlikely(skb_shinfo(skb)->tx_flags &
1044 SKBTX_HW_TSTAMP) &&
1045 gem_ptp_do_txstamp(queue, skb, desc) == 0) {
1046
1047
1048
1049 tx_skb->skb = NULL;
1050 }
1051 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
1052 macb_tx_ring_wrap(bp, tail),
1053 skb->data);
1054 bp->dev->stats.tx_packets++;
1055 queue->stats.tx_packets++;
1056 bp->dev->stats.tx_bytes += skb->len;
1057 queue->stats.tx_bytes += skb->len;
1058 }
1059
1060
1061 macb_tx_unmap(bp, tx_skb);
1062
1063
1064
1065
1066
1067 if (skb)
1068 break;
1069 }
1070 }
1071
1072 queue->tx_tail = tail;
1073 if (__netif_subqueue_stopped(bp->dev, queue_index) &&
1074 CIRC_CNT(queue->tx_head, queue->tx_tail,
1075 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
1076 netif_wake_subqueue(bp->dev, queue_index);
1077}
1078
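/* Refill the GEM RX ring: allocate and DMA-map fresh skbs for the unused
 * descriptors and hand ownership of them back to the controller.
 */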
1079static void gem_rx_refill(struct macb_queue *queue)
1080{
1081 unsigned int entry;
1082 struct sk_buff *skb;
1083 dma_addr_t paddr;
1084 struct macb *bp = queue->bp;
1085 struct macb_dma_desc *desc;
1086
1087 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
1088 bp->rx_ring_size) > 0) {
1089 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
1090
1091
1092 rmb();
1093
1094 queue->rx_prepared_head++;
1095 desc = macb_rx_desc(queue, entry);
1096
1097 if (!queue->rx_skbuff[entry]) {
1098
1099 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
1100 if (unlikely(!skb)) {
1101 netdev_err(bp->dev,
1102 "Unable to allocate sk_buff\n");
1103 break;
1104 }
1105
1106
1107 paddr = dma_map_single(&bp->pdev->dev, skb->data,
1108 bp->rx_buffer_size,
1109 DMA_FROM_DEVICE);
1110 if (dma_mapping_error(&bp->pdev->dev, paddr)) {
1111 dev_kfree_skb(skb);
1112 break;
1113 }
1114
1115 queue->rx_skbuff[entry] = skb;
1116
1117 if (entry == bp->rx_ring_size - 1)
1118 paddr |= MACB_BIT(RX_WRAP);
1119 desc->ctrl = 0;
1120
1121
1122
1123 dma_wmb();
1124 macb_set_addr(bp, desc, paddr);
1125
1126
1127 skb_reserve(skb, NET_IP_ALIGN);
1128 } else {
1129 desc->ctrl = 0;
1130 dma_wmb();
1131 desc->addr &= ~MACB_BIT(RX_USED);
1132 }
1133 }
1134
1135
1136 wmb();
1137
1138 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
1139 queue, queue->rx_prepared_head, queue->rx_tail);
1140}
1141
1142
1143static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
1144 unsigned int end)
1145{
1146 unsigned int frag;
1147
1148 for (frag = begin; frag != end; frag++) {
1149 struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
1150
1151 desc->addr &= ~MACB_BIT(RX_USED);
1152 }
1153
1154
1155 wmb();
1156
1157
1158
1159
1160
1161}
1162
1163static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
1164 int budget)
1165{
1166 struct macb *bp = queue->bp;
1167 unsigned int len;
1168 unsigned int entry;
1169 struct sk_buff *skb;
1170 struct macb_dma_desc *desc;
1171 int count = 0;
1172
1173 while (count < budget) {
1174 u32 ctrl;
1175 dma_addr_t addr;
1176 bool rxused;
1177
1178 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1179 desc = macb_rx_desc(queue, entry);
1180
1181
1182 rmb();
1183
1184 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
1185 addr = macb_get_addr(bp, desc);
1186
1187 if (!rxused)
1188 break;
1189
1190
1191 dma_rmb();
1192
1193 ctrl = desc->ctrl;
1194
1195 queue->rx_tail++;
1196 count++;
1197
1198 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
1199 netdev_err(bp->dev,
1200 "not whole frame pointed by descriptor\n");
1201 bp->dev->stats.rx_dropped++;
1202 queue->stats.rx_dropped++;
1203 break;
1204 }
1205 skb = queue->rx_skbuff[entry];
1206 if (unlikely(!skb)) {
1207 netdev_err(bp->dev,
1208 "inconsistent Rx descriptor chain\n");
1209 bp->dev->stats.rx_dropped++;
1210 queue->stats.rx_dropped++;
1211 break;
1212 }
1213
1214 queue->rx_skbuff[entry] = NULL;
1215 len = ctrl & bp->rx_frm_len_mask;
1216
1217 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
1218
1219 skb_put(skb, len);
1220 dma_unmap_single(&bp->pdev->dev, addr,
1221 bp->rx_buffer_size, DMA_FROM_DEVICE);
1222
1223 skb->protocol = eth_type_trans(skb, bp->dev);
1224 skb_checksum_none_assert(skb);
1225 if (bp->dev->features & NETIF_F_RXCSUM &&
1226 !(bp->dev->flags & IFF_PROMISC) &&
1227 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
1228 skb->ip_summed = CHECKSUM_UNNECESSARY;
1229
1230 bp->dev->stats.rx_packets++;
1231 queue->stats.rx_packets++;
1232 bp->dev->stats.rx_bytes += skb->len;
1233 queue->stats.rx_bytes += skb->len;
1234
1235 gem_ptp_do_rxstamp(bp, skb, desc);
1236
1237#if defined(DEBUG) && defined(VERBOSE_DEBUG)
1238 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1239 skb->len, skb->csum);
1240 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
1241 skb_mac_header(skb), 16, true);
1242 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
1243 skb->data, 32, true);
1244#endif
1245
1246 napi_gro_receive(napi, skb);
1247 }
1248
1249 gem_rx_refill(queue);
1250
1251 return count;
1252}
1253
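/* Copy a frame that may span several fixed-size RX buffers (first_frag to
 * last_frag) into a freshly allocated skb, pass it up the stack and return
 * the descriptors to the controller.
 */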
1254static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
1255 unsigned int first_frag, unsigned int last_frag)
1256{
1257 unsigned int len;
1258 unsigned int frag;
1259 unsigned int offset;
1260 struct sk_buff *skb;
1261 struct macb_dma_desc *desc;
1262 struct macb *bp = queue->bp;
1263
1264 desc = macb_rx_desc(queue, last_frag);
1265 len = desc->ctrl & bp->rx_frm_len_mask;
1266
1267 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
1268 macb_rx_ring_wrap(bp, first_frag),
1269 macb_rx_ring_wrap(bp, last_frag), len);
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
1280 if (!skb) {
1281 bp->dev->stats.rx_dropped++;
1282 for (frag = first_frag; ; frag++) {
1283 desc = macb_rx_desc(queue, frag);
1284 desc->addr &= ~MACB_BIT(RX_USED);
1285 if (frag == last_frag)
1286 break;
1287 }
1288
1289
1290 wmb();
1291
1292 return 1;
1293 }
1294
1295 offset = 0;
1296 len += NET_IP_ALIGN;
1297 skb_checksum_none_assert(skb);
1298 skb_put(skb, len);
1299
1300 for (frag = first_frag; ; frag++) {
1301 unsigned int frag_len = bp->rx_buffer_size;
1302
1303 if (offset + frag_len > len) {
1304 if (unlikely(frag != last_frag)) {
1305 dev_kfree_skb_any(skb);
1306 return -1;
1307 }
1308 frag_len = len - offset;
1309 }
1310 skb_copy_to_linear_data_offset(skb, offset,
1311 macb_rx_buffer(queue, frag),
1312 frag_len);
1313 offset += bp->rx_buffer_size;
1314 desc = macb_rx_desc(queue, frag);
1315 desc->addr &= ~MACB_BIT(RX_USED);
1316
1317 if (frag == last_frag)
1318 break;
1319 }
1320
1321
1322 wmb();
1323
1324 __skb_pull(skb, NET_IP_ALIGN);
1325 skb->protocol = eth_type_trans(skb, bp->dev);
1326
1327 bp->dev->stats.rx_packets++;
1328 bp->dev->stats.rx_bytes += skb->len;
1329 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1330 skb->len, skb->csum);
1331 napi_gro_receive(napi, skb);
1332
1333 return 0;
1334}
1335
1336static inline void macb_init_rx_ring(struct macb_queue *queue)
1337{
1338 struct macb *bp = queue->bp;
1339 dma_addr_t addr;
1340 struct macb_dma_desc *desc = NULL;
1341 int i;
1342
1343 addr = queue->rx_buffers_dma;
1344 for (i = 0; i < bp->rx_ring_size; i++) {
1345 desc = macb_rx_desc(queue, i);
1346 macb_set_addr(bp, desc, addr);
1347 desc->ctrl = 0;
1348 addr += bp->rx_buffer_size;
1349 }
1350 desc->addr |= MACB_BIT(RX_WRAP);
1351 queue->rx_tail = 0;
1352}
1353
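/* MACB (non-GEM) receive path: frames are delimited by the RX_SOF/RX_EOF
 * descriptor bits and may span multiple buffers; on ring corruption the
 * whole RX queue is reset.
 */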
1354static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
1355 int budget)
1356{
1357 struct macb *bp = queue->bp;
1358 bool reset_rx_queue = false;
1359 int received = 0;
1360 unsigned int tail;
1361 int first_frag = -1;
1362
1363 for (tail = queue->rx_tail; budget > 0; tail++) {
1364 struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
1365 u32 ctrl;
1366
1367
1368 rmb();
1369
1370 if (!(desc->addr & MACB_BIT(RX_USED)))
1371 break;
1372
1373
1374 dma_rmb();
1375
1376 ctrl = desc->ctrl;
1377
1378 if (ctrl & MACB_BIT(RX_SOF)) {
1379 if (first_frag != -1)
1380 discard_partial_frame(queue, first_frag, tail);
1381 first_frag = tail;
1382 }
1383
1384 if (ctrl & MACB_BIT(RX_EOF)) {
1385 int dropped;
1386
1387 if (unlikely(first_frag == -1)) {
1388 reset_rx_queue = true;
1389 continue;
1390 }
1391
1392 dropped = macb_rx_frame(queue, napi, first_frag, tail);
1393 first_frag = -1;
1394 if (unlikely(dropped < 0)) {
1395 reset_rx_queue = true;
1396 continue;
1397 }
1398 if (!dropped) {
1399 received++;
1400 budget--;
1401 }
1402 }
1403 }
1404
1405 if (unlikely(reset_rx_queue)) {
1406 unsigned long flags;
1407 u32 ctrl;
1408
1409 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1410
1411 spin_lock_irqsave(&bp->lock, flags);
1412
1413 ctrl = macb_readl(bp, NCR);
1414 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1415
1416 macb_init_rx_ring(queue);
1417 queue_writel(queue, RBQP, queue->rx_ring_dma);
1418
1419 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1420
1421 spin_unlock_irqrestore(&bp->lock, flags);
1422 return received;
1423 }
1424
1425 if (first_frag != -1)
1426 queue->rx_tail = first_frag;
1427 else
1428 queue->rx_tail = tail;
1429
1430 return received;
1431}
1432
1433static int macb_poll(struct napi_struct *napi, int budget)
1434{
1435 struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
1436 struct macb *bp = queue->bp;
1437 int work_done;
1438 u32 status;
1439
1440 status = macb_readl(bp, RSR);
1441 macb_writel(bp, RSR, status);
1442
1443 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
1444 (unsigned long)status, budget);
1445
1446 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
1447 if (work_done < budget) {
1448 napi_complete_done(napi, work_done);

 /* Packets received while interrupts were disabled */
1451 status = macb_readl(bp, RSR);
1452 if (status) {
1453 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1454 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1455 napi_reschedule(napi);
1456 } else {
1457 queue_writel(queue, IER, bp->rx_intr_mask);
1458 }
1459 }
1460

 /* TODO: Handle errors */
1463 return work_done;
1464}
1465
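/* Tasklet run after an HRESP (bus) error: mask interrupts, stop RX/TX,
 * rebuild the rings and buffers, then re-enable everything.
 */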
1466static void macb_hresp_error_task(unsigned long data)
1467{
1468 struct macb *bp = (struct macb *)data;
1469 struct net_device *dev = bp->dev;
1470 struct macb_queue *queue = bp->queues;
1471 unsigned int q;
1472 u32 ctrl;
1473
1474 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1475 queue_writel(queue, IDR, bp->rx_intr_mask |
1476 MACB_TX_INT_FLAGS |
1477 MACB_BIT(HRESP));
1478 }
1479 ctrl = macb_readl(bp, NCR);
1480 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
1481 macb_writel(bp, NCR, ctrl);
1482
1483 netif_tx_stop_all_queues(dev);
1484 netif_carrier_off(dev);
1485
1486 bp->macbgem_ops.mog_init_rings(bp);
1487
1488
1489 macb_init_buffers(bp);
1490
1491
1492 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1493 queue_writel(queue, IER,
1494 bp->rx_intr_mask |
1495 MACB_TX_INT_FLAGS |
1496 MACB_BIT(HRESP));
1497
1498 ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
1499 macb_writel(bp, NCR, ctrl);
1500
1501 netif_carrier_on(dev);
1502 netif_tx_start_all_queues(dev);
1503}
1504
1505static void macb_tx_restart(struct macb_queue *queue)
1506{
1507 unsigned int head = queue->tx_head;
1508 unsigned int tail = queue->tx_tail;
1509 struct macb *bp = queue->bp;
1510
1511 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1512 queue_writel(queue, ISR, MACB_BIT(TXUBR));
1513
1514 if (head == tail)
1515 return;
1516
1517 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1518}
1519
1520static irqreturn_t macb_interrupt(int irq, void *dev_id)
1521{
1522 struct macb_queue *queue = dev_id;
1523 struct macb *bp = queue->bp;
1524 struct net_device *dev = bp->dev;
1525 u32 status, ctrl;
1526
1527 status = queue_readl(queue, ISR);
1528
1529 if (unlikely(!status))
1530 return IRQ_NONE;
1531
1532 spin_lock(&bp->lock);
1533
1534 while (status) {
1535
1536 if (unlikely(!netif_running(dev))) {
1537 queue_writel(queue, IDR, -1);
1538 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1539 queue_writel(queue, ISR, -1);
1540 break;
1541 }
1542
1543 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1544 (unsigned int)(queue - bp->queues),
1545 (unsigned long)status);
1546
1547 if (status & bp->rx_intr_mask) {
 /* There's no point taking any more interrupts
 * until we have processed the buffers. The
 * scheduling call may fail if the poll routine
 * is already scheduled, so disable interrupts
 * now.
 */
1554 queue_writel(queue, IDR, bp->rx_intr_mask);
1555 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1556 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1557
1558 if (napi_schedule_prep(&queue->napi)) {
1559 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1560 __napi_schedule(&queue->napi);
1561 }
1562 }
1563
1564 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
1565 queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1566 schedule_work(&queue->tx_error_task);
1567
1568 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1569 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1570
1571 break;
1572 }
1573
1574 if (status & MACB_BIT(TCOMP))
1575 macb_tx_interrupt(queue);
1576
1577 if (status & MACB_BIT(TXUBR))
1578 macb_tx_restart(queue);

 /* Link change detection isn't possible with RMII, so we'll
 * add that if/when we get our hands on a full-blown MII PHY.
 */

 /* There is a hardware issue under heavy load where DMA can
 * stop, this causes endless "used buffer descriptor read"
 * interrupts but it can be cleared by re-enabling RX. See
 * the at91rm9200 manual, section 41.3.1 or the Zynq manual
 * section 16.7.4 for details. RXUBR is only enabled for
 * these two versions.
 */
1591 if (status & MACB_BIT(RXUBR)) {
1592 ctrl = macb_readl(bp, NCR);
1593 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1594 wmb();
1595 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1596
1597 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1598 queue_writel(queue, ISR, MACB_BIT(RXUBR));
1599 }
1600
1601 if (status & MACB_BIT(ISR_ROVR)) {
1602
1603 if (macb_is_gem(bp))
1604 bp->hw_stats.gem.rx_overruns++;
1605 else
1606 bp->hw_stats.macb.rx_overruns++;
1607
1608 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1609 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1610 }
1611
1612 if (status & MACB_BIT(HRESP)) {
1613 tasklet_schedule(&bp->hresp_err_tasklet);
1614 netdev_err(dev, "DMA bus error: HRESP not OK\n");
1615
1616 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1617 queue_writel(queue, ISR, MACB_BIT(HRESP));
1618 }
1619 status = queue_readl(queue, ISR);
1620 }
1621
1622 spin_unlock(&bp->lock);
1623
1624 return IRQ_HANDLED;
1625}
1626
1627#ifdef CONFIG_NET_POLL_CONTROLLER
1628
1629
1630
1631static void macb_poll_controller(struct net_device *dev)
1632{
1633 struct macb *bp = netdev_priv(dev);
1634 struct macb_queue *queue;
1635 unsigned long flags;
1636 unsigned int q;
1637
1638 local_irq_save(flags);
1639 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1640 macb_interrupt(dev->irq, queue);
1641 local_irq_restore(flags);
1642}
1643#endif
1644
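/* Map the skb's linear header and page fragments onto TX descriptors. The
 * control words are written in reverse order so that the first descriptor's
 * TX_USED bit is only cleared once the rest of the chain is valid.
 */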
1645static unsigned int macb_tx_map(struct macb *bp,
1646 struct macb_queue *queue,
1647 struct sk_buff *skb,
1648 unsigned int hdrlen)
1649{
1650 dma_addr_t mapping;
1651 unsigned int len, entry, i, tx_head = queue->tx_head;
1652 struct macb_tx_skb *tx_skb = NULL;
1653 struct macb_dma_desc *desc;
1654 unsigned int offset, size, count = 0;
1655 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
1656 unsigned int eof = 1, mss_mfs = 0;
1657 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
1658
1659
1660 if (skb_shinfo(skb)->gso_size != 0) {
1661 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1662
1663 lso_ctrl = MACB_LSO_UFO_ENABLE;
1664 else
1665
1666 lso_ctrl = MACB_LSO_TSO_ENABLE;
1667 }
1668
1669
1670 len = skb_headlen(skb);
1671
1672
1673 size = hdrlen;
1674
1675 offset = 0;
1676 while (len) {
1677 entry = macb_tx_ring_wrap(bp, tx_head);
1678 tx_skb = &queue->tx_skb[entry];
1679
1680 mapping = dma_map_single(&bp->pdev->dev,
1681 skb->data + offset,
1682 size, DMA_TO_DEVICE);
1683 if (dma_mapping_error(&bp->pdev->dev, mapping))
1684 goto dma_error;
1685
1686
1687 tx_skb->skb = NULL;
1688 tx_skb->mapping = mapping;
1689 tx_skb->size = size;
1690 tx_skb->mapped_as_page = false;
1691
1692 len -= size;
1693 offset += size;
1694 count++;
1695 tx_head++;
1696
1697 size = min(len, bp->max_tx_length);
1698 }
1699
1700
1701 for (f = 0; f < nr_frags; f++) {
1702 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1703
1704 len = skb_frag_size(frag);
1705 offset = 0;
1706 while (len) {
1707 size = min(len, bp->max_tx_length);
1708 entry = macb_tx_ring_wrap(bp, tx_head);
1709 tx_skb = &queue->tx_skb[entry];
1710
1711 mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
1712 offset, size, DMA_TO_DEVICE);
1713 if (dma_mapping_error(&bp->pdev->dev, mapping))
1714 goto dma_error;
1715
1716
1717 tx_skb->skb = NULL;
1718 tx_skb->mapping = mapping;
1719 tx_skb->size = size;
1720 tx_skb->mapped_as_page = true;
1721
1722 len -= size;
1723 offset += size;
1724 count++;
1725 tx_head++;
1726 }
1727 }
1728
1729
1730 if (unlikely(!tx_skb)) {
1731 netdev_err(bp->dev, "BUG! empty skb!\n");
1732 return 0;
1733 }
1734
1735
1736 tx_skb->skb = skb;
1737
1738
1739
1740
1741
1742
1743
1744
1745 i = tx_head;
1746 entry = macb_tx_ring_wrap(bp, i);
1747 ctrl = MACB_BIT(TX_USED);
1748 desc = macb_tx_desc(queue, entry);
1749 desc->ctrl = ctrl;
1750
1751 if (lso_ctrl) {
1752 if (lso_ctrl == MACB_LSO_UFO_ENABLE)
1753
1754 mss_mfs = skb_shinfo(skb)->gso_size +
1755 skb_transport_offset(skb) +
1756 ETH_FCS_LEN;
1757 else {
1758 mss_mfs = skb_shinfo(skb)->gso_size;
1759
1760
1761
1762 seq_ctrl = 0;
1763 }
1764 }
1765
1766 do {
1767 i--;
1768 entry = macb_tx_ring_wrap(bp, i);
1769 tx_skb = &queue->tx_skb[entry];
1770 desc = macb_tx_desc(queue, entry);
1771
1772 ctrl = (u32)tx_skb->size;
1773 if (eof) {
1774 ctrl |= MACB_BIT(TX_LAST);
1775 eof = 0;
1776 }
1777 if (unlikely(entry == (bp->tx_ring_size - 1)))
1778 ctrl |= MACB_BIT(TX_WRAP);
1779
1780
1781 if (i == queue->tx_head) {
1782 ctrl |= MACB_BF(TX_LSO, lso_ctrl);
1783 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
1784 if ((bp->dev->features & NETIF_F_HW_CSUM) &&
1785 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
1786 ctrl |= MACB_BIT(TX_NOCRC);
1787 } else
1788
1789
1790
1791 ctrl |= MACB_BF(MSS_MFS, mss_mfs);
1792
1793
1794 macb_set_addr(bp, desc, tx_skb->mapping);
1795
1796
1797
1798 wmb();
1799 desc->ctrl = ctrl;
1800 } while (i != queue->tx_head);
1801
1802 queue->tx_head = tx_head;
1803
1804 return count;
1805
1806dma_error:
1807 netdev_err(bp->dev, "TX DMA map failed\n");
1808
1809 for (i = queue->tx_head; i != tx_head; i++) {
1810 tx_skb = macb_tx_skb(queue, i);
1811
1812 macb_tx_unmap(bp, tx_skb);
1813 }
1814
1815 return 0;
1816}
1817
1818static netdev_features_t macb_features_check(struct sk_buff *skb,
1819 struct net_device *dev,
1820 netdev_features_t features)
1821{
1822 unsigned int nr_frags, f;
1823 unsigned int hdrlen;
1824
1825
1826
1827
1828 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
1829 return features;
1830
1831
1832 hdrlen = skb_transport_offset(skb);
1833
1834
1835
1836
1837
1838 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
1839 return features & ~MACB_NETIF_LSO;
1840
1841 nr_frags = skb_shinfo(skb)->nr_frags;
1842
1843 nr_frags--;
1844 for (f = 0; f < nr_frags; f++) {
1845 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1846
1847 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
1848 return features & ~MACB_NETIF_LSO;
1849 }
1850 return features;
1851}
1852
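/* For checksum offload the controller expects the checksum field to start
 * out as zero; make the header writable and clear it here.
 */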
1853static inline int macb_clear_csum(struct sk_buff *skb)
1854{
 /* no change for packets without checksum offloading */
1856 if (skb->ip_summed != CHECKSUM_PARTIAL)
1857 return 0;

 /* make sure we can modify the header */
1860 if (unlikely(skb_cow_head(skb, 0)))
1861 return -1;

 /* initialize checksum field
 * This is required - at least for Zynq, which otherwise calculates
 * wrong UDP checksums for some packets
 */
1867 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
1868 return 0;
1869}
1870
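/* When TX checksum offload is enabled but this skb does not use it, the
 * TX_NOCRC path is taken in macb_tx_map(), so the frame must be padded to
 * the minimum Ethernet length and a software-computed FCS appended here.
 */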
1871static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
1872{
1873 bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
1874 int padlen = ETH_ZLEN - (*skb)->len;
1875 int headroom = skb_headroom(*skb);
1876 int tailroom = skb_tailroom(*skb);
1877 struct sk_buff *nskb;
1878 u32 fcs;
1879
1880 if (!(ndev->features & NETIF_F_HW_CSUM) ||
1881 !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
1882 skb_shinfo(*skb)->gso_size)
1883 return 0;
1884
1885 if (padlen <= 0) {
1886
1887 if (tailroom >= ETH_FCS_LEN)
1888 goto add_fcs;
1889
1890 else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
1891 padlen = 0;
1892
1893 else
1894 padlen = ETH_FCS_LEN;
1895 } else {
1896
1897 padlen += ETH_FCS_LEN;
1898 }
1899
1900 if (!cloned && headroom + tailroom >= padlen) {
1901 (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
1902 skb_set_tail_pointer(*skb, (*skb)->len);
1903 } else {
1904 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
1905 if (!nskb)
1906 return -ENOMEM;
1907
1908 dev_consume_skb_any(*skb);
1909 *skb = nskb;
1910 }
1911
1912 if (padlen > ETH_FCS_LEN)
1913 skb_put_zero(*skb, padlen - ETH_FCS_LEN);
1914
1915add_fcs:
1916
1917 fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
1918 fcs = ~fcs;
1919
1920 skb_put_u8(*skb, fcs & 0xff);
1921 skb_put_u8(*skb, (fcs >> 8) & 0xff);
1922 skb_put_u8(*skb, (fcs >> 16) & 0xff);
1923 skb_put_u8(*skb, (fcs >> 24) & 0xff);
1924
1925 return 0;
1926}
1927
1928static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1929{
1930 u16 queue_index = skb_get_queue_mapping(skb);
1931 struct macb *bp = netdev_priv(dev);
1932 struct macb_queue *queue = &bp->queues[queue_index];
1933 unsigned long flags;
1934 unsigned int desc_cnt, nr_frags, frag_size, f;
1935 unsigned int hdrlen;
1936 bool is_lso, is_udp = 0;
1937 netdev_tx_t ret = NETDEV_TX_OK;
1938
1939 if (macb_clear_csum(skb)) {
1940 dev_kfree_skb_any(skb);
1941 return ret;
1942 }
1943
1944 if (macb_pad_and_fcs(&skb, dev)) {
1945 dev_kfree_skb_any(skb);
1946 return ret;
1947 }
1948
1949 is_lso = (skb_shinfo(skb)->gso_size != 0);
1950
1951 if (is_lso) {
1952 is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
1953
1954
1955 if (is_udp)
1956
1957 hdrlen = skb_transport_offset(skb);
1958 else
1959 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
1960 if (skb_headlen(skb) < hdrlen) {
1961 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
1962
1963 return NETDEV_TX_BUSY;
1964 }
1965 } else
1966 hdrlen = min(skb_headlen(skb), bp->max_tx_length);
1967
1968#if defined(DEBUG) && defined(VERBOSE_DEBUG)
1969 netdev_vdbg(bp->dev,
1970 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1971 queue_index, skb->len, skb->head, skb->data,
1972 skb_tail_pointer(skb), skb_end_pointer(skb));
1973 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1974 skb->data, 16, true);
1975#endif
1976
1977
1978
1979
1980
1981 if (is_lso && (skb_headlen(skb) > hdrlen))
1982
1983 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
1984 else
1985 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1986 nr_frags = skb_shinfo(skb)->nr_frags;
1987 for (f = 0; f < nr_frags; f++) {
1988 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1989 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1990 }
1991
1992 spin_lock_irqsave(&bp->lock, flags);
1993
1994
1995 if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
1996 bp->tx_ring_size) < desc_cnt) {
1997 netif_stop_subqueue(dev, queue_index);
1998 spin_unlock_irqrestore(&bp->lock, flags);
1999 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
2000 queue->tx_head, queue->tx_tail);
2001 return NETDEV_TX_BUSY;
2002 }
2003
2004
2005 if (!macb_tx_map(bp, queue, skb, hdrlen)) {
2006 dev_kfree_skb_any(skb);
2007 goto unlock;
2008 }
2009
2010
2011 wmb();
2012 skb_tx_timestamp(skb);
2013
2014 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
2015
2016 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
2017 netif_stop_subqueue(dev, queue_index);
2018
2019unlock:
2020 spin_unlock_irqrestore(&bp->lock, flags);
2021
2022 return ret;
2023}
2024
2025static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
2026{
2027 if (!macb_is_gem(bp)) {
2028 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
2029 } else {
2030 bp->rx_buffer_size = size;
2031
2032 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
2033 netdev_dbg(bp->dev,
2034 "RX buffer must be multiple of %d bytes, expanding\n",
2035 RX_BUFFER_MULTIPLE);
2036 bp->rx_buffer_size =
2037 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
2038 }
2039 }
2040
2041 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
2042 bp->dev->mtu, bp->rx_buffer_size);
2043}
2044
2045static void gem_free_rx_buffers(struct macb *bp)
2046{
2047 struct sk_buff *skb;
2048 struct macb_dma_desc *desc;
2049 struct macb_queue *queue;
2050 dma_addr_t addr;
2051 unsigned int q;
2052 int i;
2053
2054 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2055 if (!queue->rx_skbuff)
2056 continue;
2057
2058 for (i = 0; i < bp->rx_ring_size; i++) {
2059 skb = queue->rx_skbuff[i];
2060
2061 if (!skb)
2062 continue;
2063
2064 desc = macb_rx_desc(queue, i);
2065 addr = macb_get_addr(bp, desc);
2066
2067 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
2068 DMA_FROM_DEVICE);
2069 dev_kfree_skb_any(skb);
2070 skb = NULL;
2071 }
2072
2073 kfree(queue->rx_skbuff);
2074 queue->rx_skbuff = NULL;
2075 }
2076}
2077
2078static void macb_free_rx_buffers(struct macb *bp)
2079{
2080 struct macb_queue *queue = &bp->queues[0];
2081
2082 if (queue->rx_buffers) {
2083 dma_free_coherent(&bp->pdev->dev,
2084 bp->rx_ring_size * bp->rx_buffer_size,
2085 queue->rx_buffers, queue->rx_buffers_dma);
2086 queue->rx_buffers = NULL;
2087 }
2088}
2089
2090static void macb_free_consistent(struct macb *bp)
2091{
2092 struct macb_queue *queue;
2093 unsigned int q;
2094 int size;
2095
2096 bp->macbgem_ops.mog_free_rx_buffers(bp);
2097
2098 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2099 kfree(queue->tx_skb);
2100 queue->tx_skb = NULL;
2101 if (queue->tx_ring) {
2102 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
2103 dma_free_coherent(&bp->pdev->dev, size,
2104 queue->tx_ring, queue->tx_ring_dma);
2105 queue->tx_ring = NULL;
2106 }
2107 if (queue->rx_ring) {
2108 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
2109 dma_free_coherent(&bp->pdev->dev, size,
2110 queue->rx_ring, queue->rx_ring_dma);
2111 queue->rx_ring = NULL;
2112 }
2113 }
2114}
2115
2116static int gem_alloc_rx_buffers(struct macb *bp)
2117{
2118 struct macb_queue *queue;
2119 unsigned int q;
2120 int size;
2121
2122 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2123 size = bp->rx_ring_size * sizeof(struct sk_buff *);
2124 queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
2125 if (!queue->rx_skbuff)
2126 return -ENOMEM;
2127 else
2128 netdev_dbg(bp->dev,
2129 "Allocated %d RX struct sk_buff entries at %p\n",
2130 bp->rx_ring_size, queue->rx_skbuff);
2131 }
2132 return 0;
2133}
2134
2135static int macb_alloc_rx_buffers(struct macb *bp)
2136{
2137 struct macb_queue *queue = &bp->queues[0];
2138 int size;
2139
2140 size = bp->rx_ring_size * bp->rx_buffer_size;
2141 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
2142 &queue->rx_buffers_dma, GFP_KERNEL);
2143 if (!queue->rx_buffers)
2144 return -ENOMEM;
2145
2146 netdev_dbg(bp->dev,
2147 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
2148 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
2149 return 0;
2150}
2151
2152static int macb_alloc_consistent(struct macb *bp)
2153{
2154 struct macb_queue *queue;
2155 unsigned int q;
2156 int size;
2157
2158 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2159 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
2160 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2161 &queue->tx_ring_dma,
2162 GFP_KERNEL);
2163 if (!queue->tx_ring)
2164 goto out_err;
2165 netdev_dbg(bp->dev,
2166 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
2167 q, size, (unsigned long)queue->tx_ring_dma,
2168 queue->tx_ring);
2169
2170 size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
2171 queue->tx_skb = kmalloc(size, GFP_KERNEL);
2172 if (!queue->tx_skb)
2173 goto out_err;
2174
2175 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
2176 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2177 &queue->rx_ring_dma, GFP_KERNEL);
2178 if (!queue->rx_ring)
2179 goto out_err;
2180 netdev_dbg(bp->dev,
2181 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
2182 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
2183 }
2184 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
2185 goto out_err;
2186
2187 return 0;
2188
2189out_err:
2190 macb_free_consistent(bp);
2191 return -ENOMEM;
2192}
2193
2194static void gem_init_rings(struct macb *bp)
2195{
2196 struct macb_queue *queue;
2197 struct macb_dma_desc *desc = NULL;
2198 unsigned int q;
2199 int i;
2200
2201 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2202 for (i = 0; i < bp->tx_ring_size; i++) {
2203 desc = macb_tx_desc(queue, i);
2204 macb_set_addr(bp, desc, 0);
2205 desc->ctrl = MACB_BIT(TX_USED);
2206 }
2207 desc->ctrl |= MACB_BIT(TX_WRAP);
2208 queue->tx_head = 0;
2209 queue->tx_tail = 0;
2210
2211 queue->rx_tail = 0;
2212 queue->rx_prepared_head = 0;
2213
2214 gem_rx_refill(queue);
2215 }
2216
2217}
2218
2219static void macb_init_rings(struct macb *bp)
2220{
2221 int i;
2222 struct macb_dma_desc *desc = NULL;
2223
2224 macb_init_rx_ring(&bp->queues[0]);
2225
2226 for (i = 0; i < bp->tx_ring_size; i++) {
2227 desc = macb_tx_desc(&bp->queues[0], i);
2228 macb_set_addr(bp, desc, 0);
2229 desc->ctrl = MACB_BIT(TX_USED);
2230 }
2231 bp->queues[0].tx_head = 0;
2232 bp->queues[0].tx_tail = 0;
2233 desc->ctrl |= MACB_BIT(TX_WRAP);
2234}
2235
2236static void macb_reset_hw(struct macb *bp)
2237{
2238 struct macb_queue *queue;
2239 unsigned int q;
2240 u32 ctrl = macb_readl(bp, NCR);
2241
2242
2243
2244
2245 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
2246
2247
2248 ctrl |= MACB_BIT(CLRSTAT);
2249
2250 macb_writel(bp, NCR, ctrl);
2251
2252
2253 macb_writel(bp, TSR, -1);
2254 macb_writel(bp, RSR, -1);
2255
2256
2257 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2258 queue_writel(queue, IDR, -1);
2259 queue_readl(queue, ISR);
2260 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2261 queue_writel(queue, ISR, -1);
2262 }
2263}
2264
2265static u32 gem_mdc_clk_div(struct macb *bp)
2266{
2267 u32 config;
2268 unsigned long pclk_hz = clk_get_rate(bp->pclk);
2269
2270 if (pclk_hz <= 20000000)
2271 config = GEM_BF(CLK, GEM_CLK_DIV8);
2272 else if (pclk_hz <= 40000000)
2273 config = GEM_BF(CLK, GEM_CLK_DIV16);
2274 else if (pclk_hz <= 80000000)
2275 config = GEM_BF(CLK, GEM_CLK_DIV32);
2276 else if (pclk_hz <= 120000000)
2277 config = GEM_BF(CLK, GEM_CLK_DIV48);
2278 else if (pclk_hz <= 160000000)
2279 config = GEM_BF(CLK, GEM_CLK_DIV64);
2280 else
2281 config = GEM_BF(CLK, GEM_CLK_DIV96);
2282
2283 return config;
2284}
2285
2286static u32 macb_mdc_clk_div(struct macb *bp)
2287{
2288 u32 config;
2289 unsigned long pclk_hz;
2290
2291 if (macb_is_gem(bp))
2292 return gem_mdc_clk_div(bp);
2293
2294 pclk_hz = clk_get_rate(bp->pclk);
2295 if (pclk_hz <= 20000000)
2296 config = MACB_BF(CLK, MACB_CLK_DIV8);
2297 else if (pclk_hz <= 40000000)
2298 config = MACB_BF(CLK, MACB_CLK_DIV16);
2299 else if (pclk_hz <= 80000000)
2300 config = MACB_BF(CLK, MACB_CLK_DIV32);
2301 else
2302 config = MACB_BF(CLK, MACB_CLK_DIV64);
2303
2304 return config;
2305}

/* Get the DMA bus width field of the network configuration register that we
 * should program.  We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
2311static u32 macb_dbw(struct macb *bp)
2312{
2313 if (!macb_is_gem(bp))
2314 return 0;
2315
2316 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
2317 case 4:
2318 return GEM_BF(DBW, GEM_DBW128);
2319 case 2:
2320 return GEM_BF(DBW, GEM_DBW64);
2321 case 1:
2322 default:
2323 return GEM_BF(DBW, GEM_DBW32);
2324 }
2325}

/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
2334static void macb_configure_dma(struct macb *bp)
2335{
2336 struct macb_queue *queue;
2337 u32 buffer_size;
2338 unsigned int q;
2339 u32 dmacfg;
2340
2341 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
2342 if (macb_is_gem(bp)) {
2343 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
2344 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2345 if (q)
2346 queue_writel(queue, RBQS, buffer_size);
2347 else
2348 dmacfg |= GEM_BF(RXBS, buffer_size);
2349 }
2350 if (bp->dma_burst_length)
2351 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
2352 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
2353 dmacfg &= ~GEM_BIT(ENDIA_PKT);
2354
2355 if (bp->native_io)
2356 dmacfg &= ~GEM_BIT(ENDIA_DESC);
2357 else
2358 dmacfg |= GEM_BIT(ENDIA_DESC);
2359
2360 if (bp->dev->features & NETIF_F_HW_CSUM)
2361 dmacfg |= GEM_BIT(TXCOEN);
2362 else
2363 dmacfg &= ~GEM_BIT(TXCOEN);
2364
2365 dmacfg &= ~GEM_BIT(ADDR64);
2366#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2367 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2368 dmacfg |= GEM_BIT(ADDR64);
2369#endif
2370#ifdef CONFIG_MACB_USE_HWSTAMP
2371 if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
2372 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
2373#endif
2374 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
2375 dmacfg);
2376 gem_writel(bp, DMACFG, dmacfg);
2377 }
2378}
2379
2380static void macb_init_hw(struct macb *bp)
2381{
2382 u32 config;
2383
2384 macb_reset_hw(bp);
2385 macb_set_hwaddr(bp);
2386
2387 config = macb_mdc_clk_div(bp);
2388 config |= MACB_BF(RBOF, NET_IP_ALIGN);
2389 config |= MACB_BIT(DRFCS);
2390 if (bp->caps & MACB_CAPS_JUMBO)
2391 config |= MACB_BIT(JFRAME);
2392 else
2393 config |= MACB_BIT(BIG);
2394 if (bp->dev->flags & IFF_PROMISC)
2395 config |= MACB_BIT(CAF);
2396 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
2397 config |= GEM_BIT(RXCOEN);
2398 if (!(bp->dev->flags & IFF_BROADCAST))
2399 config |= MACB_BIT(NBC);
2400 config |= macb_dbw(bp);
2401 macb_writel(bp, NCFGR, config);
2402 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
2403 gem_writel(bp, JML, bp->jumbo_max_len);
2404 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
2405 if (bp->caps & MACB_CAPS_JUMBO)
2406 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
2407
2408 macb_configure_dma(bp);
2409}
2410
/* The hash address register is 64 bits long and takes up two locations in
 * the memory map: the least significant bits are written to HRB and the
 * most significant bits to HRT.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames.  The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function.  The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received.  If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast.  To receive all multicast frames, the
 * hash register should be set with all ones and the multicast hash
 * enable bit should be set in the network configuration register.
 */
2444static inline int hash_bit_value(int bitnr, __u8 *addr)
2445{
2446 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
2447 return 1;
2448 return 0;
2449}
2450
2451
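/* Return the hash index value for the specified address. */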
2452static int hash_get_index(__u8 *addr)
2453{
2454 int i, j, bitval;
2455 int hash_index = 0;
2456
2457 for (j = 0; j < 6; j++) {
2458 for (i = 0, bitval = 0; i < 8; i++)
2459 bitval ^= hash_bit_value(i * 6 + j, addr);
2460
2461 hash_index |= (bitval << j);
2462 }
2463
2464 return hash_index;
2465}
2466
2467
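/* Add multicast addresses to the internal multicast-hash table. */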
2468static void macb_sethashtable(struct net_device *dev)
2469{
2470 struct netdev_hw_addr *ha;
2471 unsigned long mc_filter[2];
2472 unsigned int bitnr;
2473 struct macb *bp = netdev_priv(dev);
2474
2475 mc_filter[0] = 0;
2476 mc_filter[1] = 0;
2477
2478 netdev_for_each_mc_addr(ha, dev) {
2479 bitnr = hash_get_index(ha->addr);
2480 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
2481 }
2482
2483 macb_or_gem_writel(bp, HRB, mc_filter[0]);
2484 macb_or_gem_writel(bp, HRT, mc_filter[1]);
2485}
2486
2487
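/* Enable/Disable promiscuous and multicast modes. */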
2488static void macb_set_rx_mode(struct net_device *dev)
2489{
2490 unsigned long cfg;
2491 struct macb *bp = netdev_priv(dev);
2492
2493 cfg = macb_readl(bp, NCFGR);
2494
2495 if (dev->flags & IFF_PROMISC) {
2496
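		/* Enable promiscuous mode (copy all frames) */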
2497 cfg |= MACB_BIT(CAF);
2498
2499
2500 if (macb_is_gem(bp))
2501 cfg &= ~GEM_BIT(RXCOEN);
2502 } else {
2503
2504 cfg &= ~MACB_BIT(CAF);
2505
2506
2507 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2508 cfg |= GEM_BIT(RXCOEN);
2509 }
2510
2511 if (dev->flags & IFF_ALLMULTI) {
2512
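		/* Accept all multicast frames: set the hash table to all ones */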
2513 macb_or_gem_writel(bp, HRB, -1);
2514 macb_or_gem_writel(bp, HRT, -1);
2515 cfg |= MACB_BIT(NCFGR_MTI);
2516 } else if (!netdev_mc_empty(dev)) {
2517
2518 macb_sethashtable(dev);
2519 cfg |= MACB_BIT(NCFGR_MTI);
2520 } else if (dev->flags & (~IFF_ALLMULTI)) {
2521
2522 macb_or_gem_writel(bp, HRB, 0);
2523 macb_or_gem_writel(bp, HRT, 0);
2524 cfg &= ~MACB_BIT(NCFGR_MTI);
2525 }
2526
2527 macb_writel(bp, NCFGR, cfg);
2528}
2529
2530static int macb_open(struct net_device *dev)
2531{
2532 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
2533 struct macb *bp = netdev_priv(dev);
2534 struct macb_queue *queue;
2535 unsigned int q;
2536 int err;
2537
2538 netdev_dbg(bp->dev, "open\n");
2539
2540 err = pm_runtime_get_sync(&bp->pdev->dev);
2541 if (err < 0)
2542 goto pm_exit;
2543
2544
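	/* RX buffer size initialization, based on the current MTU */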
2545 macb_init_rx_buffer_size(bp, bufsz);
2546
2547 err = macb_alloc_consistent(bp);
2548 if (err) {
2549 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
2550 err);
2551 goto pm_exit;
2552 }
2553
2554 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2555 napi_enable(&queue->napi);
2556
2557 macb_init_hw(bp);
2558
2559 err = macb_phylink_connect(bp);
2560 if (err)
2561 goto reset_hw;
2562
2563 netif_tx_start_all_queues(dev);
2564
2565 if (bp->ptp_info)
2566 bp->ptp_info->ptp_init(dev);
2567
2568 return 0;
2569
2570reset_hw:
2571 macb_reset_hw(bp);
2572 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2573 napi_disable(&queue->napi);
2574 macb_free_consistent(bp);
2575pm_exit:
2576 pm_runtime_put_sync(&bp->pdev->dev);
2577 return err;
2578}
2579
2580static int macb_close(struct net_device *dev)
2581{
2582 struct macb *bp = netdev_priv(dev);
2583 struct macb_queue *queue;
2584 unsigned long flags;
2585 unsigned int q;
2586
2587 netif_tx_stop_all_queues(dev);
2588
2589 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2590 napi_disable(&queue->napi);
2591
2592 phylink_stop(bp->phylink);
2593 phylink_disconnect_phy(bp->phylink);
2594
2595 spin_lock_irqsave(&bp->lock, flags);
2596 macb_reset_hw(bp);
2597 netif_carrier_off(dev);
2598 spin_unlock_irqrestore(&bp->lock, flags);
2599
2600 macb_free_consistent(bp);
2601
2602 if (bp->ptp_info)
2603 bp->ptp_info->ptp_remove(dev);
2604
2605 pm_runtime_put(&bp->pdev->dev);
2606
2607 return 0;
2608}
2609
2610static int macb_change_mtu(struct net_device *dev, int new_mtu)
2611{
2612 if (netif_running(dev))
2613 return -EBUSY;
2614
2615 dev->mtu = new_mtu;
2616
2617 return 0;
2618}
2619
2620static void gem_update_stats(struct macb *bp)
2621{
2622 struct macb_queue *queue;
2623 unsigned int i, q, idx;
2624 unsigned long *stat;
2625
2626 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
2627
2628 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
2629 u32 offset = gem_statistics[i].offset;
2630 u64 val = bp->macb_reg_readl(bp, offset);
2631
2632 bp->ethtool_stats[i] += val;
2633 *p += val;
2634
2635 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
2636
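			/* 64-bit octet counters: also accumulate the high 32 bits */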
2637 val = bp->macb_reg_readl(bp, offset + 4);
2638 bp->ethtool_stats[i] += ((u64)val) << 32;
2639 *(++p) += val;
2640 }
2641 }
2642
2643 idx = GEM_STATS_LEN;
2644 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2645 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
2646 bp->ethtool_stats[idx++] = *stat;
2647}
2648
2649static struct net_device_stats *gem_get_stats(struct macb *bp)
2650{
2651 struct gem_stats *hwstat = &bp->hw_stats.gem;
2652 struct net_device_stats *nstat = &bp->dev->stats;
2653
2654 gem_update_stats(bp);
2655
2656 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
2657 hwstat->rx_alignment_errors +
2658 hwstat->rx_resource_errors +
2659 hwstat->rx_overruns +
2660 hwstat->rx_oversize_frames +
2661 hwstat->rx_jabbers +
2662 hwstat->rx_undersized_frames +
2663 hwstat->rx_length_field_frame_errors);
2664 nstat->tx_errors = (hwstat->tx_late_collisions +
2665 hwstat->tx_excessive_collisions +
2666 hwstat->tx_underrun +
2667 hwstat->tx_carrier_sense_errors);
2668 nstat->multicast = hwstat->rx_multicast_frames;
2669 nstat->collisions = (hwstat->tx_single_collision_frames +
2670 hwstat->tx_multiple_collision_frames +
2671 hwstat->tx_excessive_collisions);
2672 nstat->rx_length_errors = (hwstat->rx_oversize_frames +
2673 hwstat->rx_jabbers +
2674 hwstat->rx_undersized_frames +
2675 hwstat->rx_length_field_frame_errors);
2676 nstat->rx_over_errors = hwstat->rx_resource_errors;
2677 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
2678 nstat->rx_frame_errors = hwstat->rx_alignment_errors;
2679 nstat->rx_fifo_errors = hwstat->rx_overruns;
2680 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
2681 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
2682 nstat->tx_fifo_errors = hwstat->tx_underrun;
2683
2684 return nstat;
2685}
2686
2687static void gem_get_ethtool_stats(struct net_device *dev,
2688 struct ethtool_stats *stats, u64 *data)
2689{
2690 struct macb *bp;
2691
2692 bp = netdev_priv(dev);
2693 gem_update_stats(bp);
2694 memcpy(data, &bp->ethtool_stats, sizeof(u64)
2695 * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
2696}
2697
2698static int gem_get_sset_count(struct net_device *dev, int sset)
2699{
2700 struct macb *bp = netdev_priv(dev);
2701
2702 switch (sset) {
2703 case ETH_SS_STATS:
2704 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
2705 default:
2706 return -EOPNOTSUPP;
2707 }
2708}
2709
2710static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2711{
2712 char stat_string[ETH_GSTRING_LEN];
2713 struct macb *bp = netdev_priv(dev);
2714 struct macb_queue *queue;
2715 unsigned int i;
2716 unsigned int q;
2717
2718 switch (sset) {
2719 case ETH_SS_STATS:
2720 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2721 memcpy(p, gem_statistics[i].stat_string,
2722 ETH_GSTRING_LEN);
2723
2724 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2725 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
2726 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
2727 q, queue_statistics[i].stat_string);
2728 memcpy(p, stat_string, ETH_GSTRING_LEN);
2729 }
2730 }
2731 break;
2732 }
2733}
2734
2735static struct net_device_stats *macb_get_stats(struct net_device *dev)
2736{
2737 struct macb *bp = netdev_priv(dev);
2738 struct net_device_stats *nstat = &bp->dev->stats;
2739 struct macb_stats *hwstat = &bp->hw_stats.macb;
2740
2741 if (macb_is_gem(bp))
2742 return gem_get_stats(bp);
2743
2744
2745 macb_update_stats(bp);
2746
2747
2748 nstat->rx_errors = (hwstat->rx_fcs_errors +
2749 hwstat->rx_align_errors +
2750 hwstat->rx_resource_errors +
2751 hwstat->rx_overruns +
2752 hwstat->rx_oversize_pkts +
2753 hwstat->rx_jabbers +
2754 hwstat->rx_undersize_pkts +
2755 hwstat->rx_length_mismatch);
2756 nstat->tx_errors = (hwstat->tx_late_cols +
2757 hwstat->tx_excessive_cols +
2758 hwstat->tx_underruns +
2759 hwstat->tx_carrier_errors +
2760 hwstat->sqe_test_errors);
2761 nstat->collisions = (hwstat->tx_single_cols +
2762 hwstat->tx_multiple_cols +
2763 hwstat->tx_excessive_cols);
2764 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2765 hwstat->rx_jabbers +
2766 hwstat->rx_undersize_pkts +
2767 hwstat->rx_length_mismatch);
2768 nstat->rx_over_errors = hwstat->rx_resource_errors +
2769 hwstat->rx_overruns;
2770 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2771 nstat->rx_frame_errors = hwstat->rx_align_errors;
2772 nstat->rx_fifo_errors = hwstat->rx_overruns;
2773
2774 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2775 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2776 nstat->tx_fifo_errors = hwstat->tx_underruns;
2777
2778
2779 return nstat;
2780}
2781
2782static int macb_get_regs_len(struct net_device *netdev)
2783{
2784 return MACB_GREGS_NBR * sizeof(u32);
2785}
2786
2787static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2788 void *p)
2789{
2790 struct macb *bp = netdev_priv(dev);
2791 unsigned int tail, head;
2792 u32 *regs_buff = p;
2793
2794 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2795 | MACB_GREGS_VERSION;
2796
2797 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
2798 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
2799
2800 regs_buff[0] = macb_readl(bp, NCR);
2801 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
2802 regs_buff[2] = macb_readl(bp, NSR);
2803 regs_buff[3] = macb_readl(bp, TSR);
2804 regs_buff[4] = macb_readl(bp, RBQP);
2805 regs_buff[5] = macb_readl(bp, TBQP);
2806 regs_buff[6] = macb_readl(bp, RSR);
2807 regs_buff[7] = macb_readl(bp, IMR);
2808
2809 regs_buff[8] = tail;
2810 regs_buff[9] = head;
2811 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2812 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
2813
2814 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2815 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
2816 if (macb_is_gem(bp))
2817 regs_buff[13] = gem_readl(bp, DMACFG);
2818}
2819
2820static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2821{
2822 struct macb *bp = netdev_priv(netdev);
2823
2824 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
2825 phylink_ethtool_get_wol(bp->phylink, wol);
2826 wol->supported |= WAKE_MAGIC;
2827
2828 if (bp->wol & MACB_WOL_ENABLED)
2829 wol->wolopts |= WAKE_MAGIC;
2830 }
2831}
2832
2833static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2834{
2835 struct macb *bp = netdev_priv(netdev);
2836 int ret;
2837
2838
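	/* Pass the WoL request to the phylink/PHY layer first */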
2839 ret = phylink_ethtool_set_wol(bp->phylink, wol);
	/* Don't manage WoL on MAC if handled by the PHY
	 * or if there's a failure in talking to the PHY
	 */
2843 if (!ret || ret != -EOPNOTSUPP)
2844 return ret;
2845
2846 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
2847 (wol->wolopts & ~WAKE_MAGIC))
2848 return -EOPNOTSUPP;
2849
2850 if (wol->wolopts & WAKE_MAGIC)
2851 bp->wol |= MACB_WOL_ENABLED;
2852 else
2853 bp->wol &= ~MACB_WOL_ENABLED;
2854
2855 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
2856
2857 return 0;
2858}
2859
2860static int macb_get_link_ksettings(struct net_device *netdev,
2861 struct ethtool_link_ksettings *kset)
2862{
2863 struct macb *bp = netdev_priv(netdev);
2864
2865 return phylink_ethtool_ksettings_get(bp->phylink, kset);
2866}
2867
2868static int macb_set_link_ksettings(struct net_device *netdev,
2869 const struct ethtool_link_ksettings *kset)
2870{
2871 struct macb *bp = netdev_priv(netdev);
2872
2873 return phylink_ethtool_ksettings_set(bp->phylink, kset);
2874}
2875
2876static void macb_get_ringparam(struct net_device *netdev,
2877 struct ethtool_ringparam *ring)
2878{
2879 struct macb *bp = netdev_priv(netdev);
2880
2881 ring->rx_max_pending = MAX_RX_RING_SIZE;
2882 ring->tx_max_pending = MAX_TX_RING_SIZE;
2883
2884 ring->rx_pending = bp->rx_ring_size;
2885 ring->tx_pending = bp->tx_ring_size;
2886}
2887
2888static int macb_set_ringparam(struct net_device *netdev,
2889 struct ethtool_ringparam *ring)
2890{
2891 struct macb *bp = netdev_priv(netdev);
2892 u32 new_rx_size, new_tx_size;
2893 unsigned int reset = 0;
2894
2895 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2896 return -EINVAL;
2897
2898 new_rx_size = clamp_t(u32, ring->rx_pending,
2899 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
2900 new_rx_size = roundup_pow_of_two(new_rx_size);
2901
2902 new_tx_size = clamp_t(u32, ring->tx_pending,
2903 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
2904 new_tx_size = roundup_pow_of_two(new_tx_size);
2905
2906 if ((new_tx_size == bp->tx_ring_size) &&
2907 (new_rx_size == bp->rx_ring_size)) {
2908
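		/* nothing to do */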
2909 return 0;
2910 }
2911
2912 if (netif_running(bp->dev)) {
2913 reset = 1;
2914 macb_close(bp->dev);
2915 }
2916
2917 bp->rx_ring_size = new_rx_size;
2918 bp->tx_ring_size = new_tx_size;
2919
2920 if (reset)
2921 macb_open(bp->dev);
2922
2923 return 0;
2924}
2925
2926#ifdef CONFIG_MACB_USE_HWSTAMP
2927static unsigned int gem_get_tsu_rate(struct macb *bp)
2928{
2929 struct clk *tsu_clk;
2930 unsigned int tsu_rate;
2931
2932 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
2933 if (!IS_ERR(tsu_clk))
2934 tsu_rate = clk_get_rate(tsu_clk);
	/* try pclk instead */
2936 else if (!IS_ERR(bp->pclk)) {
2937 tsu_clk = bp->pclk;
2938 tsu_rate = clk_get_rate(tsu_clk);
2939 } else
2940 return -ENOTSUPP;
2941 return tsu_rate;
2942}
2943
2944static s32 gem_get_ptp_max_adj(void)
2945{
2946 return 64000000;
2947}
2948
2949static int gem_get_ts_info(struct net_device *dev,
2950 struct ethtool_ts_info *info)
2951{
2952 struct macb *bp = netdev_priv(dev);
2953
2954 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
2955 ethtool_op_get_ts_info(dev, info);
2956 return 0;
2957 }
2958
2959 info->so_timestamping =
2960 SOF_TIMESTAMPING_TX_SOFTWARE |
2961 SOF_TIMESTAMPING_RX_SOFTWARE |
2962 SOF_TIMESTAMPING_SOFTWARE |
2963 SOF_TIMESTAMPING_TX_HARDWARE |
2964 SOF_TIMESTAMPING_RX_HARDWARE |
2965 SOF_TIMESTAMPING_RAW_HARDWARE;
2966 info->tx_types =
2967 (1 << HWTSTAMP_TX_ONESTEP_SYNC) |
2968 (1 << HWTSTAMP_TX_OFF) |
2969 (1 << HWTSTAMP_TX_ON);
2970 info->rx_filters =
2971 (1 << HWTSTAMP_FILTER_NONE) |
2972 (1 << HWTSTAMP_FILTER_ALL);
2973
2974 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
2975
2976 return 0;
2977}
2978
2979static struct macb_ptp_info gem_ptp_info = {
2980 .ptp_init = gem_ptp_init,
2981 .ptp_remove = gem_ptp_remove,
2982 .get_ptp_max_adj = gem_get_ptp_max_adj,
2983 .get_tsu_rate = gem_get_tsu_rate,
2984 .get_ts_info = gem_get_ts_info,
2985 .get_hwtst = gem_get_hwtst,
2986 .set_hwtst = gem_set_hwtst,
2987};
2988#endif
2989
2990static int macb_get_ts_info(struct net_device *netdev,
2991 struct ethtool_ts_info *info)
2992{
2993 struct macb *bp = netdev_priv(netdev);
2994
2995 if (bp->ptp_info)
2996 return bp->ptp_info->get_ts_info(netdev, info);
2997
2998 return ethtool_op_get_ts_info(netdev, info);
2999}
3000
3001static void gem_enable_flow_filters(struct macb *bp, bool enable)
3002{
3003 struct net_device *netdev = bp->dev;
3004 struct ethtool_rx_fs_item *item;
3005 u32 t2_scr;
3006 int num_t2_scr;
3007
3008 if (!(netdev->features & NETIF_F_NTUPLE))
3009 return;
3010
3011 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
3012
3013 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3014 struct ethtool_rx_flow_spec *fs = &item->fs;
3015 struct ethtool_tcpip4_spec *tp4sp_m;
3016
3017 if (fs->location >= num_t2_scr)
3018 continue;
3019
3020 t2_scr = gem_readl_n(bp, SCRT2, fs->location);
3021
3022
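		/* enable/disable the screener register for this flow entry */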
3023 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
3024
3025
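		/* only enable comparisons for fields that are fully specified */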
3026 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
3027
3028 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
3029 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
3030 else
3031 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
3032
3033 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
3034 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
3035 else
3036 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
3037
3038 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
3039 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
3040 else
3041 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
3042
3043 gem_writel_n(bp, SCRT2, fs->location, t2_scr);
3044 }
3045}
3046
3047static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
3048{
3049 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
3050 uint16_t index = fs->location;
3051 u32 w0, w1, t2_scr;
3052 bool cmp_a = false;
3053 bool cmp_b = false;
3054 bool cmp_c = false;
3055
3056 tp4sp_v = &(fs->h_u.tcp_ip4_spec);
3057 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
3058
3059
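	/* only fields with an all-ones mask are compared */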
3060 if (tp4sp_m->ip4src == 0xFFFFFFFF) {
3061
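		/* compare A: IP source address */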
3062 w0 = 0;
3063 w1 = 0;
3064 w0 = tp4sp_v->ip4src;
3065 w1 = GEM_BFINS(T2DISMSK, 1, w1);
3066 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
3067 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
3068 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
3069 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
3070 cmp_a = true;
3071 }
3072
3073
3074 if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
3075
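		/* compare B: IP destination address */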
3076 w0 = 0;
3077 w1 = 0;
3078 w0 = tp4sp_v->ip4dst;
3079 w1 = GEM_BFINS(T2DISMSK, 1, w1);
3080 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
3081 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
3082 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
3083 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
3084 cmp_b = true;
3085 }
3086
3087
3088 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
3089
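		/* compare C: source/destination port */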
3090 w0 = 0;
3091 w1 = 0;
3092 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
3093 if (tp4sp_m->psrc == tp4sp_m->pdst) {
3094 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
3095 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
3096 w1 = GEM_BFINS(T2DISMSK, 1, w1);
3097 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
3098 } else {
3099
3100 w1 = GEM_BFINS(T2DISMSK, 0, w1);
3101 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
3102 if (tp4sp_m->psrc == 0xFFFF) {
3103 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
3104 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
3105 } else {
3106 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
3107 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
3108 }
3109 }
3110 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
3111 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
3112 cmp_c = true;
3113 }
3114
3115 t2_scr = 0;
3116 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
3117 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
3118 if (cmp_a)
3119 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
3120 if (cmp_b)
3121 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
3122 if (cmp_c)
3123 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
3124 gem_writel_n(bp, SCRT2, index, t2_scr);
3125}
3126
3127static int gem_add_flow_filter(struct net_device *netdev,
3128 struct ethtool_rxnfc *cmd)
3129{
3130 struct macb *bp = netdev_priv(netdev);
3131 struct ethtool_rx_flow_spec *fs = &cmd->fs;
3132 struct ethtool_rx_fs_item *item, *newfs;
3133 unsigned long flags;
3134 int ret = -EINVAL;
3135 bool added = false;
3136
3137 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
3138 if (newfs == NULL)
3139 return -ENOMEM;
3140 memcpy(&newfs->fs, fs, sizeof(newfs->fs));
3141
3142 netdev_dbg(netdev,
3143 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3144 fs->flow_type, (int)fs->ring_cookie, fs->location,
3145 htonl(fs->h_u.tcp_ip4_spec.ip4src),
3146 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3147 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
3148
3149 spin_lock_irqsave(&bp->rx_fs_lock, flags);
3150
3151
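	/* keep the list sorted by location; reject duplicate locations */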
3152 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3153 if (item->fs.location > newfs->fs.location) {
3154 list_add_tail(&newfs->list, &item->list);
3155 added = true;
3156 break;
3157 } else if (item->fs.location == fs->location) {
3158 netdev_err(netdev, "Rule not added: location %d not free!\n",
3159 fs->location);
3160 ret = -EBUSY;
3161 goto err;
3162 }
3163 }
3164 if (!added)
3165 list_add_tail(&newfs->list, &bp->rx_fs_list.list);
3166
3167 gem_prog_cmp_regs(bp, fs);
3168 bp->rx_fs_list.count++;
3169
3170 gem_enable_flow_filters(bp, 1);
3171
3172 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3173 return 0;
3174
3175err:
3176 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3177 kfree(newfs);
3178 return ret;
3179}
3180
3181static int gem_del_flow_filter(struct net_device *netdev,
3182 struct ethtool_rxnfc *cmd)
3183{
3184 struct macb *bp = netdev_priv(netdev);
3185 struct ethtool_rx_fs_item *item;
3186 struct ethtool_rx_flow_spec *fs;
3187 unsigned long flags;
3188
3189 spin_lock_irqsave(&bp->rx_fs_lock, flags);
3190
3191 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3192 if (item->fs.location == cmd->fs.location) {
3193
3194 fs = &(item->fs);
3195 netdev_dbg(netdev,
3196 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3197 fs->flow_type, (int)fs->ring_cookie, fs->location,
3198 htonl(fs->h_u.tcp_ip4_spec.ip4src),
3199 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3200 htons(fs->h_u.tcp_ip4_spec.psrc),
3201 htons(fs->h_u.tcp_ip4_spec.pdst));
3202
3203 gem_writel_n(bp, SCRT2, fs->location, 0);
3204
3205 list_del(&item->list);
3206 bp->rx_fs_list.count--;
3207 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3208 kfree(item);
3209 return 0;
3210 }
3211 }
3212
3213 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3214 return -EINVAL;
3215}
3216
3217static int gem_get_flow_entry(struct net_device *netdev,
3218 struct ethtool_rxnfc *cmd)
3219{
3220 struct macb *bp = netdev_priv(netdev);
3221 struct ethtool_rx_fs_item *item;
3222
3223 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3224 if (item->fs.location == cmd->fs.location) {
3225 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
3226 return 0;
3227 }
3228 }
3229 return -EINVAL;
3230}
3231
3232static int gem_get_all_flow_entries(struct net_device *netdev,
3233 struct ethtool_rxnfc *cmd, u32 *rule_locs)
3234{
3235 struct macb *bp = netdev_priv(netdev);
3236 struct ethtool_rx_fs_item *item;
3237 uint32_t cnt = 0;
3238
3239 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3240 if (cnt == cmd->rule_cnt)
3241 return -EMSGSIZE;
3242 rule_locs[cnt] = item->fs.location;
3243 cnt++;
3244 }
3245 cmd->data = bp->max_tuples;
3246 cmd->rule_cnt = cnt;
3247
3248 return 0;
3249}
3250
3251static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
3252 u32 *rule_locs)
3253{
3254 struct macb *bp = netdev_priv(netdev);
3255 int ret = 0;
3256
3257 switch (cmd->cmd) {
3258 case ETHTOOL_GRXRINGS:
3259 cmd->data = bp->num_queues;
3260 break;
3261 case ETHTOOL_GRXCLSRLCNT:
3262 cmd->rule_cnt = bp->rx_fs_list.count;
3263 break;
3264 case ETHTOOL_GRXCLSRULE:
3265 ret = gem_get_flow_entry(netdev, cmd);
3266 break;
3267 case ETHTOOL_GRXCLSRLALL:
3268 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
3269 break;
3270 default:
3271 netdev_err(netdev,
3272 "Command parameter %d is not supported\n", cmd->cmd);
3273 ret = -EOPNOTSUPP;
3274 }
3275
3276 return ret;
3277}
3278
3279static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
3280{
3281 struct macb *bp = netdev_priv(netdev);
3282 int ret;
3283
3284 switch (cmd->cmd) {
3285 case ETHTOOL_SRXCLSRLINS:
3286 if ((cmd->fs.location >= bp->max_tuples)
3287 || (cmd->fs.ring_cookie >= bp->num_queues)) {
3288 ret = -EINVAL;
3289 break;
3290 }
3291 ret = gem_add_flow_filter(netdev, cmd);
3292 break;
3293 case ETHTOOL_SRXCLSRLDEL:
3294 ret = gem_del_flow_filter(netdev, cmd);
3295 break;
3296 default:
3297 netdev_err(netdev,
3298 "Command parameter %d is not supported\n", cmd->cmd);
3299 ret = -EOPNOTSUPP;
3300 }
3301
3302 return ret;
3303}
3304
3305static const struct ethtool_ops macb_ethtool_ops = {
3306 .get_regs_len = macb_get_regs_len,
3307 .get_regs = macb_get_regs,
3308 .get_link = ethtool_op_get_link,
3309 .get_ts_info = ethtool_op_get_ts_info,
3310 .get_wol = macb_get_wol,
3311 .set_wol = macb_set_wol,
3312 .get_link_ksettings = macb_get_link_ksettings,
3313 .set_link_ksettings = macb_set_link_ksettings,
3314 .get_ringparam = macb_get_ringparam,
3315 .set_ringparam = macb_set_ringparam,
3316};
3317
3318static const struct ethtool_ops gem_ethtool_ops = {
3319 .get_regs_len = macb_get_regs_len,
3320 .get_regs = macb_get_regs,
3321 .get_link = ethtool_op_get_link,
3322 .get_ts_info = macb_get_ts_info,
3323 .get_ethtool_stats = gem_get_ethtool_stats,
3324 .get_strings = gem_get_ethtool_strings,
3325 .get_sset_count = gem_get_sset_count,
3326 .get_link_ksettings = macb_get_link_ksettings,
3327 .set_link_ksettings = macb_set_link_ksettings,
3328 .get_ringparam = macb_get_ringparam,
3329 .set_ringparam = macb_set_ringparam,
3330 .get_rxnfc = gem_get_rxnfc,
3331 .set_rxnfc = gem_set_rxnfc,
3332};
3333
3334static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3335{
3336 struct macb *bp = netdev_priv(dev);
3337
3338 if (!netif_running(dev))
3339 return -EINVAL;
3340
3341 if (bp->ptp_info) {
3342 switch (cmd) {
3343 case SIOCSHWTSTAMP:
3344 return bp->ptp_info->set_hwtst(dev, rq, cmd);
3345 case SIOCGHWTSTAMP:
3346 return bp->ptp_info->get_hwtst(dev, rq);
3347 }
3348 }
3349
3350 return phylink_mii_ioctl(bp->phylink, rq, cmd);
3351}
3352
3353static inline void macb_set_txcsum_feature(struct macb *bp,
3354 netdev_features_t features)
3355{
3356 u32 val;
3357
3358 if (!macb_is_gem(bp))
3359 return;
3360
3361 val = gem_readl(bp, DMACFG);
3362 if (features & NETIF_F_HW_CSUM)
3363 val |= GEM_BIT(TXCOEN);
3364 else
3365 val &= ~GEM_BIT(TXCOEN);
3366
3367 gem_writel(bp, DMACFG, val);
3368}
3369
3370static inline void macb_set_rxcsum_feature(struct macb *bp,
3371 netdev_features_t features)
3372{
3373 struct net_device *netdev = bp->dev;
3374 u32 val;
3375
3376 if (!macb_is_gem(bp))
3377 return;
3378
3379 val = gem_readl(bp, NCFGR);
3380 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC))
3381 val |= GEM_BIT(RXCOEN);
3382 else
3383 val &= ~GEM_BIT(RXCOEN);
3384
3385 gem_writel(bp, NCFGR, val);
3386}
3387
3388static inline void macb_set_rxflow_feature(struct macb *bp,
3389 netdev_features_t features)
3390{
3391 if (!macb_is_gem(bp))
3392 return;
3393
3394 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
3395}
3396
3397static int macb_set_features(struct net_device *netdev,
3398 netdev_features_t features)
3399{
3400 struct macb *bp = netdev_priv(netdev);
3401 netdev_features_t changed = features ^ netdev->features;
3402
3403
3404 if (changed & NETIF_F_HW_CSUM)
3405 macb_set_txcsum_feature(bp, features);
3406
3407
3408 if (changed & NETIF_F_RXCSUM)
3409 macb_set_rxcsum_feature(bp, features);
3410
3411
3412 if (changed & NETIF_F_NTUPLE)
3413 macb_set_rxflow_feature(bp, features);
3414
3415 return 0;
3416}
3417
3418static void macb_restore_features(struct macb *bp)
3419{
3420 struct net_device *netdev = bp->dev;
3421 netdev_features_t features = netdev->features;
3422
3423
3424 macb_set_txcsum_feature(bp, features);
3425
3426
3427 macb_set_rxcsum_feature(bp, features);
3428
3429
3430 macb_set_rxflow_feature(bp, features);
3431}
3432
3433static const struct net_device_ops macb_netdev_ops = {
3434 .ndo_open = macb_open,
3435 .ndo_stop = macb_close,
3436 .ndo_start_xmit = macb_start_xmit,
3437 .ndo_set_rx_mode = macb_set_rx_mode,
3438 .ndo_get_stats = macb_get_stats,
3439 .ndo_do_ioctl = macb_ioctl,
3440 .ndo_validate_addr = eth_validate_addr,
3441 .ndo_change_mtu = macb_change_mtu,
3442 .ndo_set_mac_address = eth_mac_addr,
3443#ifdef CONFIG_NET_POLL_CONTROLLER
3444 .ndo_poll_controller = macb_poll_controller,
3445#endif
3446 .ndo_set_features = macb_set_features,
3447 .ndo_features_check = macb_features_check,
3448};
3449
/* Configure peripheral capabilities according to device tree
 * and integration options used
 */
3453static void macb_configure_caps(struct macb *bp,
3454 const struct macb_config *dt_conf)
3455{
3456 u32 dcfg;
3457
3458 if (dt_conf)
3459 bp->caps = dt_conf->caps;
3460
3461 if (hw_is_gem(bp->regs, bp->native_io)) {
3462 bp->caps |= MACB_CAPS_MACB_IS_GEM;
3463
3464 dcfg = gem_readl(bp, DCFG1);
3465 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
3466 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
3467 dcfg = gem_readl(bp, DCFG2);
3468 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
3469 bp->caps |= MACB_CAPS_FIFO_MODE;
3470#ifdef CONFIG_MACB_USE_HWSTAMP
3471 if (gem_has_ptp(bp)) {
3472 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
3473 dev_err(&bp->pdev->dev,
3474 "GEM doesn't support hardware ptp.\n");
3475 else {
3476 bp->hw_dma_cap |= HW_DMA_CAP_PTP;
3477 bp->ptp_info = &gem_ptp_info;
3478 }
3479 }
3480#endif
3481 }
3482
3483 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
3484}
3485
3486static void macb_probe_queues(void __iomem *mem,
3487 bool native_io,
3488 unsigned int *queue_mask,
3489 unsigned int *num_queues)
3490{
3491 unsigned int hw_q;
3492
3493 *queue_mask = 0x1;
3494 *num_queues = 1;
3495
	/* is it macb or gem ?
	 *
	 * We need to read directly from the hardware here because
	 * we are early in the probe process and don't have the
	 * MACB_CAPS_MACB_IS_GEM flag positioned
	 */
3502 if (!hw_is_gem(mem, native_io))
3503 return;
3504
3505
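	/* bit 0 is never set but queue 0 always exists */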
3506 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
3507
3508 *queue_mask |= 0x1;
3509
3510 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
3511 if (*queue_mask & (1 << hw_q))
3512 (*num_queues)++;
3513}
3514
3515static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
3516 struct clk **hclk, struct clk **tx_clk,
3517 struct clk **rx_clk, struct clk **tsu_clk)
3518{
3519 struct macb_platform_data *pdata;
3520 int err;
3521
3522 pdata = dev_get_platdata(&pdev->dev);
3523 if (pdata) {
3524 *pclk = pdata->pclk;
3525 *hclk = pdata->hclk;
3526 } else {
3527 *pclk = devm_clk_get(&pdev->dev, "pclk");
3528 *hclk = devm_clk_get(&pdev->dev, "hclk");
3529 }
3530
3531 if (IS_ERR_OR_NULL(*pclk)) {
3532 err = PTR_ERR(*pclk);
3533 if (!err)
3534 err = -ENODEV;
3535
3536 dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
3537 return err;
3538 }
3539
3540 if (IS_ERR_OR_NULL(*hclk)) {
3541 err = PTR_ERR(*hclk);
3542 if (!err)
3543 err = -ENODEV;
3544
3545 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
3546 return err;
3547 }
3548
3549 *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
3550 if (IS_ERR(*tx_clk))
3551 return PTR_ERR(*tx_clk);
3552
3553 *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
3554 if (IS_ERR(*rx_clk))
3555 return PTR_ERR(*rx_clk);
3556
3557 *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
3558 if (IS_ERR(*tsu_clk))
3559 return PTR_ERR(*tsu_clk);
3560
3561 err = clk_prepare_enable(*pclk);
3562 if (err) {
3563 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
3564 return err;
3565 }
3566
3567 err = clk_prepare_enable(*hclk);
3568 if (err) {
3569 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
3570 goto err_disable_pclk;
3571 }
3572
3573 err = clk_prepare_enable(*tx_clk);
3574 if (err) {
3575 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
3576 goto err_disable_hclk;
3577 }
3578
3579 err = clk_prepare_enable(*rx_clk);
3580 if (err) {
3581 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
3582 goto err_disable_txclk;
3583 }
3584
3585 err = clk_prepare_enable(*tsu_clk);
3586 if (err) {
3587 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
3588 goto err_disable_rxclk;
3589 }
3590
3591 return 0;
3592
3593err_disable_rxclk:
3594 clk_disable_unprepare(*rx_clk);
3595
3596err_disable_txclk:
3597 clk_disable_unprepare(*tx_clk);
3598
3599err_disable_hclk:
3600 clk_disable_unprepare(*hclk);
3601
3602err_disable_pclk:
3603 clk_disable_unprepare(*pclk);
3604
3605 return err;
3606}
3607
3608static int macb_init(struct platform_device *pdev)
3609{
3610 struct net_device *dev = platform_get_drvdata(pdev);
3611 unsigned int hw_q, q;
3612 struct macb *bp = netdev_priv(dev);
3613 struct macb_queue *queue;
3614 int err;
3615 u32 val, reg;
3616
3617 bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
3618 bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
3619
	/* set the queue register mapping once for all: queue0 has a special
	 * register mapping but we don't want to test the queue index then
	 * compute the corresponding register offset at run time.
	 */
3624 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
3625 if (!(bp->queue_mask & (1 << hw_q)))
3626 continue;
3627
3628 queue = &bp->queues[q];
3629 queue->bp = bp;
3630 netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
3631 if (hw_q) {
3632 queue->ISR = GEM_ISR(hw_q - 1);
3633 queue->IER = GEM_IER(hw_q - 1);
3634 queue->IDR = GEM_IDR(hw_q - 1);
3635 queue->IMR = GEM_IMR(hw_q - 1);
3636 queue->TBQP = GEM_TBQP(hw_q - 1);
3637 queue->RBQP = GEM_RBQP(hw_q - 1);
3638 queue->RBQS = GEM_RBQS(hw_q - 1);
3639#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3640 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
3641 queue->TBQPH = GEM_TBQPH(hw_q - 1);
3642 queue->RBQPH = GEM_RBQPH(hw_q - 1);
3643 }
3644#endif
3645 } else {
3646
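			/* queue0 uses legacy registers */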
3647 queue->ISR = MACB_ISR;
3648 queue->IER = MACB_IER;
3649 queue->IDR = MACB_IDR;
3650 queue->IMR = MACB_IMR;
3651 queue->TBQP = MACB_TBQP;
3652 queue->RBQP = MACB_RBQP;
3653#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3654 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
3655 queue->TBQPH = MACB_TBQPH;
3656 queue->RBQPH = MACB_RBQPH;
3657 }
3658#endif
3659 }
3660
		/* get irq: here we use the linux queue index, not the hardware
		 * queue index. the queue irq definitions in the device tree
		 * must remove the optional gaps that could exist in the
		 * hardware queue mask.
		 */
3666 queue->irq = platform_get_irq(pdev, q);
3667 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
3668 IRQF_SHARED, dev->name, queue);
3669 if (err) {
3670 dev_err(&pdev->dev,
3671 "Unable to request IRQ %d (error %d)\n",
3672 queue->irq, err);
3673 return err;
3674 }
3675
3676 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
3677 q++;
3678 }
3679
3680 dev->netdev_ops = &macb_netdev_ops;
3681
3682
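	/* Set up GEM- or MACB-specific buffer handling routines */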
3683 if (macb_is_gem(bp)) {
3684 bp->max_tx_length = GEM_MAX_TX_LEN;
3685 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
3686 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
3687 bp->macbgem_ops.mog_init_rings = gem_init_rings;
3688 bp->macbgem_ops.mog_rx = gem_rx;
3689 dev->ethtool_ops = &gem_ethtool_ops;
3690 } else {
3691 bp->max_tx_length = MACB_MAX_TX_LEN;
3692 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
3693 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
3694 bp->macbgem_ops.mog_init_rings = macb_init_rings;
3695 bp->macbgem_ops.mog_rx = macb_rx;
3696 dev->ethtool_ops = &macb_ethtool_ops;
3697 }
3698
3699
3700 dev->hw_features = NETIF_F_SG;
3701
3702
3703 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
3704 dev->hw_features |= MACB_NETIF_LSO;
3705
3706
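	/* Checksum offload is only available on GEM with a packet buffer */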
3707 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
3708 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
3709 if (bp->caps & MACB_CAPS_SG_DISABLED)
3710 dev->hw_features &= ~NETIF_F_SG;
3711 dev->features = dev->hw_features;
3712
	/* Check RX Flow Filters support.
	 * Max Rx flows set by availability of screeners & compare regs:
	 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs
	 */
3717 reg = gem_readl(bp, DCFG8);
3718 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
3719 GEM_BFEXT(T2SCR, reg));
3720 if (bp->max_tuples > 0) {
3721
3722 if (GEM_BFEXT(SCR2ETH, reg) > 0) {
3723
3724 reg = 0;
3725 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
3726 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
3727
3728 dev->hw_features |= NETIF_F_NTUPLE;
3729
3730 INIT_LIST_HEAD(&bp->rx_fs_list.list);
3731 bp->rx_fs_list.count = 0;
3732 spin_lock_init(&bp->rx_fs_lock);
3733 } else
3734 bp->max_tuples = 0;
3735 }
3736
3737 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
3738 val = 0;
3739 if (phy_interface_mode_is_rgmii(bp->phy_interface))
3740 val = GEM_BIT(RGMII);
3741 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
3742 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
3743 val = MACB_BIT(RMII);
3744 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
3745 val = MACB_BIT(MII);
3746
3747 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
3748 val |= MACB_BIT(CLKEN);
3749
3750 macb_or_gem_writel(bp, USRIO, val);
3751 }
3752
3753
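	/* Program NCFGR: MDC clock divider, bus width, and SGMII/PCS if needed */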
3754 val = macb_mdc_clk_div(bp);
3755 val |= macb_dbw(bp);
3756 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
3757 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
3758 macb_writel(bp, NCFGR, val);
3759
3760 return 0;
3761}
3762
3763#if defined(CONFIG_OF)
/* 1518 byte maximum frame size, rounded up to a multiple of 64 */
3765#define AT91ETHER_MAX_RBUFF_SZ 0x600
/* max number of receive buffers */
3767#define AT91ETHER_MAX_RX_DESCR 9
3768
3769static struct sifive_fu540_macb_mgmt *mgmt;
3770
3771static int at91ether_alloc_coherent(struct macb *lp)
3772{
3773 struct macb_queue *q = &lp->queues[0];
3774
3775 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
3776 (AT91ETHER_MAX_RX_DESCR *
3777 macb_dma_desc_get_size(lp)),
3778 &q->rx_ring_dma, GFP_KERNEL);
3779 if (!q->rx_ring)
3780 return -ENOMEM;
3781
3782 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
3783 AT91ETHER_MAX_RX_DESCR *
3784 AT91ETHER_MAX_RBUFF_SZ,
3785 &q->rx_buffers_dma, GFP_KERNEL);
3786 if (!q->rx_buffers) {
3787 dma_free_coherent(&lp->pdev->dev,
3788 AT91ETHER_MAX_RX_DESCR *
3789 macb_dma_desc_get_size(lp),
3790 q->rx_ring, q->rx_ring_dma);
3791 q->rx_ring = NULL;
3792 return -ENOMEM;
3793 }
3794
3795 return 0;
3796}
3797
3798static void at91ether_free_coherent(struct macb *lp)
3799{
3800 struct macb_queue *q = &lp->queues[0];
3801
3802 if (q->rx_ring) {
3803 dma_free_coherent(&lp->pdev->dev,
3804 AT91ETHER_MAX_RX_DESCR *
3805 macb_dma_desc_get_size(lp),
3806 q->rx_ring, q->rx_ring_dma);
3807 q->rx_ring = NULL;
3808 }
3809
3810 if (q->rx_buffers) {
3811 dma_free_coherent(&lp->pdev->dev,
3812 AT91ETHER_MAX_RX_DESCR *
3813 AT91ETHER_MAX_RBUFF_SZ,
3814 q->rx_buffers, q->rx_buffers_dma);
3815 q->rx_buffers = NULL;
3816 }
3817}
3818
3819
3820static int at91ether_start(struct macb *lp)
3821{
3822 struct macb_queue *q = &lp->queues[0];
3823 struct macb_dma_desc *desc;
3824 dma_addr_t addr;
3825 u32 ctl;
3826 int i, ret;
3827
3828 ret = at91ether_alloc_coherent(lp);
3829 if (ret)
3830 return ret;
3831
3832 addr = q->rx_buffers_dma;
3833 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
3834 desc = macb_rx_desc(q, i);
3835 macb_set_addr(lp, desc, addr);
3836 desc->ctrl = 0;
3837 addr += AT91ETHER_MAX_RBUFF_SZ;
3838 }
3839
3840
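	/* Set the Wrap bit on the last descriptor */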
3841 desc->addr |= MACB_BIT(RX_WRAP);
3842
3843
3844 q->rx_tail = 0;
3845
3846
3847 macb_writel(lp, RBQP, q->rx_ring_dma);
3848
3849
3850 ctl = macb_readl(lp, NCR);
3851 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
3852
3853
3854 macb_writel(lp, IER, MACB_BIT(RCOMP) |
3855 MACB_BIT(RXUBR) |
3856 MACB_BIT(ISR_TUND) |
3857 MACB_BIT(ISR_RLE) |
3858 MACB_BIT(TCOMP) |
3859 MACB_BIT(ISR_ROVR) |
3860 MACB_BIT(HRESP));
3861
3862 return 0;
3863}
3864
3865static void at91ether_stop(struct macb *lp)
3866{
3867 u32 ctl;
3868
3869
3870 macb_writel(lp, IDR, MACB_BIT(RCOMP) |
3871 MACB_BIT(RXUBR) |
3872 MACB_BIT(ISR_TUND) |
3873 MACB_BIT(ISR_RLE) |
3874 MACB_BIT(TCOMP) |
3875 MACB_BIT(ISR_ROVR) |
3876 MACB_BIT(HRESP));
3877
3878
3879 ctl = macb_readl(lp, NCR);
3880 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
3881
3882
3883 at91ether_free_coherent(lp);
3884}
3885
3886
3887static int at91ether_open(struct net_device *dev)
3888{
3889 struct macb *lp = netdev_priv(dev);
3890 u32 ctl;
3891 int ret;
3892
3893 ret = pm_runtime_get_sync(&lp->pdev->dev);
3894 if (ret < 0) {
3895 pm_runtime_put_noidle(&lp->pdev->dev);
3896 return ret;
3897 }
3898
3899
3900 ctl = macb_readl(lp, NCR);
3901 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
3902
3903 macb_set_hwaddr(lp);
3904
3905 ret = at91ether_start(lp);
3906 if (ret)
3907 goto pm_exit;
3908
3909 ret = macb_phylink_connect(lp);
3910 if (ret)
3911 goto stop;
3912
3913 netif_start_queue(dev);
3914
3915 return 0;
3916
3917stop:
3918 at91ether_stop(lp);
3919pm_exit:
3920 pm_runtime_put_sync(&lp->pdev->dev);
3921 return ret;
3922}
3923
3924
3925static int at91ether_close(struct net_device *dev)
3926{
3927 struct macb *lp = netdev_priv(dev);
3928
3929 netif_stop_queue(dev);
3930
3931 phylink_stop(lp->phylink);
3932 phylink_disconnect_phy(lp->phylink);
3933
3934 at91ether_stop(lp);
3935
3936 return pm_runtime_put(&lp->pdev->dev);
3937}
3938
3939
3940static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
3941 struct net_device *dev)
3942{
3943 struct macb *lp = netdev_priv(dev);
3944
3945 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
3946 netif_stop_queue(dev);
3947
3948
3949 lp->skb = skb;
3950 lp->skb_length = skb->len;
3951 lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
3952 skb->len, DMA_TO_DEVICE);
3953 if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
3954 dev_kfree_skb_any(skb);
3955 dev->stats.tx_dropped++;
3956 netdev_err(dev, "%s: DMA mapping error\n", __func__);
3957 return NETDEV_TX_OK;
3958 }
3959
3960
3961 macb_writel(lp, TAR, lp->skb_physaddr);
3962
3963 macb_writel(lp, TCR, skb->len);
3964
3965 } else {
3966 netdev_err(dev, "%s called, but device is busy!\n", __func__);
3967 return NETDEV_TX_BUSY;
3968 }
3969
3970 return NETDEV_TX_OK;
3971}
3972
/* Extract received frames from the buffer descriptors and pass them to the
 * upper layers.  (Called from interrupt context.)
 */
3976static void at91ether_rx(struct net_device *dev)
3977{
3978 struct macb *lp = netdev_priv(dev);
3979 struct macb_queue *q = &lp->queues[0];
3980 struct macb_dma_desc *desc;
3981 unsigned char *p_recv;
3982 struct sk_buff *skb;
3983 unsigned int pktlen;
3984
3985 desc = macb_rx_desc(q, q->rx_tail);
3986 while (desc->addr & MACB_BIT(RX_USED)) {
3987 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
3988 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
3989 skb = netdev_alloc_skb(dev, pktlen + 2);
3990 if (skb) {
3991 skb_reserve(skb, 2);
3992 skb_put_data(skb, p_recv, pktlen);
3993
3994 skb->protocol = eth_type_trans(skb, dev);
3995 dev->stats.rx_packets++;
3996 dev->stats.rx_bytes += pktlen;
3997 netif_rx(skb);
3998 } else {
3999 dev->stats.rx_dropped++;
4000 }
4001
4002 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
4003 dev->stats.multicast++;
4004
4005
4006 desc->addr &= ~MACB_BIT(RX_USED);
4007
4008
4009 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
4010 q->rx_tail = 0;
4011 else
4012 q->rx_tail++;
4013
4014 desc = macb_rx_desc(q, q->rx_tail);
4015 }
4016}
4017
4018
4019static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
4020{
4021 struct net_device *dev = dev_id;
4022 struct macb *lp = netdev_priv(dev);
4023 u32 intstatus, ctl;
4024
	/* The MAC Interrupt Status register indicates which interrupts are
	 * pending.  It is automatically cleared once read.
	 */
4028 intstatus = macb_readl(lp, ISR);
4029
4030
4031 if (intstatus & MACB_BIT(RCOMP))
4032 at91ether_rx(dev);
4033
4034
4035 if (intstatus & MACB_BIT(TCOMP)) {
4036
4037 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
4038 dev->stats.tx_errors++;
4039
4040 if (lp->skb) {
4041 dev_consume_skb_irq(lp->skb);
4042 lp->skb = NULL;
4043 dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
4044 lp->skb_length, DMA_TO_DEVICE);
4045 dev->stats.tx_packets++;
4046 dev->stats.tx_bytes += lp->skb_length;
4047 }
4048 netif_wake_queue(dev);
4049 }
4050
4051
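	/* RX used-bit read (RXUBR): work around an EMAC erratum by toggling RE */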
4052 if (intstatus & MACB_BIT(RXUBR)) {
4053 ctl = macb_readl(lp, NCR);
4054 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
4055 wmb();
4056 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
4057 }
4058
4059 if (intstatus & MACB_BIT(ISR_ROVR))
4060 netdev_err(dev, "ROVR error\n");
4061
4062 return IRQ_HANDLED;
4063}
4064
4065#ifdef CONFIG_NET_POLL_CONTROLLER
4066static void at91ether_poll_controller(struct net_device *dev)
4067{
4068 unsigned long flags;
4069
4070 local_irq_save(flags);
4071 at91ether_interrupt(dev->irq, dev);
4072 local_irq_restore(flags);
4073}
4074#endif
4075
4076static const struct net_device_ops at91ether_netdev_ops = {
4077 .ndo_open = at91ether_open,
4078 .ndo_stop = at91ether_close,
4079 .ndo_start_xmit = at91ether_start_xmit,
4080 .ndo_get_stats = macb_get_stats,
4081 .ndo_set_rx_mode = macb_set_rx_mode,
4082 .ndo_set_mac_address = eth_mac_addr,
4083 .ndo_do_ioctl = macb_ioctl,
4084 .ndo_validate_addr = eth_validate_addr,
4085#ifdef CONFIG_NET_POLL_CONTROLLER
4086 .ndo_poll_controller = at91ether_poll_controller,
4087#endif
4088};
4089
4090static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
4091 struct clk **hclk, struct clk **tx_clk,
4092 struct clk **rx_clk, struct clk **tsu_clk)
4093{
4094 int err;
4095
4096 *hclk = NULL;
4097 *tx_clk = NULL;
4098 *rx_clk = NULL;
4099 *tsu_clk = NULL;
4100
4101 *pclk = devm_clk_get(&pdev->dev, "ether_clk");
4102 if (IS_ERR(*pclk))
4103 return PTR_ERR(*pclk);
4104
4105 err = clk_prepare_enable(*pclk);
4106 if (err) {
4107 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
4108 return err;
4109 }
4110
4111 return 0;
4112}
4113
4114static int at91ether_init(struct platform_device *pdev)
4115{
4116 struct net_device *dev = platform_get_drvdata(pdev);
4117 struct macb *bp = netdev_priv(dev);
4118 int err;
4119
4120 bp->queues[0].bp = bp;
4121
4122 dev->netdev_ops = &at91ether_netdev_ops;
4123 dev->ethtool_ops = &macb_ethtool_ops;
4124
4125 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
4126 0, dev->name, dev);
4127 if (err)
4128 return err;
4129
4130 macb_writel(bp, NCR, 0);
4131
4132 macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG));
4133
4134 return 0;
4135}
4136
4137static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
4138 unsigned long parent_rate)
4139{
4140 return mgmt->rate;
4141}
4142
4143static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
4144 unsigned long *parent_rate)
4145{
4146 if (WARN_ON(rate < 2500000))
4147 return 2500000;
4148 else if (rate == 2500000)
4149 return 2500000;
4150 else if (WARN_ON(rate < 13750000))
4151 return 2500000;
4152 else if (WARN_ON(rate < 25000000))
4153 return 25000000;
4154 else if (rate == 25000000)
4155 return 25000000;
4156 else if (WARN_ON(rate < 75000000))
4157 return 25000000;
4158 else if (WARN_ON(rate < 125000000))
4159 return 125000000;
4160 else if (rate == 125000000)
4161 return 125000000;
4162
4163 WARN_ON(rate > 125000000);
4164
4165 return 125000000;
4166}
4167
4168static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
4169 unsigned long parent_rate)
4170{
4171 rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
4172 if (rate != 125000000)
4173 iowrite32(1, mgmt->reg);
4174 else
4175 iowrite32(0, mgmt->reg);
4176 mgmt->rate = rate;
4177
4178 return 0;
4179}
4180
4181static const struct clk_ops fu540_c000_ops = {
4182 .recalc_rate = fu540_macb_tx_recalc_rate,
4183 .round_rate = fu540_macb_tx_round_rate,
4184 .set_rate = fu540_macb_tx_set_rate,
4185};
4186
4187static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
4188 struct clk **hclk, struct clk **tx_clk,
4189 struct clk **rx_clk, struct clk **tsu_clk)
4190{
4191 struct clk_init_data init;
4192 int err = 0;
4193
4194 err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
4195 if (err)
4196 return err;
4197
4198 mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
4199 if (!mgmt)
4200 return -ENOMEM;
4201
4202 init.name = "sifive-gemgxl-mgmt";
4203 init.ops = &fu540_c000_ops;
4204 init.flags = 0;
4205 init.num_parents = 0;
4206
4207 mgmt->rate = 0;
4208 mgmt->hw.init = &init;
4209
4210 *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
4211 if (IS_ERR(*tx_clk))
4212 return PTR_ERR(*tx_clk);
4213
4214 err = clk_prepare_enable(*tx_clk);
4215 if (err)
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
4217 else
4218 dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
4219
4220 return 0;
4221}
4222
4223static int fu540_c000_init(struct platform_device *pdev)
4224{
4225 mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
4226 if (IS_ERR(mgmt->reg))
4227 return PTR_ERR(mgmt->reg);
4228
4229 return macb_init(pdev);
4230}
4231
4232static const struct macb_config fu540_c000_config = {
4233 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
4234 MACB_CAPS_GEM_HAS_PTP,
4235 .dma_burst_length = 16,
4236 .clk_init = fu540_c000_clk_init,
4237 .init = fu540_c000_init,
4238 .jumbo_max_len = 10240,
4239};
4240
4241static const struct macb_config at91sam9260_config = {
4242 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4243 .clk_init = macb_clk_init,
4244 .init = macb_init,
4245};
4246
4247static const struct macb_config sama5d3macb_config = {
4248 .caps = MACB_CAPS_SG_DISABLED
4249 | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4250 .clk_init = macb_clk_init,
4251 .init = macb_init,
4252};
4253
4254static const struct macb_config pc302gem_config = {
4255 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
4256 .dma_burst_length = 16,
4257 .clk_init = macb_clk_init,
4258 .init = macb_init,
4259};
4260
4261static const struct macb_config sama5d2_config = {
4262 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4263 .dma_burst_length = 16,
4264 .clk_init = macb_clk_init,
4265 .init = macb_init,
4266};
4267
4268static const struct macb_config sama5d3_config = {
4269 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
4270 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
4271 .dma_burst_length = 16,
4272 .clk_init = macb_clk_init,
4273 .init = macb_init,
4274 .jumbo_max_len = 10240,
4275};
4276
4277static const struct macb_config sama5d4_config = {
4278 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4279 .dma_burst_length = 4,
4280 .clk_init = macb_clk_init,
4281 .init = macb_init,
4282};
4283
4284static const struct macb_config emac_config = {
4285 .caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
4286 .clk_init = at91ether_clk_init,
4287 .init = at91ether_init,
4288};
4289
4290static const struct macb_config np4_config = {
4291 .caps = MACB_CAPS_USRIO_DISABLED,
4292 .clk_init = macb_clk_init,
4293 .init = macb_init,
4294};
4295
4296static const struct macb_config zynqmp_config = {
4297 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4298 MACB_CAPS_JUMBO |
4299 MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
4300 .dma_burst_length = 16,
4301 .clk_init = macb_clk_init,
4302 .init = macb_init,
4303 .jumbo_max_len = 10240,
4304};
4305
4306static const struct macb_config zynq_config = {
4307 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
4308 MACB_CAPS_NEEDS_RSTONUBR,
4309 .dma_burst_length = 16,
4310 .clk_init = macb_clk_init,
4311 .init = macb_init,
4312};
4313
4314static const struct of_device_id macb_dt_ids[] = {
4315 { .compatible = "cdns,at32ap7000-macb" },
4316 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
4317 { .compatible = "cdns,macb" },
4318 { .compatible = "cdns,np4-macb", .data = &np4_config },
4319 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
4320 { .compatible = "cdns,gem", .data = &pc302gem_config },
4321 { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
4322 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
4323 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
4324 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
4325 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
4326 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
4327 { .compatible = "cdns,emac", .data = &emac_config },
4328 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
4329 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
4330 { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
4331 { }
4332};
4333MODULE_DEVICE_TABLE(of, macb_dt_ids);
4334#endif
4335
4336static const struct macb_config default_gem_config = {
4337 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4338 MACB_CAPS_JUMBO |
4339 MACB_CAPS_GEM_HAS_PTP,
4340 .dma_burst_length = 16,
4341 .clk_init = macb_clk_init,
4342 .init = macb_init,
4343 .jumbo_max_len = 10240,
4344};
4345
4346static int macb_probe(struct platform_device *pdev)
4347{
4348 const struct macb_config *macb_config = &default_gem_config;
4349 int (*clk_init)(struct platform_device *, struct clk **,
4350 struct clk **, struct clk **, struct clk **,
4351 struct clk **) = macb_config->clk_init;
4352 int (*init)(struct platform_device *) = macb_config->init;
4353 struct device_node *np = pdev->dev.of_node;
4354 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
4355 struct clk *tsu_clk = NULL;
4356 unsigned int queue_mask, num_queues;
4357 bool native_io;
4358 phy_interface_t interface;
4359 struct net_device *dev;
4360 struct resource *regs;
4361 void __iomem *mem;
4362 const char *mac;
4363 struct macb *bp;
4364 int err, val;
4365
4366 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4367 mem = devm_ioremap_resource(&pdev->dev, regs);
4368 if (IS_ERR(mem))
4369 return PTR_ERR(mem);
4370
4371 if (np) {
4372 const struct of_device_id *match;
4373
4374 match = of_match_node(macb_dt_ids, np);
4375 if (match && match->data) {
4376 macb_config = match->data;
4377 clk_init = macb_config->clk_init;
4378 init = macb_config->init;
4379 }
4380 }
4381
4382 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
4383 if (err)
4384 return err;
4385
4386 pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
4387 pm_runtime_use_autosuspend(&pdev->dev);
4388 pm_runtime_get_noresume(&pdev->dev);
4389 pm_runtime_set_active(&pdev->dev);
4390 pm_runtime_enable(&pdev->dev);
4391 native_io = hw_is_native_io(mem);
4392
4393 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
4394 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
4395 if (!dev) {
4396 err = -ENOMEM;
4397 goto err_disable_clocks;
4398 }
4399
4400 dev->base_addr = regs->start;
4401
4402 SET_NETDEV_DEV(dev, &pdev->dev);
4403
4404 bp = netdev_priv(dev);
4405 bp->pdev = pdev;
4406 bp->dev = dev;
4407 bp->regs = mem;
4408 bp->native_io = native_io;
4409 if (native_io) {
4410 bp->macb_reg_readl = hw_readl_native;
4411 bp->macb_reg_writel = hw_writel_native;
4412 } else {
4413 bp->macb_reg_readl = hw_readl;
4414 bp->macb_reg_writel = hw_writel;
4415 }
4416 bp->num_queues = num_queues;
4417 bp->queue_mask = queue_mask;
4418 if (macb_config)
4419 bp->dma_burst_length = macb_config->dma_burst_length;
4420 bp->pclk = pclk;
4421 bp->hclk = hclk;
4422 bp->tx_clk = tx_clk;
4423 bp->rx_clk = rx_clk;
4424 bp->tsu_clk = tsu_clk;
4425 if (macb_config)
4426 bp->jumbo_max_len = macb_config->jumbo_max_len;
4427
4428 bp->wol = 0;
4429 if (of_get_property(np, "magic-packet", NULL))
4430 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
4431 device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
4432
4433 spin_lock_init(&bp->lock);
4434
4435
4436 macb_configure_caps(bp, macb_config);
4437
4438#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
4439 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
4440 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
4441 bp->hw_dma_cap |= HW_DMA_CAP_64B;
4442 }
4443#endif
4444 platform_set_drvdata(pdev, dev);
4445
4446 dev->irq = platform_get_irq(pdev, 0);
4447 if (dev->irq < 0) {
4448 err = dev->irq;
4449 goto err_out_free_netdev;
4450 }
4451
4452
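	/* MTU range: 68 to 1500, or up to the jumbo frame limit when supported */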
4453 dev->min_mtu = GEM_MTU_MIN_SIZE;
4454 if (bp->caps & MACB_CAPS_JUMBO)
4455 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
4456 else
4457 dev->max_mtu = ETH_DATA_LEN;
4458
	if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
		val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);

		val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);
	}

	bp->rx_intr_mask = MACB_RX_INT_FLAGS;
	if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
		bp->rx_intr_mask |= MACB_BIT(RXUBR);

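	/* Prefer a MAC address from the device tree; if none is provided,
	 * fall back to whatever is already programmed into the controller.
	 */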
	mac = of_get_mac_address(np);
	if (PTR_ERR(mac) == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_out_free_netdev;
	} else if (!IS_ERR_OR_NULL(mac)) {
		ether_addr_copy(bp->dev->dev_addr, mac);
	} else {
		macb_get_hwaddr(bp);
	}

	err = of_get_phy_mode(np, &interface);
	if (err)
		/* not found in DT, MII by default */
		bp->phy_interface = PHY_INTERFACE_MODE_MII;
	else
		bp->phy_interface = interface;

	/* IP specific init */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
		     (unsigned long)bp);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);

	return 0;

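/* Error unwind: release resources in the reverse order they were acquired. */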
err_out_unregister_mdio:
	mdiobus_unregister(bp->mii_bus);
	mdiobus_free(bp->mii_bus);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);
	clk_disable_unprepare(tsu_clk);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	return err;
}

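/* Undo everything macb_probe() set up: unregister the MDIO bus and the
 * net_device, kill the hresp error tasklet, drop the clocks and runtime PM
 * state, tear down phylink, and finally free the net_device.
 */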
static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);

		unregister_netdev(dev);
		tasklet_kill(&bp->hresp_err_tasklet);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		if (!pm_runtime_suspended(&pdev->dev)) {
			clk_disable_unprepare(bp->tx_clk);
			clk_disable_unprepare(bp->hclk);
			clk_disable_unprepare(bp->pclk);
			clk_disable_unprepare(bp->rx_clk);
			clk_disable_unprepare(bp->tsu_clk);
			pm_runtime_set_suspended(&pdev->dev);
		}
		phylink_destroy(bp->phylink);
		free_netdev(dev);
	}

	return 0;
}

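/* System suspend: with Wake-on-LAN armed, keep the controller powered and
 * enable the magic-packet wake interrupt; otherwise quiesce NAPI and
 * phylink, reset the hardware and save the state that must survive sleep.
 */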
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned long flags;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
		netif_device_detach(netdev);
	} else {
		netif_device_detach(netdev);
		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_disable(&queue->napi);
		rtnl_lock();
		phylink_stop(bp->phylink);
		rtnl_unlock();
		spin_lock_irqsave(&bp->lock, flags);
		macb_reset_hw(bp);
		spin_unlock_irqrestore(&bp->lock, flags);

		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
			bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);

		if (netdev->hw_features & NETIF_F_NTUPLE)
			bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
	}

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(netdev);
	if (!device_may_wakeup(dev))
		pm_runtime_force_suspend(dev);

	return 0;
}

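/* System resume: disarm Wake-on-LAN or restore the saved register state,
 * re-enable NAPI and phylink, then reprogram the MAC and reattach the
 * net_device.
 */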
static int __maybe_unused macb_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue = bp->queues;
	unsigned int q;

	if (!netif_running(netdev))
		return 0;

	if (!device_may_wakeup(dev))
		pm_runtime_force_resume(dev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		macb_writel(bp, NCR, MACB_BIT(MPE));

		if (netdev->hw_features & NETIF_F_NTUPLE)
			gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);

		if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
			macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);

		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue)
			napi_enable(&queue->napi);
		rtnl_lock();
		phylink_start(bp->phylink);
		rtnl_unlock();
	}

	macb_init_hw(bp);
	macb_set_rx_mode(netdev);
	macb_restore_features(bp);
	netif_device_attach(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_init(netdev);

	return 0;
}

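/* Runtime PM: gate the clocks while the interface is idle. The core clocks
 * are kept running when the device is allowed to wake the system; the TSU
 * clock is always gated here and re-enabled on runtime resume.
 */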
static int __maybe_unused macb_runtime_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(dev))) {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}
	clk_disable_unprepare(bp->tsu_clk);

	return 0;
}

static int __maybe_unused macb_runtime_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(dev))) {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}
	clk_prepare_enable(bp->tsu_clk);

	return 0;
}

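/* Hook both system sleep and runtime PM into one dev_pm_ops table; the
 * __maybe_unused handlers above are only referenced when the corresponding
 * CONFIG_PM options are enabled.
 */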
static const struct dev_pm_ops macb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
	SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
};

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm	= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");