1
2
3
4
5
6
7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9#include <linux/clk.h>
10#include <linux/clk-provider.h>
11#include <linux/crc32.h>
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/kernel.h>
15#include <linux/types.h>
16#include <linux/circ_buf.h>
17#include <linux/slab.h>
18#include <linux/init.h>
19#include <linux/io.h>
20#include <linux/gpio.h>
21#include <linux/gpio/consumer.h>
22#include <linux/interrupt.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/dma-mapping.h>
26#include <linux/platform_device.h>
27#include <linux/phylink.h>
28#include <linux/of.h>
29#include <linux/of_device.h>
30#include <linux/of_gpio.h>
31#include <linux/of_mdio.h>
32#include <linux/of_net.h>
33#include <linux/ip.h>
34#include <linux/udp.h>
35#include <linux/tcp.h>
36#include <linux/iopoll.h>
37#include <linux/pm_runtime.h>
38#include "macb.h"
39
40
/* Per-device state for the SiFive FU540 MACB management clock block. */
struct sifive_fu540_macb_mgmt {
	void __iomem *reg;	/* mapped management register */
	unsigned long rate;	/* cached clock rate (presumably Hz — confirm against clk ops) */
	struct clk_hw hw;	/* clk framework hook embedding this struct */
};
46
/* RX buffer size handed to the MACB (non-GEM) DMA per descriptor */
#define MACB_RX_BUFFER_SIZE	128
/* GEM RX buffer sizes are programmed in units of this many bytes */
#define RX_BUFFER_MULTIPLE	64

/* RX ring sizing. Sizes must be powers of two: the ring-wrap helpers
 * mask the index with (size - 1).
 */
#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

/* TX ring sizing; same power-of-two requirement as RX */
#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

/* Interrupt sources serviced by the RX path */
#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
/* TX error conditions that schedule the TX error task */
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)	\
					| MACB_BIT(ISR_RLE)	\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
					| MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))

/* Upper bound on a single GEM TX buffer descriptor length
 * (0x3FC0 = 16320 bytes, already MACB_TX_LEN_ALIGN-aligned).
 */
#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

/* Wake-on-LAN state bits kept in the driver's wol word */
#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Register field values used for 10G high-speed operation */
#define HS_SPEED_10000M			4
#define MACB_SERDES_RATE_10G		1

/* TX halt poll timeout, in usecs (see macb_halt_tx()) */
#define MACB_HALT_TIMEOUT	1230

/* Runtime-PM autosuspend delay — presumably ms; confirm against pm_runtime setup */
#define MACB_PM_TIMEOUT  100

/* MDIO idle poll timeout, in usecs (passed to readx_poll_timeout()) */
#define MACB_MDIO_TIMEOUT	1000000
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
/* Return the size in bytes of one hardware DMA descriptor, including the
 * optional 64-bit-address and/or PTP timestamp extensions when extended
 * descriptor support (MACB_EXT_DESC) is compiled in.
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	/* No extended descriptors built in: base descriptor only */
	return sizeof(struct macb_dma_desc);
}
152
/* Convert a logical ring index into the index used to address the
 * descriptor array in memory: extended descriptors occupy 2x (64-bit
 * address OR PTP extension) or 3x (both) the base descriptor slots.
 */
static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;	/* one extension: double-width slots */
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;	/* both extensions: triple-width slots */
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}
170
171#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
172static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
173{
174 return (struct macb_dma_desc_64 *)((void *)desc
175 + sizeof(struct macb_dma_desc));
176}
177#endif
178
179
/* Wrap a free-running TX index into the ring; relies on tx_ring_size
 * being a power of two.
 */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}
184
/* Return the TX DMA descriptor for a (free-running) ring index. */
static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	/* account for extended-descriptor stride */
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}
192
/* Return the software TX bookkeeping entry for a (free-running) ring index. */
static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}
198
/* Return the bus address of the TX descriptor at the given ring index. */
static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
			macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}
208
/* Wrap a free-running RX index into the ring; relies on rx_ring_size
 * being a power of two.
 */
static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}
213
/* Return the RX DMA descriptor for a (free-running) ring index. */
static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	/* account for extended-descriptor stride */
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}
220
/* Return the CPU address of the RX buffer for a ring index (buffers are a
 * single contiguous allocation of rx_buffer_size-byte slots).
 */
static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}
226
227
/* Register accessors. The "_native" pair uses __raw accessors (no
 * byte-swapping) for controllers whose registers match CPU endianness;
 * the plain pair uses little-endian relaxed MMIO. bp->macb_reg_readl /
 * writel are pointed at one pair at probe time.
 */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}
247
248
249
250
251
/* Probe whether the controller's registers use CPU-native byte order:
 * write a known bit pattern with raw (non-swapping) MMIO and check it
 * reads back unchanged. The register is restored to 0 afterwards.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* restore NCR before anyone else touches the device */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}
264
/* Identify the IP: module IDs >= 2 denote the GEM variant rather than
 * the older MACB.
 */
static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}
276
/* Program the netdev MAC address into specific-address register pair 1
 * and clear the remaining three pairs so stale filters cannot match.
 */
static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}
295
296static void macb_get_hwaddr(struct macb *bp)
297{
298 u32 bottom;
299 u16 top;
300 u8 addr[6];
301 int i;
302
303
304 for (i = 0; i < 4; i++) {
305 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
306 top = macb_or_gem_readl(bp, SA1T + i * 8);
307
308 addr[0] = bottom & 0xff;
309 addr[1] = (bottom >> 8) & 0xff;
310 addr[2] = (bottom >> 16) & 0xff;
311 addr[3] = (bottom >> 24) & 0xff;
312 addr[4] = top & 0xff;
313 addr[5] = (top >> 8) & 0xff;
314
315 if (is_valid_ether_addr(addr)) {
316 memcpy(bp->dev->dev_addr, addr, sizeof(addr));
317 return;
318 }
319 }
320
321 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
322 eth_hw_addr_random(bp->dev);
323}
324
/* Poll the network status register until the MDIO interface reports idle.
 * Returns 0 on success, -ETIMEDOUT after MACB_MDIO_TIMEOUT usecs.
 */
static int macb_mdio_wait_for_idle(struct macb *bp)
{
	u32 val;

	return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
				  1, MACB_MDIO_TIMEOUT);
}
332
/* MDIO bus read op. Takes a runtime-PM reference for the register access.
 * Clause 45 registers need a two-step sequence: write the register
 * address first, then issue the read. Returns the 16-bit register value
 * or a negative errno.
 */
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		/* balance the failed get without touching last_busy */
		pm_runtime_put_noidle(&bp->pdev->dev);
		goto mdio_pm_exit;
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	if (regnum & MII_ADDR_C45) {
		/* step 1: latch the C45 register address */
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_ADDR)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(DATA, regnum & 0xFFFF)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));

		status = macb_mdio_wait_for_idle(bp);
		if (status < 0)
			goto mdio_read_exit;

		/* step 2: issue the read for the latched address */
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_READ)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));
	} else {
		/* Clause 22: single-shot read */
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
				| MACB_BF(RW, MACB_MAN_C22_READ)
				| MACB_BF(PHYA, mii_id)
				| MACB_BF(REGA, regnum)
				| MACB_BF(CODE, MACB_MAN_C22_CODE)));
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));

mdio_read_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}
385
/* MDIO bus write op; mirrors macb_mdio_read() including the two-step
 * Clause 45 address/write sequence. Returns 0 or a negative errno.
 */
static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		/* balance the failed get without touching last_busy */
		pm_runtime_put_noidle(&bp->pdev->dev);
		goto mdio_pm_exit;
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	if (regnum & MII_ADDR_C45) {
		/* step 1: latch the C45 register address */
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_ADDR)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(DATA, regnum & 0xFFFF)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));

		status = macb_mdio_wait_for_idle(bp);
		if (status < 0)
			goto mdio_write_exit;

		/* step 2: write the data to the latched address */
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_WRITE)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)
			    | MACB_BF(DATA, value)));
	} else {
		/* Clause 22: single-shot write */
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
				| MACB_BF(RW, MACB_MAN_C22_WRITE)
				| MACB_BF(PHYA, mii_id)
				| MACB_BF(REGA, regnum)
				| MACB_BF(CODE, MACB_MAN_C22_CODE)
				| MACB_BF(DATA, value)));
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

mdio_write_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}
439
/* Program every queue's RX/TX descriptor ring base address into the
 * hardware, including the high 32 bits when 64-bit DMA is in use.
 */
static void macb_init_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH,
				     upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH,
				     upper_32_bits(queue->tx_ring_dma));
#endif
	}
}
460
461
462
463
464
465
/* Adjust the TX clock to match the negotiated link speed.
 * No-op when there is no tx_clk, when hardware changes the clock itself
 * (MACB_CAPS_CLK_HW_CHG), or in MII mode (clock comes from the PHY).
 */
static void macb_set_tx_clk(struct macb *bp, int speed)
{
	long ferr, rate, rate_rounded;

	if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
		return;

	/* In MII mode the PHY supplies the TX clock */
	if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(bp->tx_clk, rate);
	if (rate_rounded < 0)
		return;

	/* Warn if the achievable rate is more than 5 parts per 100000
	 * (0.005%) away from the requested rate.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(bp->dev,
			    "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(bp->tx_clk, rate_rounded))
		netdev_err(bp->dev, "adjusting tx_clk failed.\n");
}
508
/* phylink .validate: restrict the supported/advertising link-mode masks
 * to what this controller variant and the requested interface can do.
 * An unsupported combination zeroes the mask, which tells phylink the
 * configuration is invalid.
 */
static void macb_validate(struct phylink_config *config,
			  unsigned long *supported,
			  struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct macb *bp = netdev_priv(ndev);

	/* Reject interface modes the driver does not implement at all */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_RMII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_SGMII &&
	    state->interface != PHY_INTERFACE_MODE_10GBASER &&
	    !phy_interface_mode_is_rgmii(state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* GMII/RGMII require the GEM variant */
	if (!macb_is_gem(bp) &&
	    (state->interface == PHY_INTERFACE_MODE_GMII ||
	     phy_interface_mode_is_rgmii(state->interface))) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* 10GBASE-R requires both the high-speed and PCS capabilities */
	if (state->interface == PHY_INTERFACE_MODE_10GBASER &&
	    !(bp->caps & MACB_CAPS_HIGH_SPEED &&
	      bp->caps & MACB_CAPS_PCS)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);
	phylink_set(mask, Asym_Pause);

	/* 10G modes; when the interface is explicitly 10GBASE-R, skip the
	 * lower-speed modes entirely (goto out).
	 */
	if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
	    (state->interface == PHY_INTERFACE_MODE_NA ||
	     state->interface == PHY_INTERFACE_MODE_10GBASER)) {
		phylink_set(mask, 10000baseCR_Full);
		phylink_set(mask, 10000baseER_Full);
		phylink_set(mask, 10000baseKR_Full);
		phylink_set(mask, 10000baseLR_Full);
		phylink_set(mask, 10000baseLRM_Full);
		phylink_set(mask, 10000baseSR_Full);
		phylink_set(mask, 10000baseT_Full);
		if (state->interface != PHY_INTERFACE_MODE_NA)
			goto out;
	}

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	/* Gigabit modes for the interfaces that can carry them */
	if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
	    (state->interface == PHY_INTERFACE_MODE_NA ||
	     state->interface == PHY_INTERFACE_MODE_GMII ||
	     state->interface == PHY_INTERFACE_MODE_SGMII ||
	     phy_interface_mode_is_rgmii(state->interface))) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);

		if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
			phylink_set(mask, 1000baseT_Half);
	}
out:
	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
582
/* USX (10GBASE-R) PCS link-up: program the serdes rate and speed fields,
 * clear scrambler bypass and enable the transmitter.
 */
static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
				 phy_interface_t interface, int speed,
				 int duplex)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
	u32 config;

	config = gem_readl(bp, USX_CONTROL);
	config = GEM_BFINS(SERDES_RATE, MACB_SERDES_RATE_10G, config);
	config = GEM_BFINS(USX_CTRL_SPEED, HS_SPEED_10000M, config);
	config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS));
	config |= GEM_BIT(TX_EN);
	gem_writel(bp, USX_CONTROL, config);
}
597
/* USX PCS state readout: speed/duplex are fixed at 10G full duplex; link
 * follows the block-lock status and pause reflects the PAE config bit.
 */
static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
				   struct phylink_link_state *state)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_pcs);
	u32 val;

	state->speed = SPEED_10000;
	state->duplex = 1;
	state->an_complete = 1;

	val = gem_readl(bp, USX_STATUS);
	state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK));
	val = gem_readl(bp, NCFGR);
	if (val & GEM_BIT(PAE))
		state->pause = MLO_PAUSE_RX;
}
614
/* USX PCS config: assert SIGNAL_OK in the USX control register; the rest
 * of the setup happens in macb_usx_pcs_link_up().
 */
static int macb_usx_pcs_config(struct phylink_pcs *pcs,
			       unsigned int mode,
			       phy_interface_t interface,
			       const unsigned long *advertising,
			       bool permit_pause_to_mac)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_pcs);

	gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
		   GEM_BIT(SIGNAL_OK));

	return 0;
}
628
/* SGMII PCS state readout: link state is not read from the PCS here
 * (the driver polls NSR via get_fixed_state instead), so report down.
 */
static void macb_pcs_get_state(struct phylink_pcs *pcs,
			       struct phylink_link_state *state)
{
	state->link = 0;
}
634
/* SGMII PCS autoneg restart: intentionally empty — the hardware offers
 * no restart control for this PCS.
 */
static void macb_pcs_an_restart(struct phylink_pcs *pcs)
{
	/* Not supported */
}
639
/* SGMII PCS config: nothing to program here; the SGMII/PCS enable bits
 * are handled in macb_mac_config().
 */
static int macb_pcs_config(struct phylink_pcs *pcs,
			   unsigned int mode,
			   phy_interface_t interface,
			   const unsigned long *advertising,
			   bool permit_pause_to_mac)
{
	return 0;
}
648
/* PCS ops for 10GBASE-R (USX) operation */
static const struct phylink_pcs_ops macb_phylink_usx_pcs_ops = {
	.pcs_get_state = macb_usx_pcs_get_state,
	.pcs_config = macb_usx_pcs_config,
	.pcs_link_up = macb_usx_pcs_link_up,
};

/* PCS ops for SGMII operation */
static const struct phylink_pcs_ops macb_phylink_pcs_ops = {
	.pcs_get_state = macb_pcs_get_state,
	.pcs_an_restart = macb_pcs_an_restart,
	.pcs_config = macb_pcs_config,
};
660
/* phylink .mac_config: select the interface-specific MAC/PCS enable bits
 * in NCFGR/NCR under bp->lock. Registers are only written when their
 * value actually changes.
 */
static void macb_mac_config(struct phylink_config *config, unsigned int mode,
			    const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	unsigned long flags;
	u32 old_ctrl, ctrl;
	u32 old_ncr, ncr;

	spin_lock_irqsave(&bp->lock, flags);

	old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
	old_ncr = ncr = macb_or_gem_readl(bp, NCR);

	if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
		/* EMAC variant only has the RM9200 RMII select bit */
		if (state->interface == PHY_INTERFACE_MODE_RMII)
			ctrl |= MACB_BIT(RM9200_RMII);
	} else if (macb_is_gem(bp)) {
		/* start from a clean slate, then enable per interface */
		ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
		ncr &= ~GEM_BIT(ENABLE_HS_MAC);

		if (state->interface == PHY_INTERFACE_MODE_SGMII) {
			ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
		} else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
			ctrl |= GEM_BIT(PCSSEL);
			ncr |= GEM_BIT(ENABLE_HS_MAC);
		}
	}

	/* Avoid redundant register writes */
	if (old_ctrl ^ ctrl)
		macb_or_gem_writel(bp, NCFGR, ctrl);

	if (old_ncr ^ ncr)
		macb_or_gem_writel(bp, NCR, ncr);

	/* For SGMII, choose between hardware PCS autonegotiation and a
	 * fixed link (MLO_AN_FIXED disables PCSAUTONEG).
	 */
	if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) {
		u32 pcsctrl, old_pcsctrl;

		old_pcsctrl = gem_readl(bp, PCSCNTRL);
		if (mode == MLO_AN_FIXED)
			pcsctrl = old_pcsctrl & ~GEM_BIT(PCSAUTONEG);
		else
			pcsctrl = old_pcsctrl | GEM_BIT(PCSAUTONEG);
		if (old_pcsctrl != pcsctrl)
			gem_writel(bp, PCSCNTRL, pcsctrl);
	}

	spin_unlock_irqrestore(&bp->lock, flags);
}
715
/* phylink .mac_link_down: mask RX/TX interrupts on all queues (not on
 * the EMAC variant), disable RX and TX in NCR, and stop the TX queues.
 */
static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
			       phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl;

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
			queue_writel(queue, IDR,
				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));

	/* Disable Rx and Tx */
	ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(ndev);
}
736
/* phylink .mac_link_up: program speed/duplex/pause into NCFGR, retune
 * the TX clock, re-initialise the DMA rings (non-EMAC), unmask interrupts,
 * then enable RX/TX and restart the TX queues.
 */
static void macb_mac_link_up(struct phylink_config *config,
			     struct phy_device *phy,
			     unsigned int mode, phy_interface_t interface,
			     int speed, int duplex,
			     bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;
	u32 ctrl;

	spin_lock_irqsave(&bp->lock, flags);

	ctrl = macb_or_gem_readl(bp, NCFGR);

	ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));

	if (speed == SPEED_100)
		ctrl |= MACB_BIT(SPD);

	if (duplex)
		ctrl |= MACB_BIT(FD);

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
		ctrl &= ~MACB_BIT(PAE);
		if (macb_is_gem(bp)) {
			ctrl &= ~GEM_BIT(GBE);

			if (speed == SPEED_1000)
				ctrl |= GEM_BIT(GBE);
		}

		if (rx_pause)
			ctrl |= MACB_BIT(PAE);

		macb_set_tx_clk(bp, speed);

		/* Rebuild the descriptor rings from scratch so that any
		 * state left over from the previous link is discarded.
		 */
		bp->macbgem_ops.mog_init_rings(bp);
		macb_init_buffers(bp);

		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
			queue_writel(queue, IER,
				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
	}

	macb_or_gem_writel(bp, NCFGR, ctrl);

	if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER)
		gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M,
							gem_readl(bp, HS_MAC_CONFIG)));

	spin_unlock_irqrestore(&bp->lock, flags);

	/* Enable Rx and Tx */
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));

	netif_tx_wake_all_queues(ndev);
}
800
/* phylink .mac_prepare: pick the PCS ops that match the target interface
 * (USX for 10GBASE-R, standard PCS for SGMII, none otherwise) and
 * register the PCS with phylink if one was selected.
 */
static int macb_mac_prepare(struct phylink_config *config, unsigned int mode,
			    phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_10GBASER)
		bp->phylink_pcs.ops = &macb_phylink_usx_pcs_ops;
	else if (interface == PHY_INTERFACE_MODE_SGMII)
		bp->phylink_pcs.ops = &macb_phylink_pcs_ops;
	else
		bp->phylink_pcs.ops = NULL;

	if (bp->phylink_pcs.ops)
		phylink_set_pcs(bp->phylink, &bp->phylink_pcs);

	return 0;
}
819
/* MAC-level phylink callbacks for this driver */
static const struct phylink_mac_ops macb_phylink_ops = {
	.validate = macb_validate,
	.mac_prepare = macb_mac_prepare,
	.mac_config = macb_mac_config,
	.mac_link_down = macb_mac_link_down,
	.mac_link_up = macb_mac_link_up,
};
827
828static bool macb_phy_handle_exists(struct device_node *dn)
829{
830 dn = of_parse_phandle(dn, "phy-handle", 0);
831 of_node_put(dn);
832 return dn != NULL;
833}
834
/* Attach phylink to a PHY: prefer the DT-described PHY; if there is no
 * DT node, or the DT connect failed without an explicit phy-handle, fall
 * back to the first PHY found on the MDIO bus. Starts phylink on success.
 * Returns 0 or a negative errno.
 */
static int macb_phylink_connect(struct macb *bp)
{
	struct device_node *dn = bp->pdev->dev.of_node;
	struct net_device *dev = bp->dev;
	struct phy_device *phydev;
	int ret;

	if (dn)
		ret = phylink_of_phy_connect(bp->phylink, dn, 0);

	/* NOTE: ret is only read here when dn is non-NULL (set above) */
	if (!dn || (ret && !macb_phy_handle_exists(dn))) {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		/* attach the mac to the phy */
		ret = phylink_connect_phy(bp->phylink, phydev);
	}

	if (ret) {
		netdev_err(dev, "Could not attach PHY (%d)\n", ret);
		return ret;
	}

	phylink_start(bp->phylink);

	return 0;
}
865
/* Fixed-link state poll callback (SGMII): report link up/down from the
 * NSR link status bit.
 */
static void macb_get_pcs_fixed_state(struct phylink_config *config,
				     struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);

	state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0;
}
874
875
/* Create the phylink instance for this netdev. For SGMII, link state is
 * polled via macb_get_pcs_fixed_state(). Returns 0 or a negative errno.
 */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);

	bp->phylink_config.dev = &dev->dev;
	bp->phylink_config.type = PHYLINK_NETDEV;

	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		bp->phylink_config.poll_fixed_state = true;
		bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
	}

	bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
				     bp->phy_interface, &macb_phylink_ops);
	if (IS_ERR(bp->phylink)) {
		netdev_err(dev, "Could not create a phylink instance (%ld)\n",
			   PTR_ERR(bp->phylink));
		return PTR_ERR(bp->phylink);
	}

	return 0;
}
898
/* Register the MDIO bus. Use the OF registration path only when the
 * device node actually contains at least one PHY child node; otherwise
 * (fixed-link, or no PHY children) register a plain mdiobus so the bus
 * is still scanned.
 */
static int macb_mdiobus_register(struct macb *bp)
{
	struct device_node *child, *np = bp->pdev->dev.of_node;

	if (of_phy_is_fixed_link(np))
		return mdiobus_register(bp->mii_bus);

	/* Only register the OF way if there is a PHY child to describe;
	 * of_mdiobus_register() would otherwise suppress auto-scanning.
	 */
	for_each_available_child_of_node(np, child)
		if (of_mdiobus_child_is_phy(child)) {
			/* drop the reference taken by the iterator before
			 * breaking out of it early
			 */
			of_node_put(child);

			return of_mdiobus_register(bp->mii_bus, np);
		}

	return mdiobus_register(bp->mii_bus);
}
923
/* Allocate and register the MDIO bus, then create the phylink instance.
 * Unwinds with goto-based cleanup on failure. Returns 0 or a negative
 * errno.
 */
static int macb_mii_init(struct macb *bp)
{
	int err = -ENXIO;

	/* Enable management port so MDIO transactions can run */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	err = macb_mdiobus_register(bp);
	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}
964
/* Accumulate the MACB hardware statistics counters into hw_stats.macb by
 * walking the struct as a u32 array in lockstep with the register map
 * (MACB_PFR .. MACB_TPF, 4 bytes apart). The WARN_ON guards against the
 * struct layout drifting out of sync with the register block.
 */
static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}
976
/* Request a graceful TX halt (THALT) and poll TSR until the transmitter
 * reports it has stopped (TGO clear). Returns 0 on success, -ETIMEDOUT
 * after MACB_HALT_TIMEOUT usecs.
 */
static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		udelay(250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}
996
/* Release one TX bookkeeping entry: undo the DMA mapping (page or single,
 * as recorded at map time) and free the skb if this entry owns one.
 * Safe to call on already-released entries (mapping == 0, skb == NULL).
 */
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}
1014
/* Write a buffer address into a DMA descriptor. With 64-bit DMA, the
 * high word must land before the low word: the hardware considers the
 * descriptor valid as soon as the low word (which carries the ownership
 * bit) is written, hence the dma_wmb() between the two stores.
 */
static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
		/* The low address must be visible to hardware only after
		 * the high address, or it could fetch a stale high word.
		 */
		dma_wmb();
	}
#endif
	desc->addr = lower_32_bits(addr);
}
1032
/* Read the buffer address back out of a DMA descriptor, combining the
 * 64-bit high word (when present) with the word-aligned low bits
 * (the RX_WADDR field masks off the status/ownership bits).
 */
static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}
1047
/* Workqueue handler that recovers a queue from a TX error: halt the
 * transmitter, account for and free every in-flight frame, rebuild an
 * empty ring and restart transmission. Runs with bp->lock held for the
 * whole recovery so the TX path cannot interleave.
 */
static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue *queue = container_of(work, struct macb_queue,
						tx_error_task);
	struct macb *bp = queue->bp;
	struct macb_tx_skb *tx_skb;
	struct macb_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int tail;
	unsigned long flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Serialize against the interrupt handler and start_xmit for the
	 * whole teardown/rebuild sequence.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now (best effort: a timeout here means the
	 * controller is wedged, but recovery proceeds regardless).
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set only for the last buffer of the frame:
			 * walk forward past fragment entries (skb == NULL).
			 */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by hardware;
			 * count the frame only if it completed cleanly.
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}
		} else {
			/* Descriptor not yet processed by hardware: reclaim
			 * it by setting the USED bit ourselves.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue: a single USED descriptor with a NULL address */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue base address */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ: ack stale status */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}
1157
/* TX completion handler: reclaim descriptors the hardware has marked
 * USED, account and free the completed skbs, and wake the subqueue once
 * enough ring space is available again.
 */
static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	/* Read-and-ack the TX status bits */
	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb *tx_skb;
		struct sk_buff *skb;
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED is only set by hardware on the first buffer
		 * descriptor of a completed frame; stop at the first
		 * descriptor still owned by hardware.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (unlikely(skb_shinfo(skb)->tx_flags &
					     SKBTX_HW_TSTAMP) &&
				    gem_ptp_do_txstamp(queue, skb, desc) == 0) {
					/* skb now belongs to the timestamp
					 * queue; gem_ptp_do_txstamp frees it
					 * later, so drop our reference.
					 */
					tx_skb->skb = NULL;
				}
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only on the last buffer of a frame:
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap() — only test the pointer.
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
}
1237
/* Refill the GEM RX ring: allocate and DMA-map a new skb for every empty
 * slot between rx_tail and rx_prepared_head, or simply hand back slots
 * whose skb survived. Ownership is returned to hardware by clearing
 * RX_USED / writing the address last.
 */
static void gem_rx_refill(struct macb_queue *queue)
{
	unsigned int entry;
	struct sk_buff *skb;
	dma_addr_t paddr;
	struct macb *bp = queue->bp;
	struct macb_dma_desc *desc;

	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		queue->rx_prepared_head++;
		desc = macb_rx_desc(queue, entry);

		if (!queue->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			queue->rx_skbuff[entry] = skb;

			/* last ring entry carries the WRAP bit in its address */
			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			desc->ctrl = 0;
			/* Setting addr clears RX_USED and allows reception;
			 * make sure ctrl is cleared first to avoid a race.
			 */
			dma_wmb();
			macb_set_addr(bp, desc, paddr);

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			/* slot still has its skb: just hand it back to hw */
			desc->ctrl = 0;
			dma_wmb();
			desc->addr &= ~MACB_BIT(RX_USED);
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
		    queue, queue->rx_prepared_head, queue->rx_tail);
}
1300
1301
1302static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
1303 unsigned int end)
1304{
1305 unsigned int frag;
1306
1307 for (frag = begin; frag != end; frag++) {
1308 struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
1309
1310 desc->addr &= ~MACB_BIT(RX_USED);
1311 }
1312
1313
1314 wmb();
1315
1316
1317
1318
1319
1320}
1321
/* GEM NAPI receive: process up to @budget completed RX descriptors on
 * @queue, hand each frame to the stack via GRO and refill the ring.
 * Each frame is expected to fit a single descriptor (SOF and EOF set).
 * Returns the number of frames received.
 */
static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
		  int budget)
{
	struct macb *bp = queue->bp;
	unsigned int len;
	unsigned int entry;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	int count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
		desc = macb_rx_desc(queue, entry);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);

		if (!rxused)
			break;

		/* Ensure ctrl is at least as up-to-date as rxused */
		dma_rmb();

		ctrl = desc->ctrl;

		queue->rx_tail++;
		count++;

		/* frame must be complete within one descriptor */
		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		skb = queue->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		queue->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		/* HW checksum result is only valid outside promiscuous mode */
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->dev->stats.rx_packets++;
		queue->stats.rx_packets++;
		bp->dev->stats.rx_bytes += skb->len;
		queue->stats.rx_bytes += skb->len;

		gem_ptp_do_rxstamp(bp, skb, desc);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		napi_gro_receive(napi, skb);
	}

	gem_rx_refill(queue);

	return count;
}
1412
/* Reassemble one frame that spans descriptors first_frag..last_frag
 * (each backed by an rx_buffer_size chunk) into a freshly allocated
 * linear skb and hand it to NAPI/GRO.
 *
 * Returns 0 on success, 1 if the skb allocation failed (frame dropped,
 * descriptors recycled), or -1 if the descriptor chain is inconsistent
 * (caller resets the RX queue).
 */
static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
			 unsigned int first_frag, unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	struct macb *bp = queue->bp;

	/* frame length is recorded in the last descriptor */
	desc = macb_rx_desc(queue, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(bp, first_frag),
		    macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the buffer
	 * (RBOF offset is programmed in macb_init_hw()), so the copy
	 * below transfers header + data and the alignment prefix is
	 * pulled off again before passing the skb up.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->dev->stats.rx_dropped++;
		/* recycle the fragments back to the controller */
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(queue, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			/* only the last fragment may be shorter than a
			 * full RX buffer; anything else is corruption
			 */
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(queue, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(queue, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* drop the alignment prefix again */
	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->dev->stats.rx_packets++;
	bp->dev->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	napi_gro_receive(napi, skb);

	return 0;
}
1494
1495static inline void macb_init_rx_ring(struct macb_queue *queue)
1496{
1497 struct macb *bp = queue->bp;
1498 dma_addr_t addr;
1499 struct macb_dma_desc *desc = NULL;
1500 int i;
1501
1502 addr = queue->rx_buffers_dma;
1503 for (i = 0; i < bp->rx_ring_size; i++) {
1504 desc = macb_rx_desc(queue, i);
1505 macb_set_addr(bp, desc, addr);
1506 desc->ctrl = 0;
1507 addr += bp->rx_buffer_size;
1508 }
1509 desc->addr |= MACB_BIT(RX_WRAP);
1510 queue->rx_tail = 0;
1511}
1512
/* MACB NAPI receive: walk the descriptor ring from rx_tail, collect
 * SOF..EOF fragment runs and reassemble them with macb_rx_frame().
 * If the chain is found corrupt the whole RX queue is reinitialised.
 * Returns the number of frames delivered.
 */
static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
		   int budget)
{
	struct macb *bp = queue->bp;
	bool reset_rx_queue = false;
	int received = 0;
	unsigned int tail;
	int first_frag = -1;	/* index of the pending frame's SOF, -1 if none */

	for (tail = queue->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
		u32 ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		if (!(desc->addr & MACB_BIT(RX_USED)))
			break;

		/* Ensure ctrl is at least as up-to-date as addr */
		dma_rmb();

		ctrl = desc->ctrl;

		if (ctrl & MACB_BIT(RX_SOF)) {
			/* new frame starts: drop any unfinished one */
			if (first_frag != -1)
				discard_partial_frame(queue, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			/* EOF without a preceding SOF: ring is corrupt */
			if (unlikely(first_frag == -1)) {
				reset_rx_queue = true;
				continue;
			}

			dropped = macb_rx_frame(queue, napi, first_frag, tail);
			first_frag = -1;
			if (unlikely(dropped < 0)) {
				reset_rx_queue = true;
				continue;
			}
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (unlikely(reset_rx_queue)) {
		unsigned long flags;
		u32 ctrl;

		netdev_err(bp->dev, "RX queue corruption: reset it\n");

		spin_lock_irqsave(&bp->lock, flags);

		/* stop the receiver while the ring is rebuilt */
		ctrl = macb_readl(bp, NCR);
		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

		macb_init_rx_ring(queue);
		queue_writel(queue, RBQP, queue->rx_ring_dma);

		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

		spin_unlock_irqrestore(&bp->lock, flags);
		return received;
	}

	/* resume at the unfinished frame (if any) next poll */
	if (first_frag != -1)
		queue->rx_tail = first_frag;
	else
		queue->rx_tail = tail;

	return received;
}
1591
/* NAPI poll callback: acknowledge RSR, receive up to @budget frames via
 * the variant-specific mog_rx() and re-enable RX interrupts once the
 * ring has been drained.  Returns the amount of work done.
 */
static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
	struct macb *bp = queue->bp;
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			/* ring drained: re-arm RX interrupts */
			queue_writel(queue, IER, bp->rx_intr_mask);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}
1624
/* Tasklet scheduled from macb_interrupt() on an HRESP (AHB bus error):
 * quiesce the controller, rebuild every TX/RX ring and restart RX/TX
 * from a clean state.
 */
static void macb_hresp_error_task(struct tasklet_struct *t)
{
	struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
	struct net_device *dev = bp->dev;
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl;

	/* mask all interrupts on every queue while we rebuild the rings */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, bp->rx_intr_mask |
					 MACB_TX_INT_FLAGS |
					 MACB_BIT(HRESP));
	}
	/* stop the receiver and transmitter */
	ctrl = macb_readl(bp, NCR);
	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	bp->macbgem_ops.mog_init_rings(bp);

	/* Initialize TX and RX buffers */
	macb_init_buffers(bp);

	/* Enable interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		queue_writel(queue, IER,
			     bp->rx_intr_mask |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));

	ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
	macb_writel(bp, NCR, ctrl);

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
}
1663
1664static void macb_tx_restart(struct macb_queue *queue)
1665{
1666 unsigned int head = queue->tx_head;
1667 unsigned int tail = queue->tx_tail;
1668 struct macb *bp = queue->bp;
1669
1670 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1671 queue_writel(queue, ISR, MACB_BIT(TXUBR));
1672
1673 if (head == tail)
1674 return;
1675
1676 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1677}
1678
1679static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
1680{
1681 struct macb_queue *queue = dev_id;
1682 struct macb *bp = queue->bp;
1683 u32 status;
1684
1685 status = queue_readl(queue, ISR);
1686
1687 if (unlikely(!status))
1688 return IRQ_NONE;
1689
1690 spin_lock(&bp->lock);
1691
1692 if (status & MACB_BIT(WOL)) {
1693 queue_writel(queue, IDR, MACB_BIT(WOL));
1694 macb_writel(bp, WOL, 0);
1695 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
1696 (unsigned int)(queue - bp->queues),
1697 (unsigned long)status);
1698 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1699 queue_writel(queue, ISR, MACB_BIT(WOL));
1700 pm_wakeup_event(&bp->pdev->dev, 0);
1701 }
1702
1703 spin_unlock(&bp->lock);
1704
1705 return IRQ_HANDLED;
1706}
1707
1708static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
1709{
1710 struct macb_queue *queue = dev_id;
1711 struct macb *bp = queue->bp;
1712 u32 status;
1713
1714 status = queue_readl(queue, ISR);
1715
1716 if (unlikely(!status))
1717 return IRQ_NONE;
1718
1719 spin_lock(&bp->lock);
1720
1721 if (status & GEM_BIT(WOL)) {
1722 queue_writel(queue, IDR, GEM_BIT(WOL));
1723 gem_writel(bp, WOL, 0);
1724 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
1725 (unsigned int)(queue - bp->queues),
1726 (unsigned long)status);
1727 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1728 queue_writel(queue, ISR, GEM_BIT(WOL));
1729 pm_wakeup_event(&bp->pdev->dev, 0);
1730 }
1731
1732 spin_unlock(&bp->lock);
1733
1734 return IRQ_HANDLED;
1735}
1736
/* Main per-queue interrupt handler.  Dispatches RX completion to NAPI,
 * handles TX completion/restart inline, offloads TX errors to a work
 * item, accounts RX overruns and kicks HRESP (bus error) recovery.
 */
static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status, ctrl;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & bp->rx_intr_mask) {
			/* There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, bp->rx_intr_mask);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&queue->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&queue->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			/* TX errors are handled in process context */
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		if (status & MACB_BIT(TXUBR))
			macb_tx_restart(queue);

		/* NOTE(review): toggling RE below recovers from a condition
		 * where DMA stops under heavy load and raises endless "used
		 * buffer descriptor read" (RXUBR) interrupts; re-enabling RX
		 * clears it.  Presumed hardware erratum (documented in the
		 * at91rm9200 / Zynq manuals) — confirm against those TRMs.
		 */
		if (status & MACB_BIT(RXUBR)) {
			ctrl = macb_readl(bp, NCR);
			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
			wmb();
			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RXUBR));
		}

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			/* full controller reset is done from tasklet context */
			tasklet_schedule(&bp->hresp_err_tasklet);
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}
		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}
1843
1844#ifdef CONFIG_NET_POLL_CONTROLLER
1845
1846
1847
1848static void macb_poll_controller(struct net_device *dev)
1849{
1850 struct macb *bp = netdev_priv(dev);
1851 struct macb_queue *queue;
1852 unsigned long flags;
1853 unsigned int q;
1854
1855 local_irq_save(flags);
1856 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1857 macb_interrupt(dev->irq, queue);
1858 local_irq_restore(flags);
1859}
1860#endif
1861
/* Map an skb (linear head plus page fragments) onto the TX descriptors
 * of @queue.  @hdrlen is the number of linear bytes placed in the first
 * descriptor (the header descriptor for TSO/UFO).  Descriptors are
 * committed in reverse order so the controller never sees a partially
 * built chain.  Returns the number of descriptors consumed, or 0 on
 * DMA mapping failure (all partial mappings are undone).
 */
static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb,
				unsigned int hdrlen)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1, mss_mfs = 0;
	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;

	/* LSO */
	if (skb_shinfo(skb)->gso_size != 0) {
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* UDP - UFO */
			lso_ctrl = MACB_LSO_UFO_ENABLE;
		else
			/* TCP - TSO */
			lso_ctrl = MACB_LSO_TSO_ENABLE;
	}

	/* First, map non-paged data */
	len = skb_headlen(skb);

	/* first buffer length */
	size = hdrlen;

	offset = 0;
	while (len) {
		entry = macb_tx_ring_wrap(bp, tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;

		size = min(len, bp->max_tx_length);
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(bp, tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(!tx_skb)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to mark the end of the TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(bp, i);
	ctrl = MACB_BIT(TX_USED);
	desc = macb_tx_desc(queue, entry);
	desc->ctrl = ctrl;

	if (lso_ctrl) {
		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
			/* include header and FCS in value given to h/w */
			mss_mfs = skb_shinfo(skb)->gso_size +
				  skb_transport_offset(skb) +
				  ETH_FCS_LEN;
		else {
			mss_mfs = skb_shinfo(skb)->gso_size;
			/* TCP Sequence Number Source Select
			 * can be set only for TSO
			 */
			seq_ctrl = 0;
		}
	}

	do {
		i--;
		entry = macb_tx_ring_wrap(bp, i);
		tx_skb = &queue->tx_skb[entry];
		desc = macb_tx_desc(queue, entry);

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (bp->tx_ring_size - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* First descriptor is header descriptor */
		if (i == queue->tx_head) {
			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
			/* disable HW CRC when the driver appended a
			 * software FCS (see macb_pad_and_fcs())
			 */
			if ((bp->dev->features & NETIF_F_HW_CSUM) &&
			    skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
				ctrl |= MACB_BIT(TX_NOCRC);
		} else
			/* Only set MSS/MFS on payload descriptors
			 * (second or later descriptor)
			 */
			ctrl |= MACB_BF(MSS_MFS, mss_mfs);

		/* Set TX buffer descriptor */
		macb_set_addr(bp, desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	/* unmap everything mapped so far */
	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}
2034
2035static netdev_features_t macb_features_check(struct sk_buff *skb,
2036 struct net_device *dev,
2037 netdev_features_t features)
2038{
2039 unsigned int nr_frags, f;
2040 unsigned int hdrlen;
2041
2042
2043
2044
2045 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
2046 return features;
2047
2048
2049 hdrlen = skb_transport_offset(skb);
2050
2051
2052
2053
2054
2055 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
2056 return features & ~MACB_NETIF_LSO;
2057
2058 nr_frags = skb_shinfo(skb)->nr_frags;
2059
2060 nr_frags--;
2061 for (f = 0; f < nr_frags; f++) {
2062 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2063
2064 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
2065 return features & ~MACB_NETIF_LSO;
2066 }
2067 return features;
2068}
2069
2070static inline int macb_clear_csum(struct sk_buff *skb)
2071{
2072
2073 if (skb->ip_summed != CHECKSUM_PARTIAL)
2074 return 0;
2075
2076
2077 if (unlikely(skb_cow_head(skb, 0)))
2078 return -1;
2079
2080
2081
2082
2083
2084 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
2085 return 0;
2086}
2087
/* Pad short frames up to the minimum Ethernet length and append a
 * software-computed FCS.  This runs for the same skbs that get
 * MACB_BIT(TX_NOCRC) in macb_tx_map() (HW_CSUM device feature enabled,
 * skb not using checksum offload, not GSO) — the hardware CRC engine is
 * bypassed there, so the driver must supply the FCS itself.
 * May replace *skb with a reallocated copy; returns 0 or -ENOMEM.
 */
static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
	bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
		      skb_is_nonlinear(*skb);
	int padlen = ETH_ZLEN - (*skb)->len;
	int headroom = skb_headroom(*skb);
	int tailroom = skb_tailroom(*skb);
	struct sk_buff *nskb;
	u32 fcs;

	/* hardware inserts the CRC itself in all other cases */
	if (!(ndev->features & NETIF_F_HW_CSUM) ||
	    !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
	    skb_shinfo(*skb)->gso_size)
		return 0;

	if (padlen <= 0) {
		/* FCS could be appended to tailroom. */
		if (tailroom >= ETH_FCS_LEN)
			goto add_fcs;
		/* FCS could be appended by moving data to headroom. */
		else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
			padlen = 0;
		/* No room for FCS, need to reallocate skb. */
		else
			padlen = ETH_FCS_LEN;
	} else {
		/* Add room for FCS. */
		padlen += ETH_FCS_LEN;
	}

	if (!cloned && headroom + tailroom >= padlen) {
		/* shift data to the start of the buffer to make room */
		(*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
		skb_set_tail_pointer(*skb, (*skb)->len);
	} else {
		nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
		if (!nskb)
			return -ENOMEM;

		dev_consume_skb_any(*skb);
		*skb = nskb;
	}

	if (padlen > ETH_FCS_LEN)
		skb_put_zero(*skb, padlen - ETH_FCS_LEN);

add_fcs:
	/* set FCS to packet: standard Ethernet CRC-32, little endian,
	 * appended least-significant byte first
	 */
	fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
	fcs = ~fcs;

	skb_put_u8(*skb, fcs & 0xff);
	skb_put_u8(*skb, (fcs >> 8) & 0xff);
	skb_put_u8(*skb, (fcs >> 16) & 0xff);
	skb_put_u8(*skb, (fcs >> 24) & 0xff);

	return 0;
}
2145
/* ndo_start_xmit: map @skb onto the selected queue's TX ring and kick
 * the controller.  Handles checksum-field clearing, software pad+FCS,
 * LSO (TSO/UFO) header split and descriptor-space accounting.
 */
static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int desc_cnt, nr_frags, frag_size, f;
	unsigned int hdrlen;
	bool is_lso;
	netdev_tx_t ret = NETDEV_TX_OK;

	if (macb_clear_csum(skb)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	if (macb_pad_and_fcs(&skb, dev)) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	is_lso = (skb_shinfo(skb)->gso_size != 0);

	if (is_lso) {
		/* length of headers */
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* only queue eth + ip headers separately for UDP */
			hdrlen = skb_transport_offset(skb);
		else
			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb_headlen(skb) < hdrlen) {
			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
			/* if this is required, would need to copy to single buffer */
			return NETDEV_TX_BUSY;
		}
	} else
		hdrlen = min(skb_headlen(skb), bp->max_tx_length);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	if (is_lso && (skb_headlen(skb) > hdrlen))
		/* extra header descriptor if also payload in first buffer */
		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
	else
		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* not enough free descriptors: stop the subqueue and back off */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
		       bp->tx_ring_size) < desc_cnt) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptors visible to hardware */
	wmb();
	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return ret;
}
2240
2241static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
2242{
2243 if (!macb_is_gem(bp)) {
2244 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
2245 } else {
2246 bp->rx_buffer_size = size;
2247
2248 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
2249 netdev_dbg(bp->dev,
2250 "RX buffer must be multiple of %d bytes, expanding\n",
2251 RX_BUFFER_MULTIPLE);
2252 bp->rx_buffer_size =
2253 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
2254 }
2255 }
2256
2257 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
2258 bp->dev->mtu, bp->rx_buffer_size);
2259}
2260
2261static void gem_free_rx_buffers(struct macb *bp)
2262{
2263 struct sk_buff *skb;
2264 struct macb_dma_desc *desc;
2265 struct macb_queue *queue;
2266 dma_addr_t addr;
2267 unsigned int q;
2268 int i;
2269
2270 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2271 if (!queue->rx_skbuff)
2272 continue;
2273
2274 for (i = 0; i < bp->rx_ring_size; i++) {
2275 skb = queue->rx_skbuff[i];
2276
2277 if (!skb)
2278 continue;
2279
2280 desc = macb_rx_desc(queue, i);
2281 addr = macb_get_addr(bp, desc);
2282
2283 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
2284 DMA_FROM_DEVICE);
2285 dev_kfree_skb_any(skb);
2286 skb = NULL;
2287 }
2288
2289 kfree(queue->rx_skbuff);
2290 queue->rx_skbuff = NULL;
2291 }
2292}
2293
2294static void macb_free_rx_buffers(struct macb *bp)
2295{
2296 struct macb_queue *queue = &bp->queues[0];
2297
2298 if (queue->rx_buffers) {
2299 dma_free_coherent(&bp->pdev->dev,
2300 bp->rx_ring_size * bp->rx_buffer_size,
2301 queue->rx_buffers, queue->rx_buffers_dma);
2302 queue->rx_buffers = NULL;
2303 }
2304}
2305
2306static void macb_free_consistent(struct macb *bp)
2307{
2308 struct macb_queue *queue;
2309 unsigned int q;
2310 int size;
2311
2312 bp->macbgem_ops.mog_free_rx_buffers(bp);
2313
2314 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2315 kfree(queue->tx_skb);
2316 queue->tx_skb = NULL;
2317 if (queue->tx_ring) {
2318 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
2319 dma_free_coherent(&bp->pdev->dev, size,
2320 queue->tx_ring, queue->tx_ring_dma);
2321 queue->tx_ring = NULL;
2322 }
2323 if (queue->rx_ring) {
2324 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
2325 dma_free_coherent(&bp->pdev->dev, size,
2326 queue->rx_ring, queue->rx_ring_dma);
2327 queue->rx_ring = NULL;
2328 }
2329 }
2330}
2331
2332static int gem_alloc_rx_buffers(struct macb *bp)
2333{
2334 struct macb_queue *queue;
2335 unsigned int q;
2336 int size;
2337
2338 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2339 size = bp->rx_ring_size * sizeof(struct sk_buff *);
2340 queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
2341 if (!queue->rx_skbuff)
2342 return -ENOMEM;
2343 else
2344 netdev_dbg(bp->dev,
2345 "Allocated %d RX struct sk_buff entries at %p\n",
2346 bp->rx_ring_size, queue->rx_skbuff);
2347 }
2348 return 0;
2349}
2350
2351static int macb_alloc_rx_buffers(struct macb *bp)
2352{
2353 struct macb_queue *queue = &bp->queues[0];
2354 int size;
2355
2356 size = bp->rx_ring_size * bp->rx_buffer_size;
2357 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
2358 &queue->rx_buffers_dma, GFP_KERNEL);
2359 if (!queue->rx_buffers)
2360 return -ENOMEM;
2361
2362 netdev_dbg(bp->dev,
2363 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
2364 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
2365 return 0;
2366}
2367
2368static int macb_alloc_consistent(struct macb *bp)
2369{
2370 struct macb_queue *queue;
2371 unsigned int q;
2372 int size;
2373
2374 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2375 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
2376 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2377 &queue->tx_ring_dma,
2378 GFP_KERNEL);
2379 if (!queue->tx_ring)
2380 goto out_err;
2381 netdev_dbg(bp->dev,
2382 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
2383 q, size, (unsigned long)queue->tx_ring_dma,
2384 queue->tx_ring);
2385
2386 size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
2387 queue->tx_skb = kmalloc(size, GFP_KERNEL);
2388 if (!queue->tx_skb)
2389 goto out_err;
2390
2391 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
2392 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2393 &queue->rx_ring_dma, GFP_KERNEL);
2394 if (!queue->rx_ring)
2395 goto out_err;
2396 netdev_dbg(bp->dev,
2397 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
2398 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
2399 }
2400 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
2401 goto out_err;
2402
2403 return 0;
2404
2405out_err:
2406 macb_free_consistent(bp);
2407 return -ENOMEM;
2408}
2409
2410static void gem_init_rings(struct macb *bp)
2411{
2412 struct macb_queue *queue;
2413 struct macb_dma_desc *desc = NULL;
2414 unsigned int q;
2415 int i;
2416
2417 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2418 for (i = 0; i < bp->tx_ring_size; i++) {
2419 desc = macb_tx_desc(queue, i);
2420 macb_set_addr(bp, desc, 0);
2421 desc->ctrl = MACB_BIT(TX_USED);
2422 }
2423 desc->ctrl |= MACB_BIT(TX_WRAP);
2424 queue->tx_head = 0;
2425 queue->tx_tail = 0;
2426
2427 queue->rx_tail = 0;
2428 queue->rx_prepared_head = 0;
2429
2430 gem_rx_refill(queue);
2431 }
2432
2433}
2434
2435static void macb_init_rings(struct macb *bp)
2436{
2437 int i;
2438 struct macb_dma_desc *desc = NULL;
2439
2440 macb_init_rx_ring(&bp->queues[0]);
2441
2442 for (i = 0; i < bp->tx_ring_size; i++) {
2443 desc = macb_tx_desc(&bp->queues[0], i);
2444 macb_set_addr(bp, desc, 0);
2445 desc->ctrl = MACB_BIT(TX_USED);
2446 }
2447 bp->queues[0].tx_head = 0;
2448 bp->queues[0].tx_tail = 0;
2449 desc->ctrl |= MACB_BIT(TX_WRAP);
2450}
2451
2452static void macb_reset_hw(struct macb *bp)
2453{
2454 struct macb_queue *queue;
2455 unsigned int q;
2456 u32 ctrl = macb_readl(bp, NCR);
2457
2458
2459
2460
2461 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
2462
2463
2464 ctrl |= MACB_BIT(CLRSTAT);
2465
2466 macb_writel(bp, NCR, ctrl);
2467
2468
2469 macb_writel(bp, TSR, -1);
2470 macb_writel(bp, RSR, -1);
2471
2472
2473 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2474 queue_writel(queue, IDR, -1);
2475 queue_readl(queue, ISR);
2476 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2477 queue_writel(queue, ISR, -1);
2478 }
2479}
2480
2481static u32 gem_mdc_clk_div(struct macb *bp)
2482{
2483 u32 config;
2484 unsigned long pclk_hz = clk_get_rate(bp->pclk);
2485
2486 if (pclk_hz <= 20000000)
2487 config = GEM_BF(CLK, GEM_CLK_DIV8);
2488 else if (pclk_hz <= 40000000)
2489 config = GEM_BF(CLK, GEM_CLK_DIV16);
2490 else if (pclk_hz <= 80000000)
2491 config = GEM_BF(CLK, GEM_CLK_DIV32);
2492 else if (pclk_hz <= 120000000)
2493 config = GEM_BF(CLK, GEM_CLK_DIV48);
2494 else if (pclk_hz <= 160000000)
2495 config = GEM_BF(CLK, GEM_CLK_DIV64);
2496 else
2497 config = GEM_BF(CLK, GEM_CLK_DIV96);
2498
2499 return config;
2500}
2501
2502static u32 macb_mdc_clk_div(struct macb *bp)
2503{
2504 u32 config;
2505 unsigned long pclk_hz;
2506
2507 if (macb_is_gem(bp))
2508 return gem_mdc_clk_div(bp);
2509
2510 pclk_hz = clk_get_rate(bp->pclk);
2511 if (pclk_hz <= 20000000)
2512 config = MACB_BF(CLK, MACB_CLK_DIV8);
2513 else if (pclk_hz <= 40000000)
2514 config = MACB_BF(CLK, MACB_CLK_DIV16);
2515 else if (pclk_hz <= 80000000)
2516 config = MACB_BF(CLK, MACB_CLK_DIV32);
2517 else
2518 config = MACB_BF(CLK, MACB_CLK_DIV64);
2519
2520 return config;
2521}
2522
2523
2524
2525
2526
2527static u32 macb_dbw(struct macb *bp)
2528{
2529 if (!macb_is_gem(bp))
2530 return 0;
2531
2532 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
2533 case 4:
2534 return GEM_BF(DBW, GEM_DBW128);
2535 case 2:
2536 return GEM_BF(DBW, GEM_DBW64);
2537 case 1:
2538 default:
2539 return GEM_BF(DBW, GEM_DBW32);
2540 }
2541}
2542
2543
2544
2545
2546
2547
2548
2549
/* Program the GEM DMA configuration register: per-queue RX buffer size,
 * burst length, endianness, TX checksum offload and the extended
 * (64-bit / PTP) descriptor formats.  No-op on the original MACB.
 */
static void macb_configure_dma(struct macb *bp)
{
	struct macb_queue *queue;
	u32 buffer_size;
	unsigned int q;
	u32 dmacfg;

	/* RX buffer size is programmed in units of RX_BUFFER_MULTIPLE */
	buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		/* queue 0 uses the DMACFG field, the others their own RBQS */
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			if (q)
				queue_writel(queue, RBQS, buffer_size);
			else
				dmacfg |= GEM_BF(RXBS, buffer_size);
		}
		if (bp->dma_burst_length)
			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA_PKT);

		/* descriptor endianness follows the CPU's native byte order */
		if (bp->native_io)
			dmacfg &= ~GEM_BIT(ENDIA_DESC);
		else
			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */

		if (bp->dev->features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);

		dmacfg &= ~GEM_BIT(ADDR64);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			dmacfg |= GEM_BIT(ADDR64);
#endif
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
			dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
#endif
		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
		gem_writel(bp, DMACFG, dmacfg);
	}
}
2595
/* Bring the controller to a known, configured state: reset it, restore
 * the MAC address, program the network configuration register and the
 * DMA engine.  Called from the open and restore paths with the device
 * quiesced.
 */
static void macb_init_hw(struct macb *bp)
{
	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* offset RX data for alignment */
	config |= MACB_BIT(DRFCS);		/* don't copy FCS into memory */
	if (bp->caps & MACB_CAPS_JUMBO)
		config |= MACB_BIT(JFRAME);	/* accept jumbo frames */
	else
		config |= MACB_BIT(BIG);	/* accept large (1536B) frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* copy all frames */
	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
		config |= GEM_BIT(RXCOEN);	/* RX checksum offload */
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* reject broadcast frames */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		gem_writel(bp, JML, bp->jumbo_max_len);
	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
	if (bp->caps & MACB_CAPS_JUMBO)
		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;

	macb_configure_dma(bp);
}
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
2643
2644
2645
2646
2647
2648
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660static inline int hash_bit_value(int bitnr, __u8 *addr)
2661{
2662 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
2663 return 1;
2664 return 0;
2665}
2666
2667
2668static int hash_get_index(__u8 *addr)
2669{
2670 int i, j, bitval;
2671 int hash_index = 0;
2672
2673 for (j = 0; j < 6; j++) {
2674 for (i = 0, bitval = 0; i < 8; i++)
2675 bitval ^= hash_bit_value(i * 6 + j, addr);
2676
2677 hash_index |= (bitval << j);
2678 }
2679
2680 return hash_index;
2681}
2682
2683
2684static void macb_sethashtable(struct net_device *dev)
2685{
2686 struct netdev_hw_addr *ha;
2687 unsigned long mc_filter[2];
2688 unsigned int bitnr;
2689 struct macb *bp = netdev_priv(dev);
2690
2691 mc_filter[0] = 0;
2692 mc_filter[1] = 0;
2693
2694 netdev_for_each_mc_addr(ha, dev) {
2695 bitnr = hash_get_index(ha->addr);
2696 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
2697 }
2698
2699 macb_or_gem_writel(bp, HRB, mc_filter[0]);
2700 macb_or_gem_writel(bp, HRT, mc_filter[1]);
2701}
2702
2703
/* ndo_set_rx_mode: apply the promiscuous/multicast policy from
 * dev->flags to NCFGR and the hash registers.
 */
static void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cfg |= MACB_BIT(CAF);

		/* RX checksum offload is unusable in promiscuous mode */
		if (macb_is_gem(bp))
			cfg &= ~GEM_BIT(RXCOEN);
	} else {
		/* Disable promiscuous mode */
		cfg &= ~MACB_BIT(CAF);

		/* Re-enable RX checksum offload if the stack requested it */
		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
			cfg |= GEM_BIT(RXCOEN);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set the hash filter to all-ones */
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		/* Accept the specific multicast addresses in the list */
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		/* Disable multicast reception.
		 * NOTE(review): this condition is true whenever any flag
		 * other than IFF_ALLMULTI is set (i.e. effectively always
		 * for an up interface); a plain "else" was presumably
		 * intended - confirm before changing.
		 */
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}
2745
/* ndo_open: resume the device, allocate DMA rings, enable NAPI, program
 * the hardware and connect the PHY.  On failure, everything already set
 * up is unwound in reverse order.
 */
static int macb_open(struct net_device *dev)
{
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned int q;
	int err;

	netdev_dbg(bp->dev, "open\n");

	err = pm_runtime_get_sync(&bp->pdev->dev);
	if (err < 0)
		goto pm_exit;	/* drop the usage count taken even on failure */

	/* Size RX buffers for the current MTU */
	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		goto pm_exit;
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_enable(&queue->napi);

	macb_init_hw(bp);

	err = macb_phylink_connect(bp);
	if (err)
		goto reset_hw;

	netif_tx_start_all_queues(dev);

	if (bp->ptp_info)
		bp->ptp_info->ptp_init(dev);

	return 0;

reset_hw:
	macb_reset_hw(bp);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_disable(&queue->napi);
	macb_free_consistent(bp);
pm_exit:
	pm_runtime_put_sync(&bp->pdev->dev);
	return err;
}
2795
/* ndo_stop: quiesce TX, NAPI and the PHY, reset the hardware and release
 * the DMA rings.  Mirrors macb_open() in reverse.
 */
static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	netif_tx_stop_all_queues(dev);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_disable(&queue->napi);

	phylink_stop(bp->phylink);
	phylink_disconnect_phy(bp->phylink);

	/* Reset under bp->lock so concurrent register users never see a
	 * half-reset device.
	 */
	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(dev);

	pm_runtime_put(&bp->pdev->dev);

	return 0;
}
2825
2826static int macb_change_mtu(struct net_device *dev, int new_mtu)
2827{
2828 if (netif_running(dev))
2829 return -EBUSY;
2830
2831 dev->mtu = new_mtu;
2832
2833 return 0;
2834}
2835
/* Accumulate the GEM hardware statistics counters into bp->hw_stats.gem
 * and bp->ethtool_stats, then append the software per-queue counters.
 * "p" walks the u32 fields of struct gem_stats in register order, so the
 * struct layout must stay in sync with the gem_statistics[] table.
 */
static void gem_update_stats(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int i, q, idx;
	unsigned long *stat;

	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;

	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
		u32 offset = gem_statistics[i].offset;
		u64 val = bp->macb_reg_readl(bp, offset);

		bp->ethtool_stats[i] += val;
		*p += val;

		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
			/* The octet counters are 64-bit: fold in the high
			 * word from the adjacent register (offset + 4).
			 */
			val = bp->macb_reg_readl(bp, offset + 4);
			bp->ethtool_stats[i] += ((u64)val) << 32;
			*(++p) += val;
		}
	}

	/* Per-queue software counters follow the hardware counters */
	idx = GEM_STATS_LEN;
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
			bp->ethtool_stats[idx++] = *stat;
}
2864
/* Translate the accumulated GEM hardware counters into the generic
 * net_device_stats layout.  Counters are only refreshed while the
 * interface is running.
 */
static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->dev->stats;

	if (!netif_running(bp->dev))
		return nstat;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}
2905
/* ethtool -S: refresh the counters and copy out the whole accumulated
 * stats array.  The copy covers all MACB_MAX_QUEUES slots (not just the
 * active queues) because ethtool_stats[] is laid out that way.
 */
static void gem_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct macb *bp;

	bp = netdev_priv(dev);
	gem_update_stats(bp);
	memcpy(data, &bp->ethtool_stats, sizeof(u64)
			* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
}
2916
2917static int gem_get_sset_count(struct net_device *dev, int sset)
2918{
2919 struct macb *bp = netdev_priv(dev);
2920
2921 switch (sset) {
2922 case ETH_SS_STATS:
2923 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
2924 default:
2925 return -EOPNOTSUPP;
2926 }
2927}
2928
/* ethtool string table: fixed GEM counter names followed by per-queue
 * counter names formatted as "q<N>_<stat>" for each active queue.
 */
static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
{
	char stat_string[ETH_GSTRING_LEN];
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned int i;
	unsigned int q;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
			memcpy(p, gem_statistics[i].stat_string,
			       ETH_GSTRING_LEN);

		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
				snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
						q, queue_statistics[i].stat_string);
				memcpy(p, stat_string, ETH_GSTRING_LEN);
			}
		}
		break;
	}
}
2953
/* ndo_get_stats: translate the accumulated hardware counters into the
 * generic net_device_stats layout.  GEM devices have a different counter
 * set and are delegated to gem_get_stats().
 */
static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->dev->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

	/* Refresh the accumulated counters from the hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors +
			    hwstat->sqe_test_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;

	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;

	return nstat;
}
3000
3001static int macb_get_regs_len(struct net_device *netdev)
3002{
3003 return MACB_GREGS_NBR * sizeof(u32);
3004}
3005
/* ethtool register dump: key MAC registers plus the queue-0 TX ring
 * head/tail positions and their descriptor addresses (useful when
 * debugging TX stalls).  GEM-only slots are filled only on GEM.
 */
static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	/* Version = module revision (from MID) + driver dump-format version */
	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);

	regs_buff[0] = macb_readl(bp, NCR);
	regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2] = macb_readl(bp, NSR);
	regs_buff[3] = macb_readl(bp, TSR);
	regs_buff[4] = macb_readl(bp, RBQP);
	regs_buff[5] = macb_readl(bp, TBQP);
	regs_buff[6] = macb_readl(bp, RSR);
	regs_buff[7] = macb_readl(bp, IMR);

	regs_buff[8] = tail;
	regs_buff[9] = head;
	regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
	regs_buff[11] = macb_tx_dma(&bp->queues[0], head);

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
	if (macb_is_gem(bp))
		regs_buff[13] = gem_readl(bp, DMACFG);
}
3038
3039static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
3040{
3041 struct macb *bp = netdev_priv(netdev);
3042
3043 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
3044 phylink_ethtool_get_wol(bp->phylink, wol);
3045 wol->supported |= WAKE_MAGIC;
3046
3047 if (bp->wol & MACB_WOL_ENABLED)
3048 wol->wolopts |= WAKE_MAGIC;
3049 }
3050}
3051
3052static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
3053{
3054 struct macb *bp = netdev_priv(netdev);
3055 int ret;
3056
3057
3058 ret = phylink_ethtool_set_wol(bp->phylink, wol);
3059
3060
3061
3062 if (!ret || ret != -EOPNOTSUPP)
3063 return ret;
3064
3065 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
3066 (wol->wolopts & ~WAKE_MAGIC))
3067 return -EOPNOTSUPP;
3068
3069 if (wol->wolopts & WAKE_MAGIC)
3070 bp->wol |= MACB_WOL_ENABLED;
3071 else
3072 bp->wol &= ~MACB_WOL_ENABLED;
3073
3074 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
3075
3076 return 0;
3077}
3078
/* ethtool get_link_ksettings: link parameters are owned by phylink. */
static int macb_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *kset)
{
	struct macb *bp = netdev_priv(netdev);

	return phylink_ethtool_ksettings_get(bp->phylink, kset);
}
3086
/* ethtool set_link_ksettings: link parameters are owned by phylink. */
static int macb_set_link_ksettings(struct net_device *netdev,
				   const struct ethtool_link_ksettings *kset)
{
	struct macb *bp = netdev_priv(netdev);

	return phylink_ethtool_ksettings_set(bp->phylink, kset);
}
3094
3095static void macb_get_ringparam(struct net_device *netdev,
3096 struct ethtool_ringparam *ring)
3097{
3098 struct macb *bp = netdev_priv(netdev);
3099
3100 ring->rx_max_pending = MAX_RX_RING_SIZE;
3101 ring->tx_max_pending = MAX_TX_RING_SIZE;
3102
3103 ring->rx_pending = bp->rx_ring_size;
3104 ring->tx_pending = bp->tx_ring_size;
3105}
3106
3107static int macb_set_ringparam(struct net_device *netdev,
3108 struct ethtool_ringparam *ring)
3109{
3110 struct macb *bp = netdev_priv(netdev);
3111 u32 new_rx_size, new_tx_size;
3112 unsigned int reset = 0;
3113
3114 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
3115 return -EINVAL;
3116
3117 new_rx_size = clamp_t(u32, ring->rx_pending,
3118 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
3119 new_rx_size = roundup_pow_of_two(new_rx_size);
3120
3121 new_tx_size = clamp_t(u32, ring->tx_pending,
3122 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
3123 new_tx_size = roundup_pow_of_two(new_tx_size);
3124
3125 if ((new_tx_size == bp->tx_ring_size) &&
3126 (new_rx_size == bp->rx_ring_size)) {
3127
3128 return 0;
3129 }
3130
3131 if (netif_running(bp->dev)) {
3132 reset = 1;
3133 macb_close(bp->dev);
3134 }
3135
3136 bp->rx_ring_size = new_rx_size;
3137 bp->tx_ring_size = new_tx_size;
3138
3139 if (reset)
3140 macb_open(bp->dev);
3141
3142 return 0;
3143}
3144
3145#ifdef CONFIG_MACB_USE_HWSTAMP
/* Determine the TSU (timestamp unit) clock rate in Hz.  Prefers the
 * dedicated "tsu_clk"; falls back to pclk when it is absent.
 * NOTE(review): -ENOTSUPP is returned through an unsigned int return
 * type when neither clock is usable, so callers must reinterpret the
 * result as a signed errno - confirm against the gem_ptp users.
 */
static unsigned int gem_get_tsu_rate(struct macb *bp)
{
	struct clk *tsu_clk;
	unsigned int tsu_rate;

	tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
	if (!IS_ERR(tsu_clk))
		tsu_rate = clk_get_rate(tsu_clk);
	/* try pclk instead */
	else if (!IS_ERR(bp->pclk)) {
		tsu_clk = bp->pclk;
		tsu_rate = clk_get_rate(tsu_clk);
	} else
		return -ENOTSUPP;
	return tsu_rate;
}
3162
3163static s32 gem_get_ptp_max_adj(void)
3164{
3165 return 64000000;
3166}
3167
/* ethtool -T: report timestamping capabilities.  When the design has no
 * PTP support, fall back to the stock software-only answer.
 */
static int gem_get_ts_info(struct net_device *dev,
			   struct ethtool_ts_info *info)
{
	struct macb *bp = netdev_priv(dev);

	if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
		ethtool_op_get_ts_info(dev, info);
		return 0;
	}

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types =
		(1 << HWTSTAMP_TX_ONESTEP_SYNC) |
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_ALL);

	/* -1 when the PTP clock has not been registered */
	info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;

	return 0;
}
3197
/* PTP backend callbacks installed on GEM designs with a timestamp unit
 * (see macb_configure_caps()).
 */
static struct macb_ptp_info gem_ptp_info = {
	.ptp_init	 = gem_ptp_init,
	.ptp_remove	 = gem_ptp_remove,
	.get_ptp_max_adj = gem_get_ptp_max_adj,
	.get_tsu_rate	 = gem_get_tsu_rate,
	.get_ts_info	 = gem_get_ts_info,
	.get_hwtst	 = gem_get_hwtst,
	.set_hwtst	 = gem_set_hwtst,
};
3207#endif
3208
3209static int macb_get_ts_info(struct net_device *netdev,
3210 struct ethtool_ts_info *info)
3211{
3212 struct macb *bp = netdev_priv(netdev);
3213
3214 if (bp->ptp_info)
3215 return bp->ptp_info->get_ts_info(netdev, info);
3216
3217 return ethtool_op_get_ts_info(netdev, info);
3218}
3219
/* Enable or disable the already-programmed type-2 screening registers
 * for every stored flow rule.  Each rule's compare units (A: source IP,
 * B: destination IP, C: port) are toggled together with the ethertype
 * match; only exact-match (fully masked) fields are ever enabled.
 */
static void gem_enable_flow_filters(struct macb *bp, bool enable)
{
	struct net_device *netdev = bp->dev;
	struct ethtool_rx_fs_item *item;
	u32 t2_scr;
	int num_t2_scr;

	if (!(netdev->features & NETIF_F_NTUPLE))
		return;

	/* Number of type-2 screening registers this design implements */
	num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		struct ethtool_rx_flow_spec *fs = &item->fs;
		struct ethtool_tcpip4_spec *tp4sp_m;

		/* Skip rules beyond what the hardware can hold */
		if (fs->location >= num_t2_scr)
			continue;

		t2_scr = gem_readl_n(bp, SCRT2, fs->location);

		/* Toggle the ethertype screener for this rule */
		t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);

		/* Only enable compare units whose field is an exact match */
		tp4sp_m = &(fs->m_u.tcp_ip4_spec);

		if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
			t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);

		if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
			t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);

		if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
			t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
		else
			t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);

		gem_writel_n(bp, SCRT2, fs->location, t2_scr);
	}
}
3265
/* Program the GEM type-2 compare registers for one flow rule: up to
 * three compare units (A: source IP, B: destination IP, C: TCP/UDP
 * port) plus the screening register that ties them to an RX queue.
 * Only exact-match (all-ones mask) fields are programmed.
 */
static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
	uint16_t index = fs->location;
	u32 w0, w1, t2_scr;
	bool cmp_a = false;
	bool cmp_b = false;
	bool cmp_c = false;

	if (!macb_is_gem(bp))
		return;

	tp4sp_v = &(fs->h_u.tcp_ip4_spec);
	tp4sp_m = &(fs->m_u.tcp_ip4_spec);

	/* Compare unit A: source IP, offset from the ethertype */
	if (tp4sp_m->ip4src == 0xFFFFFFFF) {
		/* 1st compare reg - IP source address */
		w0 = 0;
		w1 = 0;
		w0 = tp4sp_v->ip4src;
		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare, no mask */
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
		w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
		cmp_a = true;
	}

	/* Compare unit B: destination IP, offset from the ethertype */
	if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
		/* 2nd compare reg - IP destination address */
		w0 = 0;
		w1 = 0;
		w0 = tp4sp_v->ip4dst;
		w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare, no mask */
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
		w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
		cmp_b = true;
	}

	/* Compare unit C: source and/or destination port, offset from the
	 * IP header.
	 */
	if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
		/* 3rd compare reg - source port, destination port */
		w0 = 0;
		w1 = 0;
		w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
		if (tp4sp_m->psrc == tp4sp_m->pdst) {
			/* Both ports exact: one 32-bit compare covers the
			 * adjacent source+destination port fields.
			 */
			w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
			w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
			w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare, no mask */
			w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
		} else {
			/* Only one port exact: masked 16-bit compare on it */
			w1 = GEM_BFINS(T2DISMSK, 0, w1);
			w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
			if (tp4sp_m->psrc == 0xFFFF) {
				w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
				w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
			} else {
				w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
				w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
			}
		}
		gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
		gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
		cmp_c = true;
	}

	/* Screening register: route matches to the requested queue and
	 * reference whichever compare units were programmed above.
	 */
	t2_scr = 0;
	t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
	t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
	if (cmp_a)
		t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
	if (cmp_b)
		t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
	if (cmp_c)
		t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
	gem_writel_n(bp, SCRT2, index, t2_scr);
}
3348
/* Insert a new RX classification rule: store it in the location-sorted
 * list, program the hardware comparers and (re-)enable filtering.
 * Returns -EBUSY when the requested location is already occupied; the
 * allocation happens before taking the spinlock so GFP_KERNEL is safe.
 */
static int gem_add_flow_filter(struct net_device *netdev,
		struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_flow_spec *fs = &cmd->fs;
	struct ethtool_rx_fs_item *item, *newfs;
	unsigned long flags;
	int ret = -EINVAL;
	bool added = false;

	newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
	if (newfs == NULL)
		return -ENOMEM;
	memcpy(&newfs->fs, fs, sizeof(newfs->fs));

	netdev_dbg(netdev,
		   "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
		   fs->flow_type, (int)fs->ring_cookie, fs->location,
		   htonl(fs->h_u.tcp_ip4_spec.ip4src),
		   htonl(fs->h_u.tcp_ip4_spec.ip4dst),
		   htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));

	spin_lock_irqsave(&bp->rx_fs_lock, flags);

	/* Find the correct place to insert, keeping the list sorted by
	 * location, and reject duplicate locations.
	 */
	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location > newfs->fs.location) {
			list_add_tail(&newfs->list, &item->list);
			added = true;
			break;
		} else if (item->fs.location == fs->location) {
			netdev_err(netdev, "Rule not added: location %d not free!\n",
					fs->location);
			ret = -EBUSY;
			goto err;
		}
	}
	if (!added)
		list_add_tail(&newfs->list, &bp->rx_fs_list.list);

	gem_prog_cmp_regs(bp, fs);
	bp->rx_fs_list.count++;

	/* Enable (or refresh) the filtering hardware for all rules */
	gem_enable_flow_filters(bp, 1);

	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	return 0;

err:
	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	kfree(newfs);
	return ret;
}
3402
/* Remove the RX classification rule at cmd->fs.location: clear its
 * screening register, unlink it and free it.  Returns -EINVAL when no
 * rule at that location exists.
 */
static int gem_del_flow_filter(struct net_device *netdev,
		struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	struct ethtool_rx_fs_item *item;
	struct ethtool_rx_flow_spec *fs;
	unsigned long flags;

	spin_lock_irqsave(&bp->rx_fs_lock, flags);

	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
		if (item->fs.location == cmd->fs.location) {
			/* Found: disable the screener, then drop the entry */
			fs = &(item->fs);
			netdev_dbg(netdev,
					"Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
					fs->flow_type, (int)fs->ring_cookie, fs->location,
					htonl(fs->h_u.tcp_ip4_spec.ip4src),
					htonl(fs->h_u.tcp_ip4_spec.ip4dst),
					htons(fs->h_u.tcp_ip4_spec.psrc),
					htons(fs->h_u.tcp_ip4_spec.pdst));

			gem_writel_n(bp, SCRT2, fs->location, 0);

			list_del(&item->list);
			bp->rx_fs_list.count--;
			spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
			kfree(item);
			return 0;
		}
	}

	spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
	return -EINVAL;
}
3438
3439static int gem_get_flow_entry(struct net_device *netdev,
3440 struct ethtool_rxnfc *cmd)
3441{
3442 struct macb *bp = netdev_priv(netdev);
3443 struct ethtool_rx_fs_item *item;
3444
3445 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3446 if (item->fs.location == cmd->fs.location) {
3447 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
3448 return 0;
3449 }
3450 }
3451 return -EINVAL;
3452}
3453
3454static int gem_get_all_flow_entries(struct net_device *netdev,
3455 struct ethtool_rxnfc *cmd, u32 *rule_locs)
3456{
3457 struct macb *bp = netdev_priv(netdev);
3458 struct ethtool_rx_fs_item *item;
3459 uint32_t cnt = 0;
3460
3461 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3462 if (cnt == cmd->rule_cnt)
3463 return -EMSGSIZE;
3464 rule_locs[cnt] = item->fs.location;
3465 cnt++;
3466 }
3467 cmd->data = bp->max_tuples;
3468 cmd->rule_cnt = cnt;
3469
3470 return 0;
3471}
3472
/* ethtool get_rxnfc dispatcher: RX ring count, stored rule count, a
 * single rule, or all rule locations.
 */
static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
		u32 *rule_locs)
{
	struct macb *bp = netdev_priv(netdev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = bp->num_queues;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bp->rx_fs_list.count;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = gem_get_flow_entry(netdev, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
		break;
	default:
		netdev_err(netdev,
			  "Command parameter %d is not supported\n", cmd->cmd);
		ret = -EOPNOTSUPP;
	}

	return ret;
}
3500
/* ethtool set_rxnfc dispatcher: insert or delete an RX classification
 * rule.  Insertion validates the location against the number of
 * hardware tuples and the target queue against the active queue count.
 */
static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct macb *bp = netdev_priv(netdev);
	int ret;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		if ((cmd->fs.location >= bp->max_tuples)
				|| (cmd->fs.ring_cookie >= bp->num_queues)) {
			ret = -EINVAL;
			break;
		}
		ret = gem_add_flow_filter(netdev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = gem_del_flow_filter(netdev, cmd);
		break;
	default:
		netdev_err(netdev,
			  "Command parameter %d is not supported\n", cmd->cmd);
		ret = -EOPNOTSUPP;
	}

	return ret;
}
3526
/* ethtool operations for plain MACB devices: no hardware statistics or
 * flow-steering callbacks (compare gem_ethtool_ops below).
 */
static const struct ethtool_ops macb_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_wol		= macb_get_wol,
	.set_wol		= macb_set_wol,
	.get_link_ksettings     = macb_get_link_ksettings,
	.set_link_ksettings     = macb_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
};
3539
/* ethtool operations for GEM devices: adds hardware statistics,
 * timestamping info and RX flow classification on top of the MACB set.
 */
static const struct ethtool_ops gem_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_wol		= macb_get_wol,
	.set_wol		= macb_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= macb_get_ts_info,
	.get_ethtool_stats	= gem_get_ethtool_stats,
	.get_strings		= gem_get_ethtool_strings,
	.get_sset_count		= gem_get_sset_count,
	.get_link_ksettings     = macb_get_link_ksettings,
	.set_link_ksettings     = macb_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
	.get_rxnfc			= gem_get_rxnfc,
	.set_rxnfc			= gem_set_rxnfc,
};
3557
/* ndo_eth_ioctl: hardware timestamping ioctls go to the PTP backend when
 * one is attached; everything else (MII register access) is handled by
 * phylink.  Only valid while the interface is up.
 */
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct macb *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (bp->ptp_info) {
		switch (cmd) {
		case SIOCSHWTSTAMP:
			return bp->ptp_info->set_hwtst(dev, rq, cmd);
		case SIOCGHWTSTAMP:
			return bp->ptp_info->get_hwtst(dev, rq);
		}
	}

	return phylink_mii_ioctl(bp->phylink, rq, cmd);
}
3576
3577static inline void macb_set_txcsum_feature(struct macb *bp,
3578 netdev_features_t features)
3579{
3580 u32 val;
3581
3582 if (!macb_is_gem(bp))
3583 return;
3584
3585 val = gem_readl(bp, DMACFG);
3586 if (features & NETIF_F_HW_CSUM)
3587 val |= GEM_BIT(TXCOEN);
3588 else
3589 val &= ~GEM_BIT(TXCOEN);
3590
3591 gem_writel(bp, DMACFG, val);
3592}
3593
3594static inline void macb_set_rxcsum_feature(struct macb *bp,
3595 netdev_features_t features)
3596{
3597 struct net_device *netdev = bp->dev;
3598 u32 val;
3599
3600 if (!macb_is_gem(bp))
3601 return;
3602
3603 val = gem_readl(bp, NCFGR);
3604 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC))
3605 val |= GEM_BIT(RXCOEN);
3606 else
3607 val &= ~GEM_BIT(RXCOEN);
3608
3609 gem_writel(bp, NCFGR, val);
3610}
3611
3612static inline void macb_set_rxflow_feature(struct macb *bp,
3613 netdev_features_t features)
3614{
3615 if (!macb_is_gem(bp))
3616 return;
3617
3618 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
3619}
3620
/* ndo_set_features: push each changed offload flag down to the matching
 * hardware-configuration helper.
 */
static int macb_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct macb *bp = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	/* TX checksum offload */
	if (changed & NETIF_F_HW_CSUM)
		macb_set_txcsum_feature(bp, features);

	/* RX checksum offload */
	if (changed & NETIF_F_RXCSUM)
		macb_set_rxcsum_feature(bp, features);

	/* RX flow filters (n-tuple) */
	if (changed & NETIF_F_NTUPLE)
		macb_set_rxflow_feature(bp, features);

	return 0;
}
3641
/* Re-apply the current offload settings and reprogram every stored
 * flow-steering rule after the hardware has been reset (resume path),
 * since the reset cleared the corresponding registers.
 */
static void macb_restore_features(struct macb *bp)
{
	struct net_device *netdev = bp->dev;
	netdev_features_t features = netdev->features;
	struct ethtool_rx_fs_item *item;

	/* TX checksum offload */
	macb_set_txcsum_feature(bp, features);

	/* RX checksum offload */
	macb_set_rxcsum_feature(bp, features);

	/* Reprogram the compare registers for every stored rule ... */
	list_for_each_entry(item, &bp->rx_fs_list.list, list)
		gem_prog_cmp_regs(bp, &item->fs);

	/* ... then re-enable filtering if NETIF_F_NTUPLE is set */
	macb_set_rxflow_feature(bp, features);
}
3660
/* Network-device operations shared by MACB and GEM variants. */
static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_eth_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= macb_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
	.ndo_set_features	= macb_set_features,
	.ndo_features_check	= macb_features_check,
};
3677
3678
3679
3680
/* Establish bp->caps: start from the (optional) device-tree config, then
 * augment with what the hardware's design-configuration registers
 * report (GEM detection, interrupt clearing, PCS, speed, FIFO mode and
 * PTP support).
 */
static void macb_configure_caps(struct macb *bp,
				const struct macb_config *dt_conf)
{
	u32 dcfg;

	if (dt_conf)
		bp->caps = dt_conf->caps;

	if (hw_is_gem(bp->regs, bp->native_io)) {
		bp->caps |= MACB_CAPS_MACB_IS_GEM;

		dcfg = gem_readl(bp, DCFG1);
		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
		if (GEM_BFEXT(NO_PCS, dcfg) == 0)
			bp->caps |= MACB_CAPS_PCS;
		dcfg = gem_readl(bp, DCFG12);
		if (GEM_BFEXT(HIGH_SPEED, dcfg) == 1)
			bp->caps |= MACB_CAPS_HIGH_SPEED;
		dcfg = gem_readl(bp, DCFG2);
		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
			bp->caps |= MACB_CAPS_FIFO_MODE;
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (gem_has_ptp(bp)) {
			if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
				dev_err(&bp->pdev->dev,
					"GEM doesn't support hardware ptp.\n");
			else {
				/* TSU present: install the PTP backend */
				bp->hw_dma_cap |= HW_DMA_CAP_PTP;
				bp->ptp_info = &gem_ptp_info;
			}
		}
#endif
	}

	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}
3718
/* Discover how many queues the controller implements and which queue
 * indices are populated.  Plain MACB always has exactly one queue.
 */
static void macb_probe_queues(void __iomem *mem,
			      bool native_io,
			      unsigned int *queue_mask,
			      unsigned int *num_queues)
{
	*queue_mask = 0x1;
	*num_queues = 1;

	/* We read directly from the hardware here (rather than using
	 * macb_is_gem()) because this runs early in probe, before
	 * MACB_CAPS_MACB_IS_GEM has been established.
	 */
	if (!hw_is_gem(mem, native_io))
		return;

	/* DCFG6 reports the populated queues; queue 0 always exists */
	*queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff;
	*num_queues = hweight32(*queue_mask);
}
3740
/* Disable and unprepare all controller clocks in one shot.  Optional
 * clocks may be NULL; the clk API treats NULL clocks as no-ops.
 */
static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk,
			      struct clk *rx_clk, struct clk *tsu_clk)
{
	struct clk_bulk_data clks[] = {
		{ .clk = tsu_clk, },
		{ .clk = rx_clk, },
		{ .clk = pclk, },
		{ .clk = hclk, },
		{ .clk = tx_clk },
	};

	clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);
}
3754
/* Acquire and enable the controller clocks.  pclk and hclk are
 * mandatory and come either from platform data or from DT; tx_clk,
 * rx_clk and tsu_clk are optional (NULL when absent).  On success all
 * clocks are prepared and enabled; on failure everything already
 * enabled is unwound in reverse order.
 */
static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
			 struct clk **hclk, struct clk **tx_clk,
			 struct clk **rx_clk, struct clk **tsu_clk)
{
	struct macb_platform_data *pdata;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		*pclk = pdata->pclk;
		*hclk = pdata->hclk;
	} else {
		*pclk = devm_clk_get(&pdev->dev, "pclk");
		*hclk = devm_clk_get(&pdev->dev, "hclk");
	}

	/* Platform-data clocks may legitimately be NULL; treat that as
	 * missing too (-ENODEV) rather than dereferencing it later.
	 */
	if (IS_ERR_OR_NULL(*pclk))
		return dev_err_probe(&pdev->dev,
				     IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV,
				     "failed to get pclk\n");

	if (IS_ERR_OR_NULL(*hclk))
		return dev_err_probe(&pdev->dev,
				     IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV,
				     "failed to get hclk\n");

	*tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		return PTR_ERR(*tx_clk);

	*rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
	if (IS_ERR(*rx_clk))
		return PTR_ERR(*rx_clk);

	*tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
	if (IS_ERR(*tsu_clk))
		return PTR_ERR(*tsu_clk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*hclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
		goto err_disable_pclk;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_hclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*tsu_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);

err_disable_txclk:
	clk_disable_unprepare(*tx_clk);

err_disable_hclk:
	clk_disable_unprepare(*hclk);

err_disable_pclk:
	clk_disable_unprepare(*pclk);

	return err;
}
3839
3840static int macb_init(struct platform_device *pdev)
3841{
3842 struct net_device *dev = platform_get_drvdata(pdev);
3843 unsigned int hw_q, q;
3844 struct macb *bp = netdev_priv(dev);
3845 struct macb_queue *queue;
3846 int err;
3847 u32 val, reg;
3848
3849 bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
3850 bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
3851
3852
3853
3854
3855
3856 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
3857 if (!(bp->queue_mask & (1 << hw_q)))
3858 continue;
3859
3860 queue = &bp->queues[q];
3861 queue->bp = bp;
3862 netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
3863 if (hw_q) {
3864 queue->ISR = GEM_ISR(hw_q - 1);
3865 queue->IER = GEM_IER(hw_q - 1);
3866 queue->IDR = GEM_IDR(hw_q - 1);
3867 queue->IMR = GEM_IMR(hw_q - 1);
3868 queue->TBQP = GEM_TBQP(hw_q - 1);
3869 queue->RBQP = GEM_RBQP(hw_q - 1);
3870 queue->RBQS = GEM_RBQS(hw_q - 1);
3871#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3872 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
3873 queue->TBQPH = GEM_TBQPH(hw_q - 1);
3874 queue->RBQPH = GEM_RBQPH(hw_q - 1);
3875 }
3876#endif
3877 } else {
3878
3879 queue->ISR = MACB_ISR;
3880 queue->IER = MACB_IER;
3881 queue->IDR = MACB_IDR;
3882 queue->IMR = MACB_IMR;
3883 queue->TBQP = MACB_TBQP;
3884 queue->RBQP = MACB_RBQP;
3885#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3886 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
3887 queue->TBQPH = MACB_TBQPH;
3888 queue->RBQPH = MACB_RBQPH;
3889 }
3890#endif
3891 }
3892
3893
3894
3895
3896
3897
3898 queue->irq = platform_get_irq(pdev, q);
3899 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
3900 IRQF_SHARED, dev->name, queue);
3901 if (err) {
3902 dev_err(&pdev->dev,
3903 "Unable to request IRQ %d (error %d)\n",
3904 queue->irq, err);
3905 return err;
3906 }
3907
3908 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
3909 q++;
3910 }
3911
3912 dev->netdev_ops = &macb_netdev_ops;
3913
3914
3915 if (macb_is_gem(bp)) {
3916 bp->max_tx_length = GEM_MAX_TX_LEN;
3917 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
3918 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
3919 bp->macbgem_ops.mog_init_rings = gem_init_rings;
3920 bp->macbgem_ops.mog_rx = gem_rx;
3921 dev->ethtool_ops = &gem_ethtool_ops;
3922 } else {
3923 bp->max_tx_length = MACB_MAX_TX_LEN;
3924 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
3925 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
3926 bp->macbgem_ops.mog_init_rings = macb_init_rings;
3927 bp->macbgem_ops.mog_rx = macb_rx;
3928 dev->ethtool_ops = &macb_ethtool_ops;
3929 }
3930
3931
3932 dev->hw_features = NETIF_F_SG;
3933
3934
3935 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
3936 dev->hw_features |= MACB_NETIF_LSO;
3937
3938
3939 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
3940 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
3941 if (bp->caps & MACB_CAPS_SG_DISABLED)
3942 dev->hw_features &= ~NETIF_F_SG;
3943 dev->features = dev->hw_features;
3944
3945
3946
3947
3948
3949 reg = gem_readl(bp, DCFG8);
3950 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
3951 GEM_BFEXT(T2SCR, reg));
3952 INIT_LIST_HEAD(&bp->rx_fs_list.list);
3953 if (bp->max_tuples > 0) {
3954
3955 if (GEM_BFEXT(SCR2ETH, reg) > 0) {
3956
3957 reg = 0;
3958 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
3959 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
3960
3961 dev->hw_features |= NETIF_F_NTUPLE;
3962
3963 bp->rx_fs_list.count = 0;
3964 spin_lock_init(&bp->rx_fs_lock);
3965 } else
3966 bp->max_tuples = 0;
3967 }
3968
3969 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
3970 val = 0;
3971 if (phy_interface_mode_is_rgmii(bp->phy_interface))
3972 val = bp->usrio->rgmii;
3973 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
3974 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
3975 val = bp->usrio->rmii;
3976 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
3977 val = bp->usrio->mii;
3978
3979 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
3980 val |= bp->usrio->refclk;
3981
3982 macb_or_gem_writel(bp, USRIO, val);
3983 }
3984
3985
3986 val = macb_mdc_clk_div(bp);
3987 val |= macb_dbw(bp);
3988 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
3989 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
3990 macb_writel(bp, NCFGR, val);
3991
3992 return 0;
3993}
3994
/* Default USRIO bit layout shared by most MACB/GEM integrations */
static const struct macb_usrio_config macb_default_usrio = {
	.mii = MACB_BIT(MII),
	.rmii = MACB_BIT(RMII),
	.rgmii = GEM_BIT(RGMII),
	.refclk = MACB_BIT(CLKEN),
};
4001
4002#if defined(CONFIG_OF)
4003
4004#define AT91ETHER_MAX_RBUFF_SZ 0x600
4005
4006#define AT91ETHER_MAX_RX_DESCR 9
4007
4008static struct sifive_fu540_macb_mgmt *mgmt;
4009
4010static int at91ether_alloc_coherent(struct macb *lp)
4011{
4012 struct macb_queue *q = &lp->queues[0];
4013
4014 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
4015 (AT91ETHER_MAX_RX_DESCR *
4016 macb_dma_desc_get_size(lp)),
4017 &q->rx_ring_dma, GFP_KERNEL);
4018 if (!q->rx_ring)
4019 return -ENOMEM;
4020
4021 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
4022 AT91ETHER_MAX_RX_DESCR *
4023 AT91ETHER_MAX_RBUFF_SZ,
4024 &q->rx_buffers_dma, GFP_KERNEL);
4025 if (!q->rx_buffers) {
4026 dma_free_coherent(&lp->pdev->dev,
4027 AT91ETHER_MAX_RX_DESCR *
4028 macb_dma_desc_get_size(lp),
4029 q->rx_ring, q->rx_ring_dma);
4030 q->rx_ring = NULL;
4031 return -ENOMEM;
4032 }
4033
4034 return 0;
4035}
4036
4037static void at91ether_free_coherent(struct macb *lp)
4038{
4039 struct macb_queue *q = &lp->queues[0];
4040
4041 if (q->rx_ring) {
4042 dma_free_coherent(&lp->pdev->dev,
4043 AT91ETHER_MAX_RX_DESCR *
4044 macb_dma_desc_get_size(lp),
4045 q->rx_ring, q->rx_ring_dma);
4046 q->rx_ring = NULL;
4047 }
4048
4049 if (q->rx_buffers) {
4050 dma_free_coherent(&lp->pdev->dev,
4051 AT91ETHER_MAX_RX_DESCR *
4052 AT91ETHER_MAX_RBUFF_SZ,
4053 q->rx_buffers, q->rx_buffers_dma);
4054 q->rx_buffers = NULL;
4055 }
4056}
4057
4058
4059static int at91ether_start(struct macb *lp)
4060{
4061 struct macb_queue *q = &lp->queues[0];
4062 struct macb_dma_desc *desc;
4063 dma_addr_t addr;
4064 u32 ctl;
4065 int i, ret;
4066
4067 ret = at91ether_alloc_coherent(lp);
4068 if (ret)
4069 return ret;
4070
4071 addr = q->rx_buffers_dma;
4072 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
4073 desc = macb_rx_desc(q, i);
4074 macb_set_addr(lp, desc, addr);
4075 desc->ctrl = 0;
4076 addr += AT91ETHER_MAX_RBUFF_SZ;
4077 }
4078
4079
4080 desc->addr |= MACB_BIT(RX_WRAP);
4081
4082
4083 q->rx_tail = 0;
4084
4085
4086 macb_writel(lp, RBQP, q->rx_ring_dma);
4087
4088
4089 ctl = macb_readl(lp, NCR);
4090 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
4091
4092
4093 macb_writel(lp, IER, MACB_BIT(RCOMP) |
4094 MACB_BIT(RXUBR) |
4095 MACB_BIT(ISR_TUND) |
4096 MACB_BIT(ISR_RLE) |
4097 MACB_BIT(TCOMP) |
4098 MACB_BIT(ISR_ROVR) |
4099 MACB_BIT(HRESP));
4100
4101 return 0;
4102}
4103
4104static void at91ether_stop(struct macb *lp)
4105{
4106 u32 ctl;
4107
4108
4109 macb_writel(lp, IDR, MACB_BIT(RCOMP) |
4110 MACB_BIT(RXUBR) |
4111 MACB_BIT(ISR_TUND) |
4112 MACB_BIT(ISR_RLE) |
4113 MACB_BIT(TCOMP) |
4114 MACB_BIT(ISR_ROVR) |
4115 MACB_BIT(HRESP));
4116
4117
4118 ctl = macb_readl(lp, NCR);
4119 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
4120
4121
4122 at91ether_free_coherent(lp);
4123}
4124
4125
4126static int at91ether_open(struct net_device *dev)
4127{
4128 struct macb *lp = netdev_priv(dev);
4129 u32 ctl;
4130 int ret;
4131
4132 ret = pm_runtime_get_sync(&lp->pdev->dev);
4133 if (ret < 0) {
4134 pm_runtime_put_noidle(&lp->pdev->dev);
4135 return ret;
4136 }
4137
4138
4139 ctl = macb_readl(lp, NCR);
4140 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
4141
4142 macb_set_hwaddr(lp);
4143
4144 ret = at91ether_start(lp);
4145 if (ret)
4146 goto pm_exit;
4147
4148 ret = macb_phylink_connect(lp);
4149 if (ret)
4150 goto stop;
4151
4152 netif_start_queue(dev);
4153
4154 return 0;
4155
4156stop:
4157 at91ether_stop(lp);
4158pm_exit:
4159 pm_runtime_put_sync(&lp->pdev->dev);
4160 return ret;
4161}
4162
4163
/* Close the ethernet interface (ndo_stop): quiesce the queue, detach the
 * PHY, stop the MAC and drop the runtime-PM reference taken in open.
 */
static int at91ether_close(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	at91ether_stop(lp);

	/* Balances the pm_runtime get in at91ether_open() */
	return pm_runtime_put(&lp->pdev->dev);
}
4177
4178
/* Transmit a packet (ndo_start_xmit).
 *
 * The RM9200 EMAC has no TX descriptor ring: there is a single in-flight
 * slot (rm9200_txq[0]). The queue is stopped after each packet and woken
 * from the TCOMP interrupt, so the "busy" branch below should not normally
 * be reachable.
 */
static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);

	/* RM9200_BNQ set means the transmit buffer register can accept
	 * a new frame.
	 */
	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
		int desc = 0;

		netif_stop_queue(dev);

		/* Stash the skb so the TCOMP handler can unmap/free it */
		lp->rm9200_txq[desc].skb = skb;
		lp->rm9200_txq[desc].size = skb->len;
		lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data,
							      skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			netdev_err(dev, "%s: DMA mapping error\n", __func__);
			return NETDEV_TX_OK;
		}

		/* Set address of the data in the Transmit Address register */
		macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping);
		/* Writing the length register starts the transmission */
		macb_writel(lp, TCR, skb->len);

	} else {
		netdev_err(dev, "%s called, but device is busy!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}
4213
4214
4215
4216
4217static void at91ether_rx(struct net_device *dev)
4218{
4219 struct macb *lp = netdev_priv(dev);
4220 struct macb_queue *q = &lp->queues[0];
4221 struct macb_dma_desc *desc;
4222 unsigned char *p_recv;
4223 struct sk_buff *skb;
4224 unsigned int pktlen;
4225
4226 desc = macb_rx_desc(q, q->rx_tail);
4227 while (desc->addr & MACB_BIT(RX_USED)) {
4228 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
4229 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
4230 skb = netdev_alloc_skb(dev, pktlen + 2);
4231 if (skb) {
4232 skb_reserve(skb, 2);
4233 skb_put_data(skb, p_recv, pktlen);
4234
4235 skb->protocol = eth_type_trans(skb, dev);
4236 dev->stats.rx_packets++;
4237 dev->stats.rx_bytes += pktlen;
4238 netif_rx(skb);
4239 } else {
4240 dev->stats.rx_dropped++;
4241 }
4242
4243 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
4244 dev->stats.multicast++;
4245
4246
4247 desc->addr &= ~MACB_BIT(RX_USED);
4248
4249
4250 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
4251 q->rx_tail = 0;
4252 else
4253 q->rx_tail++;
4254
4255 desc = macb_rx_desc(q, q->rx_tail);
4256 }
4257}
4258
4259
/* EMAC interrupt handler: services RX completion, TX completion/errors,
 * RX used-bit underruns and overruns from a single status read.
 */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *lp = netdev_priv(dev);
	u32 intstatus, ctl;
	unsigned int desc;

	/* NOTE(review): a single ISR read is relied on to latch all pending
	 * causes; on this IP reading ISR appears to acknowledge them —
	 * confirm against the EMAC datasheet before restructuring.
	 */
	intstatus = macb_readl(lp, ISR);

	/* Frame(s) received */
	if (intstatus & MACB_BIT(RCOMP))
		at91ether_rx(dev);

	/* Frame transmitted */
	if (intstatus & MACB_BIT(TCOMP)) {
		/* TX underrun / retry-limit-exceeded count as errors */
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
			dev->stats.tx_errors++;

		/* Only one TX slot exists on this hardware (see xmit) */
		desc = 0;
		if (lp->rm9200_txq[desc].skb) {
			dev_consume_skb_irq(lp->rm9200_txq[desc].skb);
			lp->rm9200_txq[desc].skb = NULL;
			dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping,
					 lp->rm9200_txq[desc].size, DMA_TO_DEVICE);
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += lp->rm9200_txq[desc].size;
		}
		netif_wake_queue(dev);
	}

	/* Work-around for an EMAC erratum: on RXUBR the receiver must be
	 * toggled off and on to resynchronize.
	 */
	if (intstatus & MACB_BIT(RXUBR)) {
		ctl = macb_readl(lp, NCR);
		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
		wmb();
		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
	}

	if (intstatus & MACB_BIT(ISR_ROVR))
		netdev_err(dev, "ROVR error\n");

	return IRQ_HANDLED;
}
4307
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler with local IRQs disabled so
 * netconsole and friends can make progress without a real interrupt.
 */
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif
4318
/* netdev callbacks for the RM9200 EMAC; shares the generic MACB helpers
 * for stats, rx-mode and ioctl handling.
 */
static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open = at91ether_open,
	.ndo_stop = at91ether_close,
	.ndo_start_xmit = at91ether_start_xmit,
	.ndo_get_stats = macb_get_stats,
	.ndo_set_rx_mode = macb_set_rx_mode,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_eth_ioctl = macb_ioctl,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = at91ether_poll_controller,
#endif
};
4332
4333static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
4334 struct clk **hclk, struct clk **tx_clk,
4335 struct clk **rx_clk, struct clk **tsu_clk)
4336{
4337 int err;
4338
4339 *hclk = NULL;
4340 *tx_clk = NULL;
4341 *rx_clk = NULL;
4342 *tsu_clk = NULL;
4343
4344 *pclk = devm_clk_get(&pdev->dev, "ether_clk");
4345 if (IS_ERR(*pclk))
4346 return PTR_ERR(*pclk);
4347
4348 err = clk_prepare_enable(*pclk);
4349 if (err) {
4350 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
4351 return err;
4352 }
4353
4354 return 0;
4355}
4356
4357static int at91ether_init(struct platform_device *pdev)
4358{
4359 struct net_device *dev = platform_get_drvdata(pdev);
4360 struct macb *bp = netdev_priv(dev);
4361 int err;
4362
4363 bp->queues[0].bp = bp;
4364
4365 dev->netdev_ops = &at91ether_netdev_ops;
4366 dev->ethtool_ops = &macb_ethtool_ops;
4367
4368 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
4369 0, dev->name, dev);
4370 if (err)
4371 return err;
4372
4373 macb_writel(bp, NCR, 0);
4374
4375 macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG));
4376
4377 return 0;
4378}
4379
/* clk_ops.recalc_rate: report the rate cached by the last set_rate call */
static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	return mgmt->rate;
}
4385
/* clk_ops.round_rate: the GEMGXL TX clock only supports the three standard
 * ethernet rates (2.5 / 25 / 125 MHz). Requests are snapped to the nearest
 * supported rate; WARN_ON fires for off-spec requests while still returning
 * a usable rate.
 */
static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long *parent_rate)
{
	if (WARN_ON(rate < 2500000))
		return 2500000;
	else if (rate == 2500000)
		return 2500000;
	else if (WARN_ON(rate < 13750000))
		return 2500000;
	else if (WARN_ON(rate < 25000000))
		return 25000000;
	else if (rate == 25000000)
		return 25000000;
	else if (WARN_ON(rate < 75000000))
		return 25000000;
	else if (WARN_ON(rate < 125000000))
		return 125000000;
	else if (rate == 125000000)
		return 125000000;

	WARN_ON(rate > 125000000);

	return 125000000;
}
4410
4411static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
4412 unsigned long parent_rate)
4413{
4414 rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
4415 if (rate != 125000000)
4416 iowrite32(1, mgmt->reg);
4417 else
4418 iowrite32(0, mgmt->reg);
4419 mgmt->rate = rate;
4420
4421 return 0;
4422}
4423
/* clk_ops for the SiFive FU540 GEMGXL TX-clock mux */
static const struct clk_ops fu540_c000_ops = {
	.recalc_rate = fu540_macb_tx_recalc_rate,
	.round_rate = fu540_macb_tx_round_rate,
	.set_rate = fu540_macb_tx_set_rate,
};
4429
4430static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
4431 struct clk **hclk, struct clk **tx_clk,
4432 struct clk **rx_clk, struct clk **tsu_clk)
4433{
4434 struct clk_init_data init;
4435 int err = 0;
4436
4437 err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
4438 if (err)
4439 return err;
4440
4441 mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
4442 if (!mgmt) {
4443 err = -ENOMEM;
4444 goto err_disable_clks;
4445 }
4446
4447 init.name = "sifive-gemgxl-mgmt";
4448 init.ops = &fu540_c000_ops;
4449 init.flags = 0;
4450 init.num_parents = 0;
4451
4452 mgmt->rate = 0;
4453 mgmt->hw.init = &init;
4454
4455 *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
4456 if (IS_ERR(*tx_clk)) {
4457 err = PTR_ERR(*tx_clk);
4458 goto err_disable_clks;
4459 }
4460
4461 err = clk_prepare_enable(*tx_clk);
4462 if (err) {
4463 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
4464 *tx_clk = NULL;
4465 goto err_disable_clks;
4466 } else {
4467 dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
4468 }
4469
4470 return 0;
4471
4472err_disable_clks:
4473 macb_clks_disable(*pclk, *hclk, *tx_clk, *rx_clk, *tsu_clk);
4474
4475 return err;
4476}
4477
/* FU540 board init: map the second MMIO resource (the GEMGXL management
 * register used by the TX-clock switch), then run the common macb_init().
 */
static int fu540_c000_init(struct platform_device *pdev)
{
	mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(mgmt->reg))
		return PTR_ERR(mgmt->reg);

	return macb_init(pdev);
}
4486
/* SAMA7G5 uses an encoded interface-type field instead of per-mode bits */
static const struct macb_usrio_config sama7g5_usrio = {
	.mii = 0,
	.rmii = 1,
	.rgmii = 2,
	.refclk = BIT(2),
	.hdfctlen = BIT(6),
};
4494
/* SiFive FU540-C000: gigabit GEM with jumbo frames, PTP and a custom
 * TX-clock switch (see fu540_c000_clk_init/fu540_c000_init).
 */
static const struct macb_config fu540_c000_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = fu540_c000_clk_init,
	.init = fu540_c000_init,
	.jumbo_max_len = 10240,
	.usrio = &macb_default_usrio,
};
4504
/* Per-SoC configuration tables, matched via macb_dt_ids below. Each entry
 * selects capabilities, DMA burst length and the clock/board init hooks.
 */
static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
};

static const struct macb_config sama5d3macb_config = {
	.caps = MACB_CAPS_SG_DISABLED
	      | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
};

static const struct macb_config sama5d29_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
	.usrio = &macb_default_usrio,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
};

/* RM9200-style EMAC: no DMA descriptor rings, uses the at91ether_* path */
static const struct macb_config emac_config = {
	.caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
	.usrio = &macb_default_usrio,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
	.usrio = &macb_default_usrio,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
		MACB_CAPS_NEEDS_RSTONUBR,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
};

static const struct macb_config sama7g5_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &sama7g5_usrio,
};

static const struct macb_config sama7g5_emac_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_USRIO_HAS_CLKEN,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &sama7g5_usrio,
};
4611
/* Device-tree match table; entries without .data fall back to
 * default_gem_config in macb_probe().
 */
static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d29-gem", .data = &sama5d29_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
	{ .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
	{ .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
4635#endif
4636
/* Fallback configuration when no DT match data is available */
static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.usrio = &macb_default_usrio,
	.jumbo_max_len = 10240,
};
4647
/* Platform driver probe: map registers, bring up clocks and runtime PM,
 * allocate the netdev, configure capabilities from DT match data, run the
 * per-board init hook, set up MDIO/phylink and register the netdev.
 */
static int macb_probe(struct platform_device *pdev)
{
	const struct macb_config *macb_config = &default_gem_config;
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **, struct clk **,
			struct clk **) = macb_config->clk_init;
	int (*init)(struct platform_device *) = macb_config->init;
	struct device_node *np = pdev->dev.of_node;
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	struct clk *tsu_clk = NULL;
	unsigned int queue_mask, num_queues;
	bool native_io;
	phy_interface_t interface;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	struct macb *bp;
	int err, val;

	mem = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	/* Override the defaults with per-SoC hooks from the DT match */
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
	if (err)
		return err;

	pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	/* Pick register accessors matching the detected endianness */
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	bp->tsu_clk = tsu_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

	/* Wake-on-LAN capability from DT */
	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

	bp->usrio = macb_config->usrio;

	spin_lock_init(&bp->lock);

	/* Read hardware capability bits out of the design-config registers */
	macb_configure_caps(bp, macb_config);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/* 44-bit DMA addressing when the GEM design supports it */
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
		bp->hw_dma_cap |= HW_DMA_CAP_64B;
	}
#endif
	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1500 or 10240 (jumbo) */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if (bp->caps & MACB_CAPS_JUMBO)
		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

	/* Descriptor read-prefetch sizes, in bytes, from DCFG10 */
	if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
		val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);

		val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);
	}

	bp->rx_intr_mask = MACB_RX_INT_FLAGS;
	if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
		bp->rx_intr_mask |= MACB_BIT(RXUBR);

	/* MAC address: DT first, then hardware registers as fallback */
	err = of_get_mac_address(np, bp->dev->dev_addr);
	if (err == -EPROBE_DEFER)
		goto err_out_free_netdev;
	else if (err)
		macb_get_hwaddr(bp);

	err = of_get_phy_mode(np, &interface);
	if (err)
		/* not found in DT, MII by default */
		bp->phy_interface = PHY_INTERFACE_MODE_MII;
	else
		bp->phy_interface = interface;

	/* IP-specific init (macb_init, at91ether_init, fu540_c000_init, ...) */
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);

	return 0;

err_out_unregister_mdio:
	mdiobus_unregister(bp->mii_bus);
	mdiobus_free(bp->mii_bus);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	macb_clks_disable(pclk, hclk, tx_clk, rx_clk, tsu_clk);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	return err;
}
4833
/* Platform driver remove: tear down MDIO, unregister the netdev, stop the
 * error tasklet, drop runtime PM and release the clocks if still running.
 */
static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);

		unregister_netdev(dev);
		tasklet_kill(&bp->hresp_err_tasklet);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		/* Only touch clocks if runtime PM hasn't already gated them */
		if (!pm_runtime_suspended(&pdev->dev)) {
			macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
					  bp->rx_clk, bp->tsu_clk);
			pm_runtime_set_suspended(&pdev->dev);
		}
		phylink_destroy(bp->phylink);
		free_netdev(dev);
	}

	return 0;
}
4861
/* System suspend: if Wake-on-LAN is enabled, swap the normal interrupt
 * handler for a minimal WoL handler and arm the magic-packet logic;
 * otherwise fully quiesce the hardware. NAPI is disabled in either case.
 */
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;
	int err;

	if (!netif_running(netdev))
		return 0;

	if (bp->wol & MACB_WOL_ENABLED) {
		spin_lock_irqsave(&bp->lock, flags);
		/* Flush all status bits */
		macb_writel(bp, TSR, -1);
		macb_writel(bp, RSR, -1);
		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue) {
			/* Disable all interrupts */
			queue_writel(queue, IDR, -1);
			queue_readl(queue, ISR);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
		}

		/* Replace the queue-0 interrupt handler with the WoL-only
		 * handler so a magic packet is the sole wake source.
		 */
		devm_free_irq(dev, bp->queues[0].irq, bp->queues);
		if (macb_is_gem(bp)) {
			err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
					       IRQF_SHARED, netdev->name, bp->queues);
			if (err) {
				dev_err(dev,
					"Unable to request IRQ %d (error %d)\n",
					bp->queues[0].irq, err);
				spin_unlock_irqrestore(&bp->lock, flags);
				return err;
			}
			queue_writel(bp->queues, IER, GEM_BIT(WOL));
			gem_writel(bp, WOL, MACB_BIT(MAG));
		} else {
			err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
					       IRQF_SHARED, netdev->name, bp->queues);
			if (err) {
				dev_err(dev,
					"Unable to request IRQ %d (error %d)\n",
					bp->queues[0].irq, err);
				spin_unlock_irqrestore(&bp->lock, flags);
				return err;
			}
			queue_writel(bp->queues, IER, MACB_BIT(WOL));
			macb_writel(bp, WOL, MACB_BIT(MAG));
		}
		spin_unlock_irqrestore(&bp->lock, flags);

		enable_irq_wake(bp->queues[0].irq);
	}

	netif_device_detach(netdev);
	for (q = 0, queue = bp->queues; q < bp->num_queues;
	     ++q, ++queue)
		napi_disable(&queue->napi);

	/* Without WoL the MAC can be fully reset */
	if (!(bp->wol & MACB_WOL_ENABLED)) {
		rtnl_lock();
		phylink_stop(bp->phylink);
		rtnl_unlock();
		spin_lock_irqsave(&bp->lock, flags);
		macb_reset_hw(bp);
		spin_unlock_irqrestore(&bp->lock, flags);
	}

	/* Save registers that resume must restore */
	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);

	if (netdev->hw_features & NETIF_F_NTUPLE)
		bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(netdev);
	if (!device_may_wakeup(dev))
		pm_runtime_force_suspend(dev);

	return 0;
}
4948
/* System resume: undo macb_suspend() — disarm WoL and restore the normal
 * interrupt handler if needed, re-enable NAPI, restore saved registers and
 * re-initialize the hardware and phylink.
 */
static int __maybe_unused macb_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;
	int err;

	if (!netif_running(netdev))
		return 0;

	if (!device_may_wakeup(dev))
		pm_runtime_force_resume(dev);

	if (bp->wol & MACB_WOL_ENABLED) {
		spin_lock_irqsave(&bp->lock, flags);
		/* Disarm the magic-packet wake logic */
		if (macb_is_gem(bp)) {
			queue_writel(bp->queues, IDR, GEM_BIT(WOL));
			gem_writel(bp, WOL, 0);
		} else {
			queue_writel(bp->queues, IDR, MACB_BIT(WOL));
			macb_writel(bp, WOL, 0);
		}
		/* Clear ISR on drivers with clear-on-read */
		queue_readl(bp->queues, ISR);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(bp->queues, ISR, -1);
		/* Swap the WoL handler back for the normal one */
		devm_free_irq(dev, bp->queues[0].irq, bp->queues);
		err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
				       IRQF_SHARED, netdev->name, bp->queues);
		if (err) {
			dev_err(dev,
				"Unable to request IRQ %d (error %d)\n",
				bp->queues[0].irq, err);
			spin_unlock_irqrestore(&bp->lock, flags);
			return err;
		}
		spin_unlock_irqrestore(&bp->lock, flags);

		disable_irq_wake(bp->queues[0].irq);

		/* Now make sure we disable phy before moving
		 * to common restore path
		 */
		rtnl_lock();
		phylink_stop(bp->phylink);
		rtnl_unlock();
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues;
	     ++q, ++queue)
		napi_enable(&queue->napi);

	/* Restore registers saved in macb_suspend() */
	if (netdev->hw_features & NETIF_F_NTUPLE)
		gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);

	macb_writel(bp, NCR, MACB_BIT(MPE));
	macb_init_hw(bp);
	macb_set_rx_mode(netdev);
	macb_restore_features(bp);
	rtnl_lock();
	phylink_start(bp->phylink);
	rtnl_unlock();

	netif_device_attach(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_init(netdev);

	return 0;
}
5025
5026static int __maybe_unused macb_runtime_suspend(struct device *dev)
5027{
5028 struct net_device *netdev = dev_get_drvdata(dev);
5029 struct macb *bp = netdev_priv(netdev);
5030
5031 if (!(device_may_wakeup(dev)))
5032 macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
5033 else
5034 macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);
5035
5036 return 0;
5037}
5038
5039static int __maybe_unused macb_runtime_resume(struct device *dev)
5040{
5041 struct net_device *netdev = dev_get_drvdata(dev);
5042 struct macb *bp = netdev_priv(netdev);
5043
5044 if (!(device_may_wakeup(dev))) {
5045 clk_prepare_enable(bp->pclk);
5046 clk_prepare_enable(bp->hclk);
5047 clk_prepare_enable(bp->tx_clk);
5048 clk_prepare_enable(bp->rx_clk);
5049 }
5050 clk_prepare_enable(bp->tsu_clk);
5051
5052 return 0;
5053}
5054
/* Power-management callbacks: system sleep plus runtime PM clock gating */
static const struct dev_pm_ops macb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
	SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
};
5059
/* Platform driver glue; devices are matched by name or via devicetree */
static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm	= &macb_pm_ops,
	},
};
5069
/* Register the driver and expose standard module metadata */
module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");
5076