/* Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include "macb.h"

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64 /* bytes */

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)	\
				 | MACB_BIT(ISR_RLE)	\
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

/* DMA buffer descriptor might be different size
 * depends on hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}
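
/* Worked example (illustrative): extended descriptors occupy extra 32-bit
 * word pairs directly behind the two base words, so the ring arrays are
 * indexed in units of struct macb_dma_desc.  With HW_DMA_CAP_64B_PTP each
 * hardware descriptor spans 3 such units (2 + 2 + 2 words), hence logical
 * descriptor 2 lives at array index 2 * 3 == 6, as computed above.
 */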

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
	return NULL;
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
			macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}
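
/* Worked example (illustrative, assuming tx_ring_size == 512): head/tail
 * indices are free-running counters reduced modulo the ring size by the
 * power-of-two mask, so index 515 wraps to 515 & 511 == 3, and the scheme
 * keeps working across unsigned integer overflow of the counters.
 */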

static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of NCR register.
 * When the CPU is in big endian we need to program swapped mode for
 * management descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}
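
/* Note (assumption, inferred from the check above): the MID register's
 * IDNUM field encodes the IP revision; values below 0x2 identify the
 * 10/100 MACB core while 0x2 and above identify GEM, so this one field is
 * enough to select between the two register layouts at probe time.
 */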

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address register for valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}
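
/* Usage sketch (illustrative): the MAN register carries a full IEEE 802.3
 * clause-22 management frame: start-of-frame, read/write opcode, 5-bit PHY
 * address, 5-bit register address, turnaround code and 16 data bits.  E.g.
 * macb_mdio_read(bus, 0, MII_PHYSID1) reads PHY identifier register 2 of
 * the PHY at address 0.
 */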

/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk		Pointer to the clock to change
 * @speed	New frequency in Hz
 * @dev		Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}

static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000 &&
			    bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */
			macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);

			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	struct device_node *np;
	int phy_irq, ret, i;

	pdata = dev_get_platdata(&bp->pdev->dev);
	np = bp->pdev->dev.of_node;
	ret = 0;

	if (np) {
		if (of_phy_is_fixed_link(np)) {
			if (of_phy_register_fixed_link(np) < 0) {
				dev_err(&bp->pdev->dev,
					"broken fixed-link specification\n");
				return -ENODEV;
			}
			bp->phy_node = of_node_get(np);
		} else {
			bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
			/* fallback to standard phy registration if no
			 * phy-handle was found nor any phy found during
			 * dt phy registration
			 */
			if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
				for (i = 0; i < PHY_MAX_ADDR; i++) {
					struct phy_device *phydev;

					phydev = mdiobus_scan(bp->mii_bus, i);
					if (IS_ERR(phydev) &&
					    PTR_ERR(phydev) != -ENODEV) {
						ret = PTR_ERR(phydev);
						break;
					}
				}

				if (ret)
					return -ENODEV;
			}
		}
	}

	if (bp->phy_node) {
		phydev = of_phy_connect(dev, bp->phy_node,
					&macb_handle_link_change, 0,
					bp->phy_interface);
		if (!phydev)
			return -ENODEV;
	} else {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		if (pdata) {
			if (gpio_is_valid(pdata->phy_irq_pin)) {
				ret = devm_gpio_request(&bp->pdev->dev,
							pdata->phy_irq_pin, "phy int");
				if (!ret) {
					phy_irq = gpio_to_irq(pdata->phy_irq_pin);
					phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
				}
			} else {
				phydev->irq = PHY_POLL;
			}
		}

		/* attach the mac to the phy */
		ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
					 bp->phy_interface);
		if (ret) {
			netdev_err(dev, "Could not attach to PHY\n");
			return ret;
		}
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
		phydev->supported &= ~SUPPORTED_1000baseT_Half;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
}

static int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;

	if (np) {
		err = of_mdiobus_register(bp->mii_bus, np);
	} else {
		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = mdiobus_register(bp->mii_bus);
	}

	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
	if (np && of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
err_out_free_mdiobus:
	of_node_put(bp->phy_node);
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}

static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}

static int macb_halt_tx(struct macb *bp)
{
	unsigned long	halt_time, timeout;
	u32		status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		/* Busy-wait: this runs under bp->lock with IRQs off
		 * (see macb_tx_error_task()), so we must not sleep here.
		 */
		udelay(250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}
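
/* Worked equation (illustrative): the MACB_HALT_TIMEOUT budget of 1230 us
 * roughly equals one maximum-size frame time at the slowest supported rate:
 * 1518 bytes * 8 bits / 10 Mbit/s ~= 1214 us, plus a little slack.
 */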

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
	}
#endif
	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}
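
/* Worked example (illustrative): with 64-bit DMA the buffer address is split
 * across two descriptor words, e.g. 0x1_2345_6000 is stored as addrh == 0x1
 * and addr == 0x23456000; on the read side the RX_WADDR masking strips the
 * RX_USED/RX_WRAP status bits the hardware keeps in the low bits of
 * desc->addr.
 */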

static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue	*queue = container_of(work, struct macb_queue,
						      tx_error_task);
	struct macb		*bp = queue->bp;
	struct macb_tx_skb	*tx_skb;
	struct macb_dma_desc	*desc;
	struct sk_buff		*skb;
	unsigned int		tail;
	unsigned long		flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * networking stack that we'd better not transmit anything for the
	 * moment.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32	ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb	*tx_skb;
		struct sk_buff		*skb;
		struct macb_dma_desc	*desc;
		u32			ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
					/* skb now belongs to timestamp buffer
					 * and will be removed later
					 */
					tx_skb->skb = NULL;
				}
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
}

static void gem_rx_refill(struct macb_queue *queue)
{
	unsigned int		entry;
	struct sk_buff		*skb;
	dma_addr_t		paddr;
	struct macb *bp = queue->bp;
	struct macb_dma_desc *desc;

	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		queue->rx_prepared_head++;
		desc = macb_rx_desc(queue, entry);

		if (!queue->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			queue->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			macb_set_addr(bp, desc, paddr);
			desc->ctrl = 0;

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->addr &= ~MACB_BIT(RX_USED);
			desc->ctrl = 0;
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
		    queue, queue->rx_prepared_head, queue->rx_tail);
}
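
/* Worked example (illustrative): CIRC_SPACE(head, tail, size) from
 * <linux/circ_buf.h> expands to ((tail) - ((head) + 1)) & ((size) - 1),
 * the number of slots that can be prepared before head would catch up with
 * tail; e.g. head == 5, tail == 2, size == 512 yields 508 free slots.
 */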

/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this is updated, so we don't have to record
	 * anything.
	 */
}

static int gem_rx(struct macb_queue *queue, int budget)
{
	struct macb *bp = queue->bp;
	unsigned int		len;
	unsigned int		entry;
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	int			count = 0;

	while (count < budget) {
		u32 ctrl;
		dma_addr_t addr;
		bool rxused;

		entry = macb_rx_ring_wrap(bp, queue->rx_tail);
		desc = macb_rx_desc(queue, entry);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
		addr = macb_get_addr(bp, desc);
		ctrl = desc->ctrl;

		if (!rxused)
			break;

		queue->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		skb = queue->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->dev->stats.rx_dropped++;
			queue->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		queue->rx_skbuff[entry] = NULL;
		len = ctrl & bp->rx_frm_len_mask;

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		dma_unmap_single(&bp->pdev->dev, addr,
				 bp->rx_buffer_size, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);
		if (bp->dev->features & NETIF_F_RXCSUM &&
		    !(bp->dev->flags & IFF_PROMISC) &&
		    GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		bp->dev->stats.rx_packets++;
		queue->stats.rx_packets++;
		bp->dev->stats.rx_bytes += skb->len;
		queue->stats.rx_bytes += skb->len;

		gem_ptp_do_rxstamp(bp, skb, desc);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb_mac_header(skb), 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(queue);

	return count;
}

static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	struct macb *bp = queue->bp;

	desc = macb_rx_desc(queue, last_frag);
	len = desc->ctrl & bp->rx_frm_len_mask;

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(bp, first_frag),
		    macb_rx_ring_wrap(bp, last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->dev->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(queue, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			if (unlikely(frag != last_frag)) {
				dev_kfree_skb_any(skb);
				return -1;
			}
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(queue, frag),
					       frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(queue, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->dev->stats.rx_packets++;
	bp->dev->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}
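
/* Worked example (illustrative, assuming NET_IP_ALIGN == 2, the common
 * value): the 14-byte Ethernet header then ends at offset 2 + 14 == 16, a
 * multiple of 4, so the IP header that follows is word-aligned as the
 * comment above describes.
 */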

static inline void macb_init_rx_ring(struct macb_queue *queue)
{
	struct macb *bp = queue->bp;
	dma_addr_t addr;
	struct macb_dma_desc *desc = NULL;
	int i;

	addr = queue->rx_buffers_dma;
	for (i = 0; i < bp->rx_ring_size; i++) {
		desc = macb_rx_desc(queue, i);
		macb_set_addr(bp, desc, addr);
		desc->ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	desc->addr |= MACB_BIT(RX_WRAP);
	queue->rx_tail = 0;
}

static int macb_rx(struct macb_queue *queue, int budget)
{
	struct macb *bp = queue->bp;
	bool reset_rx_queue = false;
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = queue->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
		u32 ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		if (!(desc->addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(queue, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			if (unlikely(first_frag == -1)) {
				reset_rx_queue = true;
				continue;
			}

			dropped = macb_rx_frame(queue, first_frag, tail);
			first_frag = -1;
			if (unlikely(dropped < 0)) {
				reset_rx_queue = true;
				continue;
			}
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (unlikely(reset_rx_queue)) {
		unsigned long flags;
		u32 ctrl;

		netdev_err(bp->dev, "RX queue corruption: reset it\n");

		spin_lock_irqsave(&bp->lock, flags);

		ctrl = macb_readl(bp, NCR);
		macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

		macb_init_rx_ring(queue);
		queue_writel(queue, RBQP, queue->rx_ring_dma);

		macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

		spin_unlock_irqrestore(&bp->lock, flags);
		return received;
	}

	if (first_frag != -1)
		queue->rx_tail = first_frag;
	else
		queue->rx_tail = tail;

	return received;
}

static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
	struct macb *bp = queue->bp;
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(queue, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
		} else {
			queue_writel(queue, IER, MACB_RX_INT_FLAGS);
		}
	}

	/* TODO: Handle errors */

	return work_done;
}

static void macb_hresp_error_task(unsigned long data)
{
	struct macb *bp = (struct macb *)data;
	struct net_device *dev = bp->dev;
	struct macb_queue *queue = bp->queues;
	unsigned int q;
	u32 ctrl;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
					 MACB_TX_INT_FLAGS |
					 MACB_BIT(HRESP));
	}
	ctrl = macb_readl(bp, NCR);
	ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	bp->macbgem_ops.mog_init_rings(bp);

	/* Initialize TX and RX buffers */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH,
				     upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH,
				     upper_32_bits(queue->tx_ring_dma));
#endif

		/* Enable interrupts */
		queue_writel(queue, IER,
			     MACB_RX_INT_FLAGS |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
	macb_writel(bp, NCR, ctrl);

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
}

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	struct net_device *dev = bp->dev;
	u32 status, ctrl;

	status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
		/* close possible race with dev_close */
		if (unlikely(!netif_running(dev))) {
			queue_writel(queue, IDR, -1);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)status);

		if (status & MACB_RX_INT_FLAGS) {
			/* There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */
			queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&queue->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&queue->napi);
			}
		}

		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&queue->tx_error_task);

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);

			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(queue);

		/* There is a hardware issue under heavy load where DMA can
		 * stop, this causes endless "used buffer descriptor read"
		 * interrupts but it can be cleared by re-enabling RX. See
		 * the at91 manual, section 41.3.1 or the Zynq manual
		 * section 16.7.4 for details.
		 */
		if (status & MACB_BIT(RXUBR)) {
			ctrl = macb_readl(bp, NCR);
			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
			wmb();
			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RXUBR));
		}

		if (status & MACB_BIT(ISR_ROVR)) {
			/* We missed at least one packet */
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {
			tasklet_schedule(&bp->hresp_err_tasklet);
			netdev_err(dev, "DMA bus error: HRESP not OK\n");

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(HRESP));
		}
		status = queue_readl(queue, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void macb_poll_controller(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	local_irq_save(flags);
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		macb_interrupt(dev->irq, queue);
	local_irq_restore(flags);
}
#endif

static unsigned int macb_tx_map(struct macb *bp,
				struct macb_queue *queue,
				struct sk_buff *skb,
				unsigned int hdrlen)
{
	dma_addr_t mapping;
	unsigned int len, entry, i, tx_head = queue->tx_head;
	struct macb_tx_skb *tx_skb = NULL;
	struct macb_dma_desc *desc;
	unsigned int offset, size, count = 0;
	unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int eof = 1, mss_mfs = 0;
	u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;

	/* LSO */
	if (skb_shinfo(skb)->gso_size != 0) {
		if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			/* UDP - UFO */
			lso_ctrl = MACB_LSO_UFO_ENABLE;
		else
			/* TCP - TSO */
			lso_ctrl = MACB_LSO_TSO_ENABLE;
	}

	/* First, map non-paged data */
	len = skb_headlen(skb);

	/* first buffer length */
	size = hdrlen;

	offset = 0;
	while (len) {
		entry = macb_tx_ring_wrap(bp, tx_head);
		tx_skb = &queue->tx_skb[entry];

		mapping = dma_map_single(&bp->pdev->dev,
					 skb->data + offset,
					 size, DMA_TO_DEVICE);
		if (dma_mapping_error(&bp->pdev->dev, mapping))
			goto dma_error;

		/* Save info to properly release resources */
		tx_skb->skb = NULL;
		tx_skb->mapping = mapping;
		tx_skb->size = size;
		tx_skb->mapped_as_page = false;

		len -= size;
		offset += size;
		count++;
		tx_head++;

		size = min(len, bp->max_tx_length);
	}

	/* Then, map paged data from fragments */
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		len = skb_frag_size(frag);
		offset = 0;
		while (len) {
			size = min(len, bp->max_tx_length);
			entry = macb_tx_ring_wrap(bp, tx_head);
			tx_skb = &queue->tx_skb[entry];

			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
						   offset, size, DMA_TO_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, mapping))
				goto dma_error;

			/* Save info to properly release resources */
			tx_skb->skb = NULL;
			tx_skb->mapping = mapping;
			tx_skb->size = size;
			tx_skb->mapped_as_page = true;

			len -= size;
			offset += size;
			count++;
			tx_head++;
		}
	}

	/* Should never happen */
	if (unlikely(!tx_skb)) {
		netdev_err(bp->dev, "BUG! empty skb!\n");
		return 0;
	}

	/* This is the last buffer of the frame: save socket buffer */
	tx_skb->skb = skb;

	/* Update TX ring: update buffer descriptors in reverse order
	 * to avoid race condition
	 */

	/* Set 'TX_USED' bit in buffer descriptor at tx_head position
	 * to set the end of TX queue
	 */
	i = tx_head;
	entry = macb_tx_ring_wrap(bp, i);
	ctrl = MACB_BIT(TX_USED);
	desc = macb_tx_desc(queue, entry);
	desc->ctrl = ctrl;

	if (lso_ctrl) {
		if (lso_ctrl == MACB_LSO_UFO_ENABLE)
			/* include header and FCS in value given to h/w */
			mss_mfs = skb_shinfo(skb)->gso_size +
					skb_transport_offset(skb) +
					ETH_FCS_LEN;
		else /* TSO */ {
			mss_mfs = skb_shinfo(skb)->gso_size;
			/* TCP Sequence Number Source Select
			 * can be set only for TSO
			 */
			seq_ctrl = 0;
		}
	}

	do {
		i--;
		entry = macb_tx_ring_wrap(bp, i);
		tx_skb = &queue->tx_skb[entry];
		desc = macb_tx_desc(queue, entry);

		ctrl = (u32)tx_skb->size;
		if (eof) {
			ctrl |= MACB_BIT(TX_LAST);
			eof = 0;
		}
		if (unlikely(entry == (bp->tx_ring_size - 1)))
			ctrl |= MACB_BIT(TX_WRAP);

		/* First descriptor is header descriptor */
		if (i == queue->tx_head) {
			ctrl |= MACB_BF(TX_LSO, lso_ctrl);
			ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
		} else
			/* Only set MSS/MFS on payload descriptors
			 * (second or later descriptor)
			 */
			ctrl |= MACB_BF(MSS_MFS, mss_mfs);

		/* Set TX buffer descriptor */
		macb_set_addr(bp, desc, tx_skb->mapping);
		/* desc->addr must be visible to hardware before clearing
		 * 'TX_USED' bit in desc->ctrl.
		 */
		wmb();
		desc->ctrl = ctrl;
	} while (i != queue->tx_head);

	queue->tx_head = tx_head;

	return count;

dma_error:
	netdev_err(bp->dev, "TX DMA map failed\n");

	for (i = queue->tx_head; i != tx_head; i++) {
		tx_skb = macb_tx_skb(queue, i);

		macb_tx_unmap(bp, tx_skb);
	}

	return 0;
}
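
/* Worked example (illustrative): for a TSO skb whose linear area holds a
 * 66-byte header (hdrlen) plus 1434 bytes of payload, the loop above maps
 * one buffer of size hdrlen first, then DIV_ROUND_UP(1434, max_tx_length)
 * further buffers (a single one whenever max_tx_length >= 1434); this is
 * exactly the descriptor count that macb_start_xmit() reserves up front.
 */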

static netdev_features_t macb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	unsigned int nr_frags, f;
	unsigned int hdrlen;

	/* Validate LSO compatibility */

	/* there is only one buffer */
	if (!skb_is_nonlinear(skb))
		return features;

	/* length of header */
	hdrlen = skb_transport_offset(skb);
	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
		hdrlen += tcp_hdrlen(skb);

	/* For LSO:
	 * When software supplies two or more payload buffers all payload buffers
	 * apart from the last must be a multiple of 8 bytes in size.
	 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
	for (f = 0; f < nr_frags; f++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
			return features & ~MACB_NETIF_LSO;
	}
	return features;
}

static inline int macb_clear_csum(struct sk_buff *skb)
{
	/* no change for packets without checksum offloading */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	/* make sure we can modify the header */
	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	/* Initialize the checksum field.
	 * This is required - at least for Zynq, which otherwise calculates
	 * wrong UDP header checksums for UDP packets with UDP data len <= 2.
	 */
	*(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
	return 0;
}

static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue = &bp->queues[queue_index];
	unsigned long flags;
	unsigned int desc_cnt, nr_frags, frag_size, f;
	unsigned int hdrlen;
	bool is_lso, is_udp = 0;

	is_lso = (skb_shinfo(skb)->gso_size != 0);

	if (is_lso) {
		is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);

		/* length of headers to be duplicated for each LSO segment */
		if (is_udp)
			/* only queue eth + ip headers separately for UDP */
			hdrlen = skb_transport_offset(skb);
		else
			hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb_headlen(skb) < hdrlen) {
			netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
			/* if this is required, would need to copy to single buffer */
			return NETDEV_TX_BUSY;
		}
	} else
		hdrlen = min(skb_headlen(skb), bp->max_tx_length);

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
		    queue_index, skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	/* Count how many TX buffer descriptors are needed to send this
	 * socket buffer: skb fragments of jumbo frames may need to be
	 * split into many buffer descriptors.
	 */
	if (is_lso && (skb_headlen(skb) > hdrlen))
		/* extra header descriptor if also payload in first buffer */
		desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
	else
		desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
		desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
	}

	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
		       bp->tx_ring_size) < desc_cnt) {
		netif_stop_subqueue(dev, queue_index);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   queue->tx_head, queue->tx_tail);
		return NETDEV_TX_BUSY;
	}

	if (macb_clear_csum(skb)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Map socket buffer for DMA transfer */
	if (!macb_tx_map(bp, queue, skb, hdrlen)) {
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	/* Make newly initialized descriptor visible to hardware */
	wmb();
	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
		netif_stop_subqueue(dev, queue_index);

unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				   "RX buffer must be multiple of %d bytes, expanding\n",
				   RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}
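
/* Worked example (illustrative): with an MTU of 1500, macb_open() requests
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 2 (NET_IP_ALIGN) == 1520 bytes,
 * which is rounded up here to 1536; macb_configure_dma() later programs it
 * as 1536 / RX_BUFFER_MULTIPLE == 24 into the RXBS field.
 */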

static void gem_free_rx_buffers(struct macb *bp)
{
	struct sk_buff		*skb;
	struct macb_dma_desc	*desc;
	struct macb_queue *queue;
	dma_addr_t		addr;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		if (!queue->rx_skbuff)
			continue;

		for (i = 0; i < bp->rx_ring_size; i++) {
			skb = queue->rx_skbuff[i];

			if (!skb)
				continue;

			desc = macb_rx_desc(queue, i);
			addr = macb_get_addr(bp, desc);

			dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			skb = NULL;
		}

		kfree(queue->rx_skbuff);
		queue->rx_skbuff = NULL;
	}
}

static void macb_free_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue = &bp->queues[0];

	if (queue->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  bp->rx_ring_size * bp->rx_buffer_size,
				  queue->rx_buffers, queue->rx_buffers_dma);
		queue->rx_buffers = NULL;
	}
}

static void macb_free_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	queue = &bp->queues[0];
	bp->macbgem_ops.mog_free_rx_buffers(bp);
	if (queue->rx_ring) {
		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
				  queue->rx_ring, queue->rx_ring_dma);
		queue->rx_ring = NULL;
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		kfree(queue->tx_skb);
		queue->tx_skb = NULL;
		if (queue->tx_ring) {
			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
					  queue->tx_ring, queue->tx_ring_dma);
			queue->tx_ring = NULL;
		}
	}
}

static int gem_alloc_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = bp->rx_ring_size * sizeof(struct sk_buff *);
		queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
		if (!queue->rx_skbuff)
			return -ENOMEM;
		else
			netdev_dbg(bp->dev,
				   "Allocated %d RX struct sk_buff entries at %p\n",
				   bp->rx_ring_size, queue->rx_skbuff);
	}
	return 0;
}

static int macb_alloc_rx_buffers(struct macb *bp)
{
	struct macb_queue *queue = &bp->queues[0];
	int size;

	size = bp->rx_ring_size * bp->rx_buffer_size;
	queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					       &queue->rx_buffers_dma, GFP_KERNEL);
	if (!queue->rx_buffers)
		return -ENOMEM;

	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
	return 0;
}

static int macb_alloc_consistent(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;
	int size;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		size = TX_RING_BYTES(bp);
		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->tx_ring_dma,
						    GFP_KERNEL);
		if (!queue->tx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
			   q, size, (unsigned long)queue->tx_ring_dma,
			   queue->tx_ring);

		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
		queue->tx_skb = kmalloc(size, GFP_KERNEL);
		if (!queue->tx_skb)
			goto out_err;

		size = RX_RING_BYTES(bp);
		queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
						    &queue->rx_ring_dma, GFP_KERNEL);
		if (!queue->rx_ring)
			goto out_err;
		netdev_dbg(bp->dev,
			   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
			   size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
	}
	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}

static void gem_init_rings(struct macb *bp)
{
	struct macb_queue *queue;
	struct macb_dma_desc *desc = NULL;
	unsigned int q;
	int i;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		for (i = 0; i < bp->tx_ring_size; i++) {
			desc = macb_tx_desc(queue, i);
			macb_set_addr(bp, desc, 0);
			desc->ctrl = MACB_BIT(TX_USED);
		}
		desc->ctrl |= MACB_BIT(TX_WRAP);
		queue->tx_head = 0;
		queue->tx_tail = 0;

		queue->rx_tail = 0;
		queue->rx_prepared_head = 0;

		gem_rx_refill(queue);
	}
}

static void macb_init_rings(struct macb *bp)
{
	int i;
	struct macb_dma_desc *desc = NULL;

	macb_init_rx_ring(&bp->queues[0]);

	for (i = 0; i < bp->tx_ring_size; i++) {
		desc = macb_tx_desc(&bp->queues[0], i);
		macb_set_addr(bp, desc, 0);
		desc->ctrl = MACB_BIT(TX_USED);
	}
	bp->queues[0].tx_head = 0;
	bp->queues[0].tx_tail = 0;
	desc->ctrl |= MACB_BIT(TX_WRAP);
}

static void macb_reset_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	/* Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	macb_writel(bp, NCR, 0);

	/* Clear the stats registers (XXX: Update stats first?) */
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable all interrupts */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, IDR, -1);
		queue_readl(queue, ISR);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, -1);
	}
}

static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}
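
/* Worked example (illustrative): the divisor keeps MDC == pclk / divisor at
 * or below the 2.5 MHz limit of IEEE 802.3 clause 22; e.g. a 200 MHz pclk
 * on GEM selects GEM_CLK_DIV96, giving 200 MHz / 96 ~= 2.08 MHz.
 */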

/* Get the DMA bus width field of the network configuration register that we
 * should program.  We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
static void macb_configure_dma(struct macb *bp)
{
	struct macb_queue *queue;
	u32 buffer_size;
	unsigned int q;
	u32 dmacfg;

	buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			if (q)
				queue_writel(queue, RBQS, buffer_size);
			else
				dmacfg |= GEM_BF(RXBS, buffer_size);
		}
		if (bp->dma_burst_length)
			dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA_PKT);

		if (bp->native_io)
			dmacfg &= ~GEM_BIT(ENDIA_DESC);
		else
			dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */

		if (bp->dev->features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			dmacfg |= GEM_BIT(ADDR64);
#endif
#ifdef CONFIG_MACB_USE_HWSTAMP
		if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
			dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
#endif
		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
		gem_writel(bp, DMACFG, dmacfg);
	}
}

static void macb_init_hw(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* make eth data aligned */
	config |= MACB_BIT(PAE);		/* PAuse Enable */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	if (bp->caps & MACB_CAPS_JUMBO)
		config |= MACB_BIT(JFRAME);	/* Enable jumbo frames */
	else
		config |= MACB_BIT(BIG);	/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
		config |= GEM_BIT(RXCOEN);
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		gem_writel(bp, JML, bp->jumbo_max_len);
	bp->speed = SPEED_10;
	bp->duplex = DUPLEX_HALF;
	bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
	if (bp->caps & MACB_CAPS_JUMBO)
		bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;

	macb_configure_dma(bp);

	/* Initialize TX and RX buffers */
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif

		/* Enable interrupts */
		queue_writel(queue, IER,
			     MACB_RX_INT_FLAGS |
			     MACB_TX_INT_FLAGS |
			     MACB_BIT(HRESP));
	}

	/* Enable TX and RX */
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
}

/* The hash address register is 64 bits long and takes up two
 * locations in the memory map.  The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function.  The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received.  If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast.  A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register.  A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register.
 */
static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}
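
/* Worked example (illustrative): for the broadcast address
 * ff:ff:ff:ff:ff:ff every da[n] is 1, so each hi[j] XORs eight ones and
 * evaluates to 0; hash_get_index() returns 0 and broadcast maps to bit 0 of
 * the hash register.
 */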

/* Add multicast addresses to the internal multicast-hash table. */
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = 0;
	mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}

/* Enable/Disable promiscuous and multicast modes. */
static void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cfg |= MACB_BIT(CAF);

		/* Disable RX checksum offload */
		if (macb_is_gem(bp))
			cfg &= ~GEM_BIT(RXCOEN);
	} else {
		/* Disable promiscuous mode */
		cfg &= ~MACB_BIT(CAF);

		/* Enable RX checksum offload only if requested */
		if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
			cfg |= GEM_BIT(RXCOEN);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Enable all multicast mode */
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		/* Enable specific multicasts */
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		/* Disable all multicast mode */
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}

static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	struct macb_queue *queue;
	unsigned int q;
	int err;

	netdev_dbg(bp->dev, "open\n");

	/* carrier starts down */
	netif_carrier_off(dev);

	/* if the phy is not yet registered, retry later */
	if (!dev->phydev)
		return -EAGAIN;

	/* RX buffers initialization */
	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		return err;
	}

	bp->macbgem_ops.mog_init_rings(bp);
	macb_init_hw(bp);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_enable(&queue->napi);

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_tx_start_all_queues(dev);

	if (bp->ptp_info)
		bp->ptp_info->ptp_init(dev);

	return 0;
}

static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	netif_tx_stop_all_queues(dev);

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		napi_disable(&queue->napi);

	if (dev->phydev)
		phy_stop(dev->phydev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(dev);

	return 0;
}

static int macb_change_mtu(struct net_device *dev, int new_mtu)
{
	if (netif_running(dev))
		return -EBUSY;

	dev->mtu = new_mtu;

	return 0;
}

static void gem_update_stats(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int i, q, idx;
	unsigned long *stat;

	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;

	for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
		u32 offset = gem_statistics[i].offset;
		u64 val = bp->macb_reg_readl(bp, offset);

		bp->ethtool_stats[i] += val;
		*p += val;

		if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
			/* Add GEM_OCTTXH, GEM_OCTRXH */
			val = bp->macb_reg_readl(bp, offset + 4);
			bp->ethtool_stats[i] += ((u64)val) << 32;
			*(++p) += val;
		}
	}

	idx = GEM_STATS_LEN;
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
		for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
			bp->ethtool_stats[idx++] = *stat;
}

static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->dev->stats;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}
2429
2430static void gem_get_ethtool_stats(struct net_device *dev,
2431 struct ethtool_stats *stats, u64 *data)
2432{
2433 struct macb *bp;
2434
2435 bp = netdev_priv(dev);
2436 gem_update_stats(bp);
2437 memcpy(data, &bp->ethtool_stats, sizeof(u64)
2438 * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
2439}
2440
2441static int gem_get_sset_count(struct net_device *dev, int sset)
2442{
2443 struct macb *bp = netdev_priv(dev);
2444
2445 switch (sset) {
2446 case ETH_SS_STATS:
2447 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
2448 default:
2449 return -EOPNOTSUPP;
2450 }
2451}
2452
2453static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2454{
2455 char stat_string[ETH_GSTRING_LEN];
2456 struct macb *bp = netdev_priv(dev);
2457 struct macb_queue *queue;
2458 unsigned int i;
2459 unsigned int q;
2460
2461 switch (sset) {
2462 case ETH_SS_STATS:
2463 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2464 memcpy(p, gem_statistics[i].stat_string,
2465 ETH_GSTRING_LEN);
2466
2467 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2468 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
2469 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
2470 q, queue_statistics[i].stat_string);
2471 memcpy(p, stat_string, ETH_GSTRING_LEN);
2472 }
2473 }
2474 break;
2475 }
2476}
2477
2478static struct net_device_stats *macb_get_stats(struct net_device *dev)
2479{
2480 struct macb *bp = netdev_priv(dev);
2481 struct net_device_stats *nstat = &bp->dev->stats;
2482 struct macb_stats *hwstat = &bp->hw_stats.macb;
2483
2484 if (macb_is_gem(bp))
2485 return gem_get_stats(bp);
2486

	/* read stats directly from hardware */
	macb_update_stats(bp);

	/* Convert HW stats into netdevice stats */
2491 nstat->rx_errors = (hwstat->rx_fcs_errors +
2492 hwstat->rx_align_errors +
2493 hwstat->rx_resource_errors +
2494 hwstat->rx_overruns +
2495 hwstat->rx_oversize_pkts +
2496 hwstat->rx_jabbers +
2497 hwstat->rx_undersize_pkts +
2498 hwstat->rx_length_mismatch);
2499 nstat->tx_errors = (hwstat->tx_late_cols +
2500 hwstat->tx_excessive_cols +
2501 hwstat->tx_underruns +
2502 hwstat->tx_carrier_errors +
2503 hwstat->sqe_test_errors);
2504 nstat->collisions = (hwstat->tx_single_cols +
2505 hwstat->tx_multiple_cols +
2506 hwstat->tx_excessive_cols);
2507 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2508 hwstat->rx_jabbers +
2509 hwstat->rx_undersize_pkts +
2510 hwstat->rx_length_mismatch);
2511 nstat->rx_over_errors = hwstat->rx_resource_errors +
2512 hwstat->rx_overruns;
2513 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2514 nstat->rx_frame_errors = hwstat->rx_align_errors;
2515 nstat->rx_fifo_errors = hwstat->rx_overruns;
2516
2517 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2518 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2519 nstat->tx_fifo_errors = hwstat->tx_underruns;
2520
2521
2522 return nstat;
2523}
2524
2525static int macb_get_regs_len(struct net_device *netdev)
2526{
2527 return MACB_GREGS_NBR * sizeof(u32);
2528}
2529
2530static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2531 void *p)
2532{
2533 struct macb *bp = netdev_priv(dev);
2534 unsigned int tail, head;
2535 u32 *regs_buff = p;
2536
2537 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2538 | MACB_GREGS_VERSION;
2539
2540 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
2541 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
2542
2543 regs_buff[0] = macb_readl(bp, NCR);
2544 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
2545 regs_buff[2] = macb_readl(bp, NSR);
2546 regs_buff[3] = macb_readl(bp, TSR);
2547 regs_buff[4] = macb_readl(bp, RBQP);
2548 regs_buff[5] = macb_readl(bp, TBQP);
2549 regs_buff[6] = macb_readl(bp, RSR);
2550 regs_buff[7] = macb_readl(bp, IMR);
2551
2552 regs_buff[8] = tail;
2553 regs_buff[9] = head;
2554 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2555 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
2556
2557 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2558 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
2559 if (macb_is_gem(bp))
2560 regs_buff[13] = gem_readl(bp, DMACFG);
2561}
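/* This snapshot surfaces through 'ethtool -d'; the buffer layout above is
 * fixed, e.g. word 0 = NCR, word 1 = NCFGR, words 8/9 = queue 0 TX
 * tail/head indices. The MAC keeps running while the registers are read,
 * so the TX head and tail values are only a point-in-time approximation.
 */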
2562
2563static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2564{
2565 struct macb *bp = netdev_priv(netdev);
2566
2567 wol->supported = 0;
2568 wol->wolopts = 0;
2569
2570 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
2571 wol->supported = WAKE_MAGIC;
2572
2573 if (bp->wol & MACB_WOL_ENABLED)
2574 wol->wolopts |= WAKE_MAGIC;
2575 }
2576}
2577
2578static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2579{
2580 struct macb *bp = netdev_priv(netdev);
2581
2582 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
2583 (wol->wolopts & ~WAKE_MAGIC))
2584 return -EOPNOTSUPP;
2585
2586 if (wol->wolopts & WAKE_MAGIC)
2587 bp->wol |= MACB_WOL_ENABLED;
2588 else
2589 bp->wol &= ~MACB_WOL_ENABLED;
2590
2591 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
2592
2593 return 0;
2594}
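/* Wake-on-LAN is driven from ethtool; an illustrative sketch (interface
 * name is a placeholder):
 *
 *   ethtool -s eth0 wol g       -- enable magic-packet wake-up
 *   ethtool eth0                -- should report "Wake-on: g"
 *
 * macb_suspend()/macb_resume() below then arm or disarm the WOL interrupt
 * and leave the clocks running while wake-up is enabled.
 */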
2595
2596static void macb_get_ringparam(struct net_device *netdev,
2597 struct ethtool_ringparam *ring)
2598{
2599 struct macb *bp = netdev_priv(netdev);
2600
2601 ring->rx_max_pending = MAX_RX_RING_SIZE;
2602 ring->tx_max_pending = MAX_TX_RING_SIZE;
2603
2604 ring->rx_pending = bp->rx_ring_size;
2605 ring->tx_pending = bp->tx_ring_size;
2606}
2607
2608static int macb_set_ringparam(struct net_device *netdev,
2609 struct ethtool_ringparam *ring)
2610{
2611 struct macb *bp = netdev_priv(netdev);
2612 u32 new_rx_size, new_tx_size;
2613 unsigned int reset = 0;
2614
2615 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2616 return -EINVAL;
2617
2618 new_rx_size = clamp_t(u32, ring->rx_pending,
2619 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
2620 new_rx_size = roundup_pow_of_two(new_rx_size);
2621
2622 new_tx_size = clamp_t(u32, ring->tx_pending,
2623 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
2624 new_tx_size = roundup_pow_of_two(new_tx_size);
2625
2626 if ((new_tx_size == bp->tx_ring_size) &&
2627 (new_rx_size == bp->rx_ring_size)) {
2628
		/* nothing to do */
		return 0;
2630 }
2631
2632 if (netif_running(bp->dev)) {
2633 reset = 1;
2634 macb_close(bp->dev);
2635 }
2636
2637 bp->rx_ring_size = new_rx_size;
2638 bp->tx_ring_size = new_tx_size;
2639
2640 if (reset)
2641 macb_open(bp->dev);
2642
2643 return 0;
2644}
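/* Requested ring sizes are clamped and then rounded up to a power of two,
 * so the value that takes effect can differ from the one requested. An
 * illustrative example:
 *
 *   ethtool -G eth0 rx 1000 tx 100
 *
 * yields rx_ring_size = 1024 and tx_ring_size = 128 (100 is within
 * [MIN_TX_RING_SIZE, MAX_TX_RING_SIZE] and rounds up to 128). If the
 * interface is running it is briefly closed and reopened so the rings
 * are reallocated at the new size.
 */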
2645
2646#ifdef CONFIG_MACB_USE_HWSTAMP
2647static unsigned int gem_get_tsu_rate(struct macb *bp)
2648{
2649 struct clk *tsu_clk;
2650 unsigned int tsu_rate;
2651
2652 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
2653 if (!IS_ERR(tsu_clk))
		tsu_rate = clk_get_rate(tsu_clk);
	/* try pclk instead */
	else if (!IS_ERR(bp->pclk)) {
2657 tsu_clk = bp->pclk;
2658 tsu_rate = clk_get_rate(tsu_clk);
2659 } else
2660 return -ENOTSUPP;
2661 return tsu_rate;
2662}
2663
2664static s32 gem_get_ptp_max_adj(void)
2665{
2666 return 64000000;
2667}
2668
2669static int gem_get_ts_info(struct net_device *dev,
2670 struct ethtool_ts_info *info)
2671{
2672 struct macb *bp = netdev_priv(dev);
2673
2674 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
2675 ethtool_op_get_ts_info(dev, info);
2676 return 0;
2677 }
2678
2679 info->so_timestamping =
2680 SOF_TIMESTAMPING_TX_SOFTWARE |
2681 SOF_TIMESTAMPING_RX_SOFTWARE |
2682 SOF_TIMESTAMPING_SOFTWARE |
2683 SOF_TIMESTAMPING_TX_HARDWARE |
2684 SOF_TIMESTAMPING_RX_HARDWARE |
2685 SOF_TIMESTAMPING_RAW_HARDWARE;
2686 info->tx_types =
2687 (1 << HWTSTAMP_TX_ONESTEP_SYNC) |
2688 (1 << HWTSTAMP_TX_OFF) |
2689 (1 << HWTSTAMP_TX_ON);
2690 info->rx_filters =
2691 (1 << HWTSTAMP_FILTER_NONE) |
2692 (1 << HWTSTAMP_FILTER_ALL);
2693
2694 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
2695
2696 return 0;
2697}
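/* When HW_DMA_CAP_PTP is present, timestamping is enabled through the
 * standard SIOCSHWTSTAMP ioctl. A minimal user-space sketch (error
 * handling omitted, interface name is a placeholder):
 *
 *   struct hwtstamp_config cfg = { 0 };
 *   struct ifreq ifr = { 0 };
 *
 *   cfg.tx_type = HWTSTAMP_TX_ON;
 *   cfg.rx_filter = HWTSTAMP_FILTER_ALL;
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (char *)&cfg;
 *   ioctl(fd, SIOCSHWTSTAMP, &ifr);     -- lands in gem_set_hwtst()
 */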
2698
2699static struct macb_ptp_info gem_ptp_info = {
2700 .ptp_init = gem_ptp_init,
2701 .ptp_remove = gem_ptp_remove,
2702 .get_ptp_max_adj = gem_get_ptp_max_adj,
2703 .get_tsu_rate = gem_get_tsu_rate,
2704 .get_ts_info = gem_get_ts_info,
2705 .get_hwtst = gem_get_hwtst,
2706 .set_hwtst = gem_set_hwtst,
2707};
2708#endif
2709
2710static int macb_get_ts_info(struct net_device *netdev,
2711 struct ethtool_ts_info *info)
2712{
2713 struct macb *bp = netdev_priv(netdev);
2714
2715 if (bp->ptp_info)
2716 return bp->ptp_info->get_ts_info(netdev, info);
2717
2718 return ethtool_op_get_ts_info(netdev, info);
2719}
2720
2721static void gem_enable_flow_filters(struct macb *bp, bool enable)
2722{
2723 struct ethtool_rx_fs_item *item;
2724 u32 t2_scr;
2725 int num_t2_scr;
2726
2727 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
2728
2729 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2730 struct ethtool_rx_flow_spec *fs = &item->fs;
2731 struct ethtool_tcpip4_spec *tp4sp_m;
2732
2733 if (fs->location >= num_t2_scr)
2734 continue;
2735
		t2_scr = gem_readl_n(bp, SCRT2, fs->location);

		/* enable/disable screener regs for the flow entry */
		t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);

		/* only enable fields with no masking */
		tp4sp_m = &(fs->m_u.tcp_ip4_spec);
2743
2744 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
2745 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
2746 else
2747 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
2748
2749 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
2750 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
2751 else
2752 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
2753
2754 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
2755 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
2756 else
2757 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
2758
2759 gem_writel_n(bp, SCRT2, fs->location, t2_scr);
2760 }
2761}
2762
2763static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
2764{
2765 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
2766 uint16_t index = fs->location;
2767 u32 w0, w1, t2_scr;
2768 bool cmp_a = false;
2769 bool cmp_b = false;
2770 bool cmp_c = false;
2771
2772 tp4sp_v = &(fs->h_u.tcp_ip4_spec);
2773 tp4sp_m = &(fs->m_u.tcp_ip4_spec);

	/* ignore field if any masking set */
	if (tp4sp_m->ip4src == 0xFFFFFFFF) {
		/* 1st compare reg - IP source address */
		w0 = 0;
2779 w1 = 0;
2780 w0 = tp4sp_v->ip4src;
2781 w1 = GEM_BFINS(T2DISMSK, 1, w1);
2782 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
2783 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
2784 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
2785 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
2786 cmp_a = true;
2787 }
2788
	/* ignore field if any masking set */
	if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
		/* 2nd compare reg - IP destination address */
		w0 = 0;
2793 w1 = 0;
2794 w0 = tp4sp_v->ip4dst;
2795 w1 = GEM_BFINS(T2DISMSK, 1, w1);
2796 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
2797 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
2798 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
2799 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
2800 cmp_b = true;
2801 }
2802
	/* ignore both port fields if masking set in both */
	if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
		/* 3rd compare reg - source port, destination port */
		w0 = 0;
2807 w1 = 0;
2808 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
2809 if (tp4sp_m->psrc == tp4sp_m->pdst) {
2810 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
2811 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
2812 w1 = GEM_BFINS(T2DISMSK, 1, w1);
2813 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
		} else {
			/* only one port definition */
			w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
2817 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
2818 if (tp4sp_m->psrc == 0xFFFF) {
2819 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
2820 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
2821 } else {
2822 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
2823 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
2824 }
2825 }
2826 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
2827 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
2828 cmp_c = true;
2829 }
2830
2831 t2_scr = 0;
2832 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
2833 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
2834 if (cmp_a)
2835 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
2836 if (cmp_b)
2837 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
2838 if (cmp_c)
2839 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
2840 gem_writel_n(bp, SCRT2, index, t2_scr);
2841}
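/* Worked example (illustrative): the rule
 *
 *   flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 action 1 loc 0
 *
 * arrives with full masks on ip4dst and pdst, so the code above programs
 * compare reg GEM_IP4DST_CMP(0) with the destination IP at the ethertype
 * offset (cmp_b) and compare reg GEM_PORT_CMP(0) with port 80 as a 16-bit
 * compare at IPHDR_DSTPORT_OFFSET (cmp_c), then points screener 0 at those
 * comparers with QUEUE = 1 so matching frames land on hardware queue 1.
 */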
2842
2843static int gem_add_flow_filter(struct net_device *netdev,
2844 struct ethtool_rxnfc *cmd)
2845{
2846 struct macb *bp = netdev_priv(netdev);
2847 struct ethtool_rx_flow_spec *fs = &cmd->fs;
2848 struct ethtool_rx_fs_item *item, *newfs;
2849 unsigned long flags;
2850 int ret = -EINVAL;
2851 bool added = false;
2852
2853 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
2854 if (newfs == NULL)
2855 return -ENOMEM;
2856 memcpy(&newfs->fs, fs, sizeof(newfs->fs));
2857
2858 netdev_dbg(netdev,
2859 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
2860 fs->flow_type, (int)fs->ring_cookie, fs->location,
2861 htonl(fs->h_u.tcp_ip4_spec.ip4src),
2862 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
2863 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
2864
2865 spin_lock_irqsave(&bp->rx_fs_lock, flags);
2866
	/* find correct place to add in list */
	list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2869 if (item->fs.location > newfs->fs.location) {
2870 list_add_tail(&newfs->list, &item->list);
2871 added = true;
2872 break;
2873 } else if (item->fs.location == fs->location) {
2874 netdev_err(netdev, "Rule not added: location %d not free!\n",
2875 fs->location);
2876 ret = -EBUSY;
2877 goto err;
2878 }
2879 }
2880 if (!added)
2881 list_add_tail(&newfs->list, &bp->rx_fs_list.list);
2882
2883 gem_prog_cmp_regs(bp, fs);
2884 bp->rx_fs_list.count++;
2885
2886 if (netdev->features & NETIF_F_NTUPLE)
2887 gem_enable_flow_filters(bp, 1);
2888
2889 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
2890 return 0;
2891
2892err:
2893 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
2894 kfree(newfs);
2895 return ret;
2896}
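/* The matching user-space interface is ethtool's n-tuple API; illustrative
 * usage (interface name is a placeholder):
 *
 *   ethtool -K eth0 ntuple on
 *   ethtool -N eth0 flow-type tcp4 dst-ip 192.168.1.1 dst-port 80 \
 *           action 1 loc 0
 *   ethtool -n eth0                  -- list installed rules
 *   ethtool -N eth0 delete 0         -- remove the rule at location 0
 *
 * 'loc' must be below bp->max_tuples and 'action' below bp->num_queues,
 * as checked in gem_set_rxnfc().
 */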
2897
2898static int gem_del_flow_filter(struct net_device *netdev,
2899 struct ethtool_rxnfc *cmd)
2900{
2901 struct macb *bp = netdev_priv(netdev);
2902 struct ethtool_rx_fs_item *item;
2903 struct ethtool_rx_flow_spec *fs;
2904 unsigned long flags;
2905
2906 spin_lock_irqsave(&bp->rx_fs_lock, flags);
2907
2908 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2909 if (item->fs.location == cmd->fs.location) {
			/* disable screener regs for the flow entry */
			fs = &(item->fs);
2912 netdev_dbg(netdev,
2913 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
2914 fs->flow_type, (int)fs->ring_cookie, fs->location,
2915 htonl(fs->h_u.tcp_ip4_spec.ip4src),
2916 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
2917 htons(fs->h_u.tcp_ip4_spec.psrc),
2918 htons(fs->h_u.tcp_ip4_spec.pdst));
2919
2920 gem_writel_n(bp, SCRT2, fs->location, 0);
2921
2922 list_del(&item->list);
2923 bp->rx_fs_list.count--;
2924 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
2925 kfree(item);
2926 return 0;
2927 }
2928 }
2929
2930 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
2931 return -EINVAL;
2932}
2933
2934static int gem_get_flow_entry(struct net_device *netdev,
2935 struct ethtool_rxnfc *cmd)
2936{
2937 struct macb *bp = netdev_priv(netdev);
2938 struct ethtool_rx_fs_item *item;
2939
2940 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2941 if (item->fs.location == cmd->fs.location) {
2942 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
2943 return 0;
2944 }
2945 }
2946 return -EINVAL;
2947}
2948
2949static int gem_get_all_flow_entries(struct net_device *netdev,
2950 struct ethtool_rxnfc *cmd, u32 *rule_locs)
2951{
2952 struct macb *bp = netdev_priv(netdev);
2953 struct ethtool_rx_fs_item *item;
2954 uint32_t cnt = 0;
2955
2956 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2957 if (cnt == cmd->rule_cnt)
2958 return -EMSGSIZE;
2959 rule_locs[cnt] = item->fs.location;
2960 cnt++;
2961 }
2962 cmd->data = bp->max_tuples;
2963 cmd->rule_cnt = cnt;
2964
2965 return 0;
2966}
2967
2968static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
2969 u32 *rule_locs)
2970{
2971 struct macb *bp = netdev_priv(netdev);
2972 int ret = 0;
2973
2974 switch (cmd->cmd) {
2975 case ETHTOOL_GRXRINGS:
2976 cmd->data = bp->num_queues;
2977 break;
2978 case ETHTOOL_GRXCLSRLCNT:
2979 cmd->rule_cnt = bp->rx_fs_list.count;
2980 break;
2981 case ETHTOOL_GRXCLSRULE:
2982 ret = gem_get_flow_entry(netdev, cmd);
2983 break;
2984 case ETHTOOL_GRXCLSRLALL:
2985 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
2986 break;
2987 default:
2988 netdev_err(netdev,
2989 "Command parameter %d is not supported\n", cmd->cmd);
2990 ret = -EOPNOTSUPP;
2991 }
2992
2993 return ret;
2994}
2995
2996static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
2997{
2998 struct macb *bp = netdev_priv(netdev);
2999 int ret;
3000
3001 switch (cmd->cmd) {
3002 case ETHTOOL_SRXCLSRLINS:
3003 if ((cmd->fs.location >= bp->max_tuples)
3004 || (cmd->fs.ring_cookie >= bp->num_queues)) {
3005 ret = -EINVAL;
3006 break;
3007 }
3008 ret = gem_add_flow_filter(netdev, cmd);
3009 break;
3010 case ETHTOOL_SRXCLSRLDEL:
3011 ret = gem_del_flow_filter(netdev, cmd);
3012 break;
3013 default:
3014 netdev_err(netdev,
3015 "Command parameter %d is not supported\n", cmd->cmd);
3016 ret = -EOPNOTSUPP;
3017 }
3018
3019 return ret;
3020}
3021
3022static const struct ethtool_ops macb_ethtool_ops = {
3023 .get_regs_len = macb_get_regs_len,
3024 .get_regs = macb_get_regs,
3025 .get_link = ethtool_op_get_link,
3026 .get_ts_info = ethtool_op_get_ts_info,
3027 .get_wol = macb_get_wol,
3028 .set_wol = macb_set_wol,
3029 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3030 .set_link_ksettings = phy_ethtool_set_link_ksettings,
3031 .get_ringparam = macb_get_ringparam,
3032 .set_ringparam = macb_set_ringparam,
3033};
3034
3035static const struct ethtool_ops gem_ethtool_ops = {
3036 .get_regs_len = macb_get_regs_len,
3037 .get_regs = macb_get_regs,
3038 .get_link = ethtool_op_get_link,
3039 .get_ts_info = macb_get_ts_info,
3040 .get_ethtool_stats = gem_get_ethtool_stats,
3041 .get_strings = gem_get_ethtool_strings,
3042 .get_sset_count = gem_get_sset_count,
3043 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3044 .set_link_ksettings = phy_ethtool_set_link_ksettings,
3045 .get_ringparam = macb_get_ringparam,
3046 .set_ringparam = macb_set_ringparam,
3047 .get_rxnfc = gem_get_rxnfc,
3048 .set_rxnfc = gem_set_rxnfc,
3049};
3050
3051static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3052{
3053 struct phy_device *phydev = dev->phydev;
3054 struct macb *bp = netdev_priv(dev);
3055
3056 if (!netif_running(dev))
3057 return -EINVAL;
3058
3059 if (!phydev)
3060 return -ENODEV;
3061
3062 if (!bp->ptp_info)
3063 return phy_mii_ioctl(phydev, rq, cmd);
3064
3065 switch (cmd) {
3066 case SIOCSHWTSTAMP:
3067 return bp->ptp_info->set_hwtst(dev, rq, cmd);
3068 case SIOCGHWTSTAMP:
3069 return bp->ptp_info->get_hwtst(dev, rq);
3070 default:
3071 return phy_mii_ioctl(phydev, rq, cmd);
3072 }
3073}
3074
3075static int macb_set_features(struct net_device *netdev,
3076 netdev_features_t features)
3077{
3078 struct macb *bp = netdev_priv(netdev);
3079 netdev_features_t changed = features ^ netdev->features;
3080
3081
	/* TX checksum offload */
	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
3083 u32 dmacfg;
3084
3085 dmacfg = gem_readl(bp, DMACFG);
3086 if (features & NETIF_F_HW_CSUM)
3087 dmacfg |= GEM_BIT(TXCOEN);
3088 else
3089 dmacfg &= ~GEM_BIT(TXCOEN);
3090 gem_writel(bp, DMACFG, dmacfg);
3091 }
3092
3093
	/* RX checksum offload */
	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
3095 u32 netcfg;
3096
3097 netcfg = gem_readl(bp, NCFGR);
3098 if (features & NETIF_F_RXCSUM &&
3099 !(netdev->flags & IFF_PROMISC))
3100 netcfg |= GEM_BIT(RXCOEN);
3101 else
3102 netcfg &= ~GEM_BIT(RXCOEN);
3103 gem_writel(bp, NCFGR, netcfg);
3104 }
3105
3106
	/* RX Flow Filters */
	if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
3108 bool turn_on = features & NETIF_F_NTUPLE;
3109
3110 gem_enable_flow_filters(bp, turn_on);
3111 }
3112 return 0;
3113}
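/* These offloads are toggled through ethtool's feature interface, e.g.
 * (illustrative):
 *
 *   ethtool -K eth0 tx off       -- clears NETIF_F_HW_CSUM -> TXCOEN off
 *   ethtool -K eth0 rx on        -- sets NETIF_F_RXCSUM -> RXCOEN on
 *   ethtool -K eth0 ntuple on    -- re-enables the programmed flow filters
 *
 * The core hands this handler the full feature word; the 'changed' XOR
 * above ensures only bits that actually toggled touch the hardware.
 */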
3114
3115static const struct net_device_ops macb_netdev_ops = {
3116 .ndo_open = macb_open,
3117 .ndo_stop = macb_close,
3118 .ndo_start_xmit = macb_start_xmit,
3119 .ndo_set_rx_mode = macb_set_rx_mode,
3120 .ndo_get_stats = macb_get_stats,
3121 .ndo_do_ioctl = macb_ioctl,
3122 .ndo_validate_addr = eth_validate_addr,
3123 .ndo_change_mtu = macb_change_mtu,
3124 .ndo_set_mac_address = eth_mac_addr,
3125#ifdef CONFIG_NET_POLL_CONTROLLER
3126 .ndo_poll_controller = macb_poll_controller,
3127#endif
3128 .ndo_set_features = macb_set_features,
3129 .ndo_features_check = macb_features_check,
3130};
3131
/* Configure peripheral capabilities according to device tree
 * and integration options used
 */
static void macb_configure_caps(struct macb *bp,
3136 const struct macb_config *dt_conf)
3137{
3138 u32 dcfg;
3139
3140 if (dt_conf)
3141 bp->caps = dt_conf->caps;
3142
3143 if (hw_is_gem(bp->regs, bp->native_io)) {
3144 bp->caps |= MACB_CAPS_MACB_IS_GEM;
3145
3146 dcfg = gem_readl(bp, DCFG1);
3147 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
3148 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
3149 dcfg = gem_readl(bp, DCFG2);
3150 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
3151 bp->caps |= MACB_CAPS_FIFO_MODE;
3152#ifdef CONFIG_MACB_USE_HWSTAMP
3153 if (gem_has_ptp(bp)) {
3154 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
3155 pr_err("GEM doesn't support hardware ptp.\n");
3156 else {
3157 bp->hw_dma_cap |= HW_DMA_CAP_PTP;
3158 bp->ptp_info = &gem_ptp_info;
3159 }
3160 }
3161#endif
3162 }
3163
3164 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
3165}
3166
3167static void macb_probe_queues(void __iomem *mem,
3168 bool native_io,
3169 unsigned int *queue_mask,
3170 unsigned int *num_queues)
3171{
3172 unsigned int hw_q;
3173
3174 *queue_mask = 0x1;
3175 *num_queues = 1;
3176
	/* is it macb or gem ?
	 *
	 * We need to read directly from the hardware here because
	 * we are early in the probe process and don't have the
	 * MACB_CAPS_MACB_IS_GEM flag positioned
	 */
	if (!hw_is_gem(mem, native_io))
		return;

	/* bit 0 is never set but queue 0 always exists */
	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;

	*queue_mask |= 0x1;
3190
3191 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
3192 if (*queue_mask & (1 << hw_q))
3193 (*num_queues)++;
3194}
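/* Example (illustrative): a GEM whose DCFG6 low byte reads 0x07 advertises
 * queues 0, 1 and 2, so this returns queue_mask = 0x07 and num_queues = 3.
 * A sparse mask such as 0x05 yields num_queues = 2, and macb_init() later
 * maps the two Linux queues onto hardware queues 0 and 2 in order.
 */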
3195
3196static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
3197 struct clk **hclk, struct clk **tx_clk,
3198 struct clk **rx_clk)
3199{
3200 struct macb_platform_data *pdata;
3201 int err;
3202
3203 pdata = dev_get_platdata(&pdev->dev);
3204 if (pdata) {
3205 *pclk = pdata->pclk;
3206 *hclk = pdata->hclk;
3207 } else {
3208 *pclk = devm_clk_get(&pdev->dev, "pclk");
3209 *hclk = devm_clk_get(&pdev->dev, "hclk");
3210 }
3211
3212 if (IS_ERR(*pclk)) {
3213 err = PTR_ERR(*pclk);
		dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
3215 return err;
3216 }
3217
3218 if (IS_ERR(*hclk)) {
3219 err = PTR_ERR(*hclk);
		dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
3221 return err;
3222 }
3223
3224 *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
3225 if (IS_ERR(*tx_clk))
3226 *tx_clk = NULL;
3227
3228 *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
3229 if (IS_ERR(*rx_clk))
3230 *rx_clk = NULL;
3231
3232 err = clk_prepare_enable(*pclk);
3233 if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
3235 return err;
3236 }
3237
3238 err = clk_prepare_enable(*hclk);
3239 if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
3241 goto err_disable_pclk;
3242 }
3243
3244 err = clk_prepare_enable(*tx_clk);
3245 if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
3247 goto err_disable_hclk;
3248 }
3249
3250 err = clk_prepare_enable(*rx_clk);
3251 if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
3253 goto err_disable_txclk;
3254 }
3255
3256 return 0;
3257
3258err_disable_txclk:
3259 clk_disable_unprepare(*tx_clk);
3260
3261err_disable_hclk:
3262 clk_disable_unprepare(*hclk);
3263
3264err_disable_pclk:
3265 clk_disable_unprepare(*pclk);
3266
3267 return err;
3268}
3269
3270static int macb_init(struct platform_device *pdev)
3271{
3272 struct net_device *dev = platform_get_drvdata(pdev);
3273 unsigned int hw_q, q;
3274 struct macb *bp = netdev_priv(dev);
3275 struct macb_queue *queue;
3276 int err;
3277 u32 val, reg;
3278
3279 bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
3280 bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
3281
	/* set the queue register mapping once for all: queue0 has a special
	 * register mapping but we don't want to test the queue index then
	 * compute the corresponding register offset at run time.
	 */
3286 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
3287 if (!(bp->queue_mask & (1 << hw_q)))
3288 continue;
3289
3290 queue = &bp->queues[q];
3291 queue->bp = bp;
3292 netif_napi_add(dev, &queue->napi, macb_poll, 64);
3293 if (hw_q) {
3294 queue->ISR = GEM_ISR(hw_q - 1);
3295 queue->IER = GEM_IER(hw_q - 1);
3296 queue->IDR = GEM_IDR(hw_q - 1);
3297 queue->IMR = GEM_IMR(hw_q - 1);
3298 queue->TBQP = GEM_TBQP(hw_q - 1);
3299 queue->RBQP = GEM_RBQP(hw_q - 1);
3300 queue->RBQS = GEM_RBQS(hw_q - 1);
3301#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3302 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
3303 queue->TBQPH = GEM_TBQPH(hw_q - 1);
3304 queue->RBQPH = GEM_RBQPH(hw_q - 1);
3305 }
3306#endif
		} else {
			/* queue0 uses legacy registers */
			queue->ISR = MACB_ISR;
3310 queue->IER = MACB_IER;
3311 queue->IDR = MACB_IDR;
3312 queue->IMR = MACB_IMR;
3313 queue->TBQP = MACB_TBQP;
3314 queue->RBQP = MACB_RBQP;
3315#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3316 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
3317 queue->TBQPH = MACB_TBQPH;
3318 queue->RBQPH = MACB_RBQPH;
3319 }
3320#endif
3321 }
3322
		/* get irq: here we use the linux queue index, not the hardware
		 * queue index. the queue irq definitions in the device tree
		 * must remove the optional gaps that could exist in the
		 * hardware queue mask.
		 */
3328 queue->irq = platform_get_irq(pdev, q);
3329 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
3330 IRQF_SHARED, dev->name, queue);
3331 if (err) {
3332 dev_err(&pdev->dev,
3333 "Unable to request IRQ %d (error %d)\n",
3334 queue->irq, err);
3335 return err;
3336 }
3337
3338 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
3339 q++;
3340 }
3341
3342 dev->netdev_ops = &macb_netdev_ops;
3343
3344
	/* setup appropriated routines according to adapter type */
	if (macb_is_gem(bp)) {
3346 bp->max_tx_length = GEM_MAX_TX_LEN;
3347 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
3348 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
3349 bp->macbgem_ops.mog_init_rings = gem_init_rings;
3350 bp->macbgem_ops.mog_rx = gem_rx;
3351 dev->ethtool_ops = &gem_ethtool_ops;
3352 } else {
3353 bp->max_tx_length = MACB_MAX_TX_LEN;
3354 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
3355 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
3356 bp->macbgem_ops.mog_init_rings = macb_init_rings;
3357 bp->macbgem_ops.mog_rx = macb_rx;
3358 dev->ethtool_ops = &macb_ethtool_ops;
3359 }
3360
3361
	/* Set features */
	dev->hw_features = NETIF_F_SG;

	/* Check LSO capability */
	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
		dev->hw_features |= MACB_NETIF_LSO;

	/* Checksum offload is only available on gem with packet buffer */
	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
3371 if (bp->caps & MACB_CAPS_SG_DISABLED)
3372 dev->hw_features &= ~NETIF_F_SG;
3373 dev->features = dev->hw_features;
3374
	/* Check RX Flow Filters support.
	 * Max Rx flows set by availability of screeners & compare regs:
	 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs
	 */
3379 reg = gem_readl(bp, DCFG8);
3380 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
3381 GEM_BFEXT(T2SCR, reg));
	if (bp->max_tuples > 0) {
		/* also needs one ethtype match to check IPv4 */
		if (GEM_BFEXT(SCR2ETH, reg) > 0) {
			/* program this reg now */
			reg = 0;
3387 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
3388 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
3389
			/* Filtering is supported in hw but don't enable it in kernel now */
			dev->hw_features |= NETIF_F_NTUPLE;

			/* init Rx flow definitions */
			INIT_LIST_HEAD(&bp->rx_fs_list.list);
3393 bp->rx_fs_list.count = 0;
3394 spin_lock_init(&bp->rx_fs_lock);
3395 } else
3396 bp->max_tuples = 0;
3397 }
3398
3399 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
3400 val = 0;
3401 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
3402 val = GEM_BIT(RGMII);
3403 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
3404 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
3405 val = MACB_BIT(RMII);
3406 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
3407 val = MACB_BIT(MII);
3408
3409 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
3410 val |= MACB_BIT(CLKEN);
3411
3412 macb_or_gem_writel(bp, USRIO, val);
3413 }
3414
3415
	/* Set MII management clock divider */
	val = macb_mdc_clk_div(bp);
3417 val |= macb_dbw(bp);
3418 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
3419 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
3420 macb_writel(bp, NCFGR, val);
3421
3422 return 0;
3423}
3424
3425#if defined(CONFIG_OF)
/* 1518 rounded up */
#define AT91ETHER_MAX_RBUFF_SZ	0x600
/* max number of receive buffers */
#define AT91ETHER_MAX_RX_DESCR	9
3430
3431
/* Initialize and start the Receiver and Transmit subsystems */
static int at91ether_start(struct net_device *dev)
3433{
3434 struct macb *lp = netdev_priv(dev);
3435 struct macb_queue *q = &lp->queues[0];
3436 struct macb_dma_desc *desc;
3437 dma_addr_t addr;
3438 u32 ctl;
3439 int i;
3440
3441 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
3442 (AT91ETHER_MAX_RX_DESCR *
3443 macb_dma_desc_get_size(lp)),
3444 &q->rx_ring_dma, GFP_KERNEL);
3445 if (!q->rx_ring)
3446 return -ENOMEM;
3447
3448 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
3449 AT91ETHER_MAX_RX_DESCR *
3450 AT91ETHER_MAX_RBUFF_SZ,
3451 &q->rx_buffers_dma, GFP_KERNEL);
3452 if (!q->rx_buffers) {
3453 dma_free_coherent(&lp->pdev->dev,
3454 AT91ETHER_MAX_RX_DESCR *
3455 macb_dma_desc_get_size(lp),
3456 q->rx_ring, q->rx_ring_dma);
3457 q->rx_ring = NULL;
3458 return -ENOMEM;
3459 }
3460
3461 addr = q->rx_buffers_dma;
3462 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
3463 desc = macb_rx_desc(q, i);
3464 macb_set_addr(lp, desc, addr);
3465 desc->ctrl = 0;
3466 addr += AT91ETHER_MAX_RBUFF_SZ;
3467 }
3468
3469
	/* Set the Wrap bit on the last descriptor */
	desc->addr |= MACB_BIT(RX_WRAP);
3471
3472
	/* Reset buffer index */
	q->rx_tail = 0;

	/* Program address of descriptor list in Rx Buffer Queue register */
	macb_writel(lp, RBQP, q->rx_ring_dma);
3477
3478
	/* Enable Receive and Transmit */
	ctl = macb_readl(lp, NCR);
3480 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
3481
3482 return 0;
3483}
3484
3485
/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
3487{
3488 struct macb *lp = netdev_priv(dev);
3489 u32 ctl;
3490 int ret;
3491
3492
	/* Clear internal statistics */
	ctl = macb_readl(lp, NCR);
3494 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
3495
3496 macb_set_hwaddr(lp);
3497
3498 ret = at91ether_start(dev);
3499 if (ret)
3500 return ret;
3501
3502
	/* Enable MAC interrupts */
	macb_writel(lp, IER, MACB_BIT(RCOMP) |
3504 MACB_BIT(RXUBR) |
3505 MACB_BIT(ISR_TUND) |
3506 MACB_BIT(ISR_RLE) |
3507 MACB_BIT(TCOMP) |
3508 MACB_BIT(ISR_ROVR) |
3509 MACB_BIT(HRESP));
3510
3511
	/* schedule a link state check */
	phy_start(dev->phydev);
3513
3514 netif_start_queue(dev);
3515
3516 return 0;
3517}
3518
3519
/* Close the interface */
static int at91ether_close(struct net_device *dev)
3521{
3522 struct macb *lp = netdev_priv(dev);
3523 struct macb_queue *q = &lp->queues[0];
3524 u32 ctl;
3525
3526
	/* Disable Receiver and Transmitter */
	ctl = macb_readl(lp, NCR);
3528 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
3529
3530
	/* Disable MAC interrupts */
	macb_writel(lp, IDR, MACB_BIT(RCOMP) |
3532 MACB_BIT(RXUBR) |
3533 MACB_BIT(ISR_TUND) |
3534 MACB_BIT(ISR_RLE) |
3535 MACB_BIT(TCOMP) |
3536 MACB_BIT(ISR_ROVR) |
3537 MACB_BIT(HRESP));
3538
3539 netif_stop_queue(dev);
3540
3541 dma_free_coherent(&lp->pdev->dev,
3542 AT91ETHER_MAX_RX_DESCR *
3543 macb_dma_desc_get_size(lp),
3544 q->rx_ring, q->rx_ring_dma);
3545 q->rx_ring = NULL;
3546
3547 dma_free_coherent(&lp->pdev->dev,
3548 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
3549 q->rx_buffers, q->rx_buffers_dma);
3550 q->rx_buffers = NULL;
3551
3552 return 0;
3553}
3554
3555
/* Transmit packet */
static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
3557{
3558 struct macb *lp = netdev_priv(dev);
3559
3560 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
3561 netif_stop_queue(dev);
3562

		/* Store packet information (to free when Tx completed) */
		lp->skb = skb;
		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
						  skb->len, DMA_TO_DEVICE);
		if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
3569 dev_kfree_skb_any(skb);
3570 dev->stats.tx_dropped++;
3571 netdev_err(dev, "%s: DMA mapping error\n", __func__);
3572 return NETDEV_TX_OK;
3573 }
3574
3575
		/* Set address of the data in the Transmit Address register */
		macb_writel(lp, TAR, lp->skb_physaddr);
		/* Set length of the packet in the Transmit Control register */
		macb_writel(lp, TCR, skb->len);
3579
3580 } else {
3581 netdev_err(dev, "%s called, but device is busy!\n", __func__);
3582 return NETDEV_TX_BUSY;
3583 }
3584
3585 return NETDEV_TX_OK;
3586}
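/* The RM9200 EMAC has a single transmit buffer, so at most one frame is in
 * flight: RM9200_BNQ in TSR means the buffer is free, the queue is stopped
 * as soon as a frame is queued, and netif_wake_queue() runs from the TCOMP
 * interrupt in at91ether_interrupt() below. Writing the length to TCR is
 * what actually starts transmission.
 */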
3587
3588
/* Extract received frame from buffer descriptors and sent to upper layers.
 * (Called from interrupt context)
 */
static void at91ether_rx(struct net_device *dev)
3592{
3593 struct macb *lp = netdev_priv(dev);
3594 struct macb_queue *q = &lp->queues[0];
3595 struct macb_dma_desc *desc;
3596 unsigned char *p_recv;
3597 struct sk_buff *skb;
3598 unsigned int pktlen;
3599
3600 desc = macb_rx_desc(q, q->rx_tail);
3601 while (desc->addr & MACB_BIT(RX_USED)) {
3602 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
3603 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
3604 skb = netdev_alloc_skb(dev, pktlen + 2);
3605 if (skb) {
3606 skb_reserve(skb, 2);
3607 skb_put_data(skb, p_recv, pktlen);
3608
3609 skb->protocol = eth_type_trans(skb, dev);
3610 dev->stats.rx_packets++;
3611 dev->stats.rx_bytes += pktlen;
3612 netif_rx(skb);
3613 } else {
3614 dev->stats.rx_dropped++;
3615 }
3616
3617 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
3618 dev->stats.multicast++;
3619
3620
		/* reset ownership bit */
		desc->addr &= ~MACB_BIT(RX_USED);
3622
3623
		/* wrap after last buffer */
		if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
3625 q->rx_tail = 0;
3626 else
3627 q->rx_tail++;
3628
3629 desc = macb_rx_desc(q, q->rx_tail);
3630 }
3631}
3632
3633
/* MAC interrupt handler */
static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
3635{
3636 struct net_device *dev = dev_id;
3637 struct macb *lp = netdev_priv(dev);
3638 u32 intstatus, ctl;
3639
	/* MAC Interrupt Status register indicates what interrupts are pending.
	 * It is automatically cleared once read.
	 */
	intstatus = macb_readl(lp, ISR);
3644
3645
	/* Receive complete */
	if (intstatus & MACB_BIT(RCOMP))
3647 at91ether_rx(dev);
3648
3649
	/* Transmit complete */
	if (intstatus & MACB_BIT(TCOMP)) {
		/* The TCOM bit is set even if the transmission failed */
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
3653 dev->stats.tx_errors++;
3654
3655 if (lp->skb) {
3656 dev_kfree_skb_irq(lp->skb);
3657 lp->skb = NULL;
			dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
3660 dev->stats.tx_packets++;
3661 dev->stats.tx_bytes += lp->skb_length;
3662 }
3663 netif_wake_queue(dev);
3664 }
3665
3666
	/* Work-around for EMAC Errata section 41.3.1 */
	if (intstatus & MACB_BIT(RXUBR)) {
3668 ctl = macb_readl(lp, NCR);
3669 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
3670 wmb();
3671 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
3672 }
3673
3674 if (intstatus & MACB_BIT(ISR_ROVR))
3675 netdev_err(dev, "ROVR error\n");
3676
3677 return IRQ_HANDLED;
3678}
3679
3680#ifdef CONFIG_NET_POLL_CONTROLLER
3681static void at91ether_poll_controller(struct net_device *dev)
3682{
3683 unsigned long flags;
3684
3685 local_irq_save(flags);
3686 at91ether_interrupt(dev->irq, dev);
3687 local_irq_restore(flags);
3688}
3689#endif
3690
3691static const struct net_device_ops at91ether_netdev_ops = {
3692 .ndo_open = at91ether_open,
3693 .ndo_stop = at91ether_close,
3694 .ndo_start_xmit = at91ether_start_xmit,
3695 .ndo_get_stats = macb_get_stats,
3696 .ndo_set_rx_mode = macb_set_rx_mode,
3697 .ndo_set_mac_address = eth_mac_addr,
3698 .ndo_do_ioctl = macb_ioctl,
3699 .ndo_validate_addr = eth_validate_addr,
3700#ifdef CONFIG_NET_POLL_CONTROLLER
3701 .ndo_poll_controller = at91ether_poll_controller,
3702#endif
3703};
3704
3705static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
3706 struct clk **hclk, struct clk **tx_clk,
3707 struct clk **rx_clk)
3708{
3709 int err;
3710
3711 *hclk = NULL;
3712 *tx_clk = NULL;
3713 *rx_clk = NULL;
3714
3715 *pclk = devm_clk_get(&pdev->dev, "ether_clk");
3716 if (IS_ERR(*pclk))
3717 return PTR_ERR(*pclk);
3718
3719 err = clk_prepare_enable(*pclk);
3720 if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
3722 return err;
3723 }
3724
3725 return 0;
3726}
3727
3728static int at91ether_init(struct platform_device *pdev)
3729{
3730 struct net_device *dev = platform_get_drvdata(pdev);
3731 struct macb *bp = netdev_priv(dev);
3732 int err;
3733 u32 reg;
3734
3735 dev->netdev_ops = &at91ether_netdev_ops;
3736 dev->ethtool_ops = &macb_ethtool_ops;
3737
3738 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
3739 0, dev->name, dev);
3740 if (err)
3741 return err;
3742
3743 macb_writel(bp, NCR, 0);
3744
3745 reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
3746 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
3747 reg |= MACB_BIT(RM9200_RMII);
3748
3749 macb_writel(bp, NCFGR, reg);
3750
3751 return 0;
3752}
3753
3754static const struct macb_config at91sam9260_config = {
3755 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3756 .clk_init = macb_clk_init,
3757 .init = macb_init,
3758};
3759
3760static const struct macb_config pc302gem_config = {
3761 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
3762 .dma_burst_length = 16,
3763 .clk_init = macb_clk_init,
3764 .init = macb_init,
3765};
3766
3767static const struct macb_config sama5d2_config = {
3768 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3769 .dma_burst_length = 16,
3770 .clk_init = macb_clk_init,
3771 .init = macb_init,
3772};
3773
3774static const struct macb_config sama5d3_config = {
3775 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
3776 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
3777 .dma_burst_length = 16,
3778 .clk_init = macb_clk_init,
3779 .init = macb_init,
3780 .jumbo_max_len = 10240,
3781};
3782
3783static const struct macb_config sama5d4_config = {
3784 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3785 .dma_burst_length = 4,
3786 .clk_init = macb_clk_init,
3787 .init = macb_init,
3788};
3789
3790static const struct macb_config emac_config = {
3791 .clk_init = at91ether_clk_init,
3792 .init = at91ether_init,
3793};
3794
3795static const struct macb_config np4_config = {
3796 .caps = MACB_CAPS_USRIO_DISABLED,
3797 .clk_init = macb_clk_init,
3798 .init = macb_init,
3799};
3800
3801static const struct macb_config zynqmp_config = {
3802 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
3803 MACB_CAPS_JUMBO |
3804 MACB_CAPS_GEM_HAS_PTP,
3805 .dma_burst_length = 16,
3806 .clk_init = macb_clk_init,
3807 .init = macb_init,
3808 .jumbo_max_len = 10240,
3809};
3810
3811static const struct macb_config zynq_config = {
3812 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
3813 .dma_burst_length = 16,
3814 .clk_init = macb_clk_init,
3815 .init = macb_init,
3816};
3817
3818static const struct of_device_id macb_dt_ids[] = {
3819 { .compatible = "cdns,at32ap7000-macb" },
3820 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
3821 { .compatible = "cdns,macb" },
3822 { .compatible = "cdns,np4-macb", .data = &np4_config },
3823 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
3824 { .compatible = "cdns,gem", .data = &pc302gem_config },
3825 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
3826 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
3827 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
3828 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
3829 { .compatible = "cdns,emac", .data = &emac_config },
3830 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
3831 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
3832 { }
3833};
3834MODULE_DEVICE_TABLE(of, macb_dt_ids);
3835#endif
3836
3837static const struct macb_config default_gem_config = {
3838 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
3839 MACB_CAPS_JUMBO |
3840 MACB_CAPS_GEM_HAS_PTP,
3841 .dma_burst_length = 16,
3842 .clk_init = macb_clk_init,
3843 .init = macb_init,
3844 .jumbo_max_len = 10240,
3845};
3846
3847static int macb_probe(struct platform_device *pdev)
3848{
3849 const struct macb_config *macb_config = &default_gem_config;
3850 int (*clk_init)(struct platform_device *, struct clk **,
3851 struct clk **, struct clk **, struct clk **)
3852 = macb_config->clk_init;
3853 int (*init)(struct platform_device *) = macb_config->init;
3854 struct device_node *np = pdev->dev.of_node;
3855 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
3856 unsigned int queue_mask, num_queues;
3857 struct macb_platform_data *pdata;
3858 bool native_io;
3859 struct phy_device *phydev;
3860 struct net_device *dev;
3861 struct resource *regs;
3862 void __iomem *mem;
3863 const char *mac;
3864 struct macb *bp;
3865 int err;
3866
3867 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3868 mem = devm_ioremap_resource(&pdev->dev, regs);
3869 if (IS_ERR(mem))
3870 return PTR_ERR(mem);
3871
3872 if (np) {
3873 const struct of_device_id *match;
3874
3875 match = of_match_node(macb_dt_ids, np);
3876 if (match && match->data) {
3877 macb_config = match->data;
3878 clk_init = macb_config->clk_init;
3879 init = macb_config->init;
3880 }
3881 }
3882
3883 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
3884 if (err)
3885 return err;
3886
3887 native_io = hw_is_native_io(mem);
3888
3889 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
3890 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
3891 if (!dev) {
3892 err = -ENOMEM;
3893 goto err_disable_clocks;
3894 }
3895
3896 dev->base_addr = regs->start;
3897
3898 SET_NETDEV_DEV(dev, &pdev->dev);
3899
3900 bp = netdev_priv(dev);
3901 bp->pdev = pdev;
3902 bp->dev = dev;
3903 bp->regs = mem;
3904 bp->native_io = native_io;
3905 if (native_io) {
3906 bp->macb_reg_readl = hw_readl_native;
3907 bp->macb_reg_writel = hw_writel_native;
3908 } else {
3909 bp->macb_reg_readl = hw_readl;
3910 bp->macb_reg_writel = hw_writel;
3911 }
3912 bp->num_queues = num_queues;
3913 bp->queue_mask = queue_mask;
3914 if (macb_config)
3915 bp->dma_burst_length = macb_config->dma_burst_length;
3916 bp->pclk = pclk;
3917 bp->hclk = hclk;
3918 bp->tx_clk = tx_clk;
3919 bp->rx_clk = rx_clk;
3920 if (macb_config)
3921 bp->jumbo_max_len = macb_config->jumbo_max_len;
3922
3923 bp->wol = 0;
3924 if (of_get_property(np, "magic-packet", NULL))
3925 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
3926 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
3927
3928 spin_lock_init(&bp->lock);
3929
3930
	/* setup capabilities */
	macb_configure_caps(bp, macb_config);
3932
3933#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3934 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
3935 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
3936 bp->hw_dma_cap |= HW_DMA_CAP_64B;
3937 }
3938#endif
3939 platform_set_drvdata(pdev, dev);
3940
3941 dev->irq = platform_get_irq(pdev, 0);
3942 if (dev->irq < 0) {
3943 err = dev->irq;
3944 goto err_out_free_netdev;
3945 }
3946
3947
	/* MTU range: 68 - 1500 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
3949 if (bp->caps & MACB_CAPS_JUMBO)
3950 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
3951 else
3952 dev->max_mtu = ETH_DATA_LEN;
3953
3954 mac = of_get_mac_address(np);
3955 if (mac) {
3956 ether_addr_copy(bp->dev->dev_addr, mac);
3957 } else {
3958 err = of_get_nvmem_mac_address(np, bp->dev->dev_addr);
3959 if (err) {
3960 if (err == -EPROBE_DEFER)
3961 goto err_out_free_netdev;
3962 macb_get_hwaddr(bp);
3963 }
3964 }
3965
3966 err = of_get_phy_mode(np);
3967 if (err < 0) {
3968 pdata = dev_get_platdata(&pdev->dev);
3969 if (pdata && pdata->is_rmii)
3970 bp->phy_interface = PHY_INTERFACE_MODE_RMII;
3971 else
3972 bp->phy_interface = PHY_INTERFACE_MODE_MII;
3973 } else {
3974 bp->phy_interface = err;
3975 }
3976
3977
	/* IP specific init */
	err = init(pdev);
3979 if (err)
3980 goto err_out_free_netdev;
3981
3982 err = macb_mii_init(bp);
3983 if (err)
3984 goto err_out_free_netdev;
3985
3986 phydev = dev->phydev;
3987
3988 netif_carrier_off(dev);
3989
3990 err = register_netdev(dev);
3991 if (err) {
3992 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
3993 goto err_out_unregister_mdio;
3994 }
3995
3996 tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
3997 (unsigned long)bp);
3998
3999 phy_attached_info(phydev);
4000
4001 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
4002 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
4003 dev->base_addr, dev->irq, dev->dev_addr);
4004
4005 return 0;
4006
4007err_out_unregister_mdio:
4008 phy_disconnect(dev->phydev);
4009 mdiobus_unregister(bp->mii_bus);
4010 of_node_put(bp->phy_node);
4011 if (np && of_phy_is_fixed_link(np))
4012 of_phy_deregister_fixed_link(np);
4013 mdiobus_free(bp->mii_bus);
4014
4015err_out_free_netdev:
4016 free_netdev(dev);
4017
4018err_disable_clocks:
4019 clk_disable_unprepare(tx_clk);
4020 clk_disable_unprepare(hclk);
4021 clk_disable_unprepare(pclk);
4022 clk_disable_unprepare(rx_clk);
4023
4024 return err;
4025}
4026
4027static int macb_remove(struct platform_device *pdev)
4028{
4029 struct net_device *dev;
4030 struct macb *bp;
4031 struct device_node *np = pdev->dev.of_node;
4032
4033 dev = platform_get_drvdata(pdev);
4034
4035 if (dev) {
4036 bp = netdev_priv(dev);
4037 if (dev->phydev)
4038 phy_disconnect(dev->phydev);
4039 mdiobus_unregister(bp->mii_bus);
4040 if (np && of_phy_is_fixed_link(np))
4041 of_phy_deregister_fixed_link(np);
4042 dev->phydev = NULL;
4043 mdiobus_free(bp->mii_bus);
4044
4045 unregister_netdev(dev);
4046 clk_disable_unprepare(bp->tx_clk);
4047 clk_disable_unprepare(bp->hclk);
4048 clk_disable_unprepare(bp->pclk);
4049 clk_disable_unprepare(bp->rx_clk);
4050 of_node_put(bp->phy_node);
4051 free_netdev(dev);
4052 }
4053
4054 return 0;
4055}
4056
4057static int __maybe_unused macb_suspend(struct device *dev)
4058{
4059 struct platform_device *pdev = to_platform_device(dev);
4060 struct net_device *netdev = platform_get_drvdata(pdev);
4061 struct macb *bp = netdev_priv(netdev);
4062
4063 netif_carrier_off(netdev);
4064 netif_device_detach(netdev);
4065
4066 if (bp->wol & MACB_WOL_ENABLED) {
4067 macb_writel(bp, IER, MACB_BIT(WOL));
4068 macb_writel(bp, WOL, MACB_BIT(MAG));
4069 enable_irq_wake(bp->queues[0].irq);
4070 } else {
4071 clk_disable_unprepare(bp->tx_clk);
4072 clk_disable_unprepare(bp->hclk);
4073 clk_disable_unprepare(bp->pclk);
4074 clk_disable_unprepare(bp->rx_clk);
4075 }
4076
4077 return 0;
4078}
4079
4080static int __maybe_unused macb_resume(struct device *dev)
4081{
4082 struct platform_device *pdev = to_platform_device(dev);
4083 struct net_device *netdev = platform_get_drvdata(pdev);
4084 struct macb *bp = netdev_priv(netdev);
4085
4086 if (bp->wol & MACB_WOL_ENABLED) {
4087 macb_writel(bp, IDR, MACB_BIT(WOL));
4088 macb_writel(bp, WOL, 0);
4089 disable_irq_wake(bp->queues[0].irq);
4090 } else {
4091 clk_prepare_enable(bp->pclk);
4092 clk_prepare_enable(bp->hclk);
4093 clk_prepare_enable(bp->tx_clk);
4094 clk_prepare_enable(bp->rx_clk);
4095 }
4096
4097 netif_device_attach(netdev);
4098
4099 return 0;
4100}
4101
4102static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
4103
4104static struct platform_driver macb_driver = {
4105 .probe = macb_probe,
4106 .remove = macb_remove,
4107 .driver = {
4108 .name = "macb",
4109 .of_match_table = of_match_ptr(macb_dt_ids),
4110 .pm = &macb_pm_ops,
4111 },
4112};
4113
4114module_platform_driver(macb_driver);
4115
4116MODULE_LICENSE("GPL");
4117MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
4118MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
4119MODULE_ALIAS("platform:macb");
4120