/* Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include "macb.h"

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
					| MACB_BIT(ISR_RLE)		\
					| MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

/* DMA buffer descriptor might be different size
 * depending on hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
	return NULL;
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
		 macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of NCR register.
 * When the CPU is in big endian we need to program swapped mode for
 * management descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}
259
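/* Program the device's MAC address into the first specific-address register
 * pair (first four bytes in SA1B, last two in SA1T) and clear the unused
 * SA2-SA4 register sets so they cannot match any frames.
 */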
260static void macb_set_hwaddr(struct macb *bp)
261{
262 u32 bottom;
263 u16 top;
264
265 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
266 macb_or_gem_writel(bp, SA1B, bottom);
267 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
268 macb_or_gem_writel(bp, SA1T, top);
269
270
271 macb_or_gem_writel(bp, SA2B, 0);
272 macb_or_gem_writel(bp, SA2T, 0);
273 macb_or_gem_writel(bp, SA3B, 0);
274 macb_or_gem_writel(bp, SA3T, 0);
275 macb_or_gem_writel(bp, SA4B, 0);
276 macb_or_gem_writel(bp, SA4T, 0);
277}
278
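/* Read a candidate MAC address back from each of the four specific-address
 * register pairs and keep the first valid one; pdata->rev_eth_addr handles
 * platforms that stored the address byte-reversed. If no valid address is
 * found, fall back to a random locally administered address.
 */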
279static void macb_get_hwaddr(struct macb *bp)
280{
281 struct macb_platform_data *pdata;
282 u32 bottom;
283 u16 top;
284 u8 addr[6];
285 int i;
286
287 pdata = dev_get_platdata(&bp->pdev->dev);
288
289
290 for (i = 0; i < 4; i++) {
291 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
292 top = macb_or_gem_readl(bp, SA1T + i * 8);
293
294 if (pdata && pdata->rev_eth_addr) {
295 addr[5] = bottom & 0xff;
296 addr[4] = (bottom >> 8) & 0xff;
297 addr[3] = (bottom >> 16) & 0xff;
298 addr[2] = (bottom >> 24) & 0xff;
299 addr[1] = top & 0xff;
300 addr[0] = (top & 0xff00) >> 8;
301 } else {
302 addr[0] = bottom & 0xff;
303 addr[1] = (bottom >> 8) & 0xff;
304 addr[2] = (bottom >> 16) & 0xff;
305 addr[3] = (bottom >> 24) & 0xff;
306 addr[4] = top & 0xff;
307 addr[5] = (top >> 8) & 0xff;
308 }
309
310 if (is_valid_ether_addr(addr)) {
311 memcpy(bp->dev->dev_addr, addr, sizeof(addr));
312 return;
313 }
314 }
315
316 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
317 eth_hw_addr_random(bp->dev);
318}
319
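/* Clause 22 MDIO access: build a management frame in the MAN register
 * (start of frame, read/write opcode, PHY and register addresses, the
 * fixed CODE bits and, for writes, the data) and busy-wait on the IDLE
 * bit in NSR until the shift operation has completed.
 */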
320static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
321{
322 struct macb *bp = bus->priv;
323 int value;
324
325 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
326 | MACB_BF(RW, MACB_MAN_READ)
327 | MACB_BF(PHYA, mii_id)
328 | MACB_BF(REGA, regnum)
329 | MACB_BF(CODE, MACB_MAN_CODE)));
330
331
332 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
333 cpu_relax();
334
335 value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
336
337 return value;
338}
339
340static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
341 u16 value)
342{
343 struct macb *bp = bus->priv;
344
345 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
346 | MACB_BF(RW, MACB_MAN_WRITE)
347 | MACB_BF(PHYA, mii_id)
348 | MACB_BF(REGA, regnum)
349 | MACB_BF(CODE, MACB_MAN_CODE)
350 | MACB_BF(DATA, value)));
351
352
353 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
354 cpu_relax();
355
356 return 0;
357}
358
359
360
361
362
363
364
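/* macb_set_tx_clk() - retune the TX clock for the negotiated link speed:
 * 2.5 MHz for 10 Mbit/s, 25 MHz for 100 Mbit/s and 125 MHz for 1 Gbit/s.
 */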
365static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
366{
367 long ferr, rate, rate_rounded;
368
369 if (!clk)
370 return;
371
372 switch (speed) {
373 case SPEED_10:
374 rate = 2500000;
375 break;
376 case SPEED_100:
377 rate = 25000000;
378 break;
379 case SPEED_1000:
380 rate = 125000000;
381 break;
382 default:
383 return;
384 }
385
386 rate_rounded = clk_round_rate(clk, rate);
387 if (rate_rounded < 0)
388 return;
389

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);
398
399 if (clk_set_rate(clk, rate_rounded))
400 netdev_err(dev, "adjusting tx_clk failed.\n");
401}
402
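/* phylib adjust_link callback: when the PHY reports a new link state,
 * reprogram the NCFGR speed/duplex bits (and the gigabit bit on GEM),
 * retune the TX clock and update the netif carrier state accordingly.
 */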
403static void macb_handle_link_change(struct net_device *dev)
404{
405 struct macb *bp = netdev_priv(dev);
406 struct phy_device *phydev = dev->phydev;
407 unsigned long flags;
408 int status_change = 0;
409
410 spin_lock_irqsave(&bp->lock, flags);
411
412 if (phydev->link) {
413 if ((bp->speed != phydev->speed) ||
414 (bp->duplex != phydev->duplex)) {
415 u32 reg;
416
417 reg = macb_readl(bp, NCFGR);
418 reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
419 if (macb_is_gem(bp))
420 reg &= ~GEM_BIT(GBE);
421
422 if (phydev->duplex)
423 reg |= MACB_BIT(FD);
424 if (phydev->speed == SPEED_100)
425 reg |= MACB_BIT(SPD);
426 if (phydev->speed == SPEED_1000 &&
427 bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
428 reg |= GEM_BIT(GBE);
429
430 macb_or_gem_writel(bp, NCFGR, reg);
431
432 bp->speed = phydev->speed;
433 bp->duplex = phydev->duplex;
434 status_change = 1;
435 }
436 }
437
438 if (phydev->link != bp->link) {
439 if (!phydev->link) {
440 bp->speed = 0;
441 bp->duplex = -1;
442 }
443 bp->link = phydev->link;
444
445 status_change = 1;
446 }
447
448 spin_unlock_irqrestore(&bp->lock, flags);
449
450 if (status_change) {
451 if (phydev->link) {
452
453
454
455 macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
456
457 netif_carrier_on(dev);
458 netdev_info(dev, "link up (%d/%s)\n",
459 phydev->speed,
460 phydev->duplex == DUPLEX_FULL ?
461 "Full" : "Half");
462 } else {
463 netif_carrier_off(dev);
464 netdev_info(dev, "link down\n");
465 }
466 }
467}
468
469
470static int macb_mii_probe(struct net_device *dev)
471{
472 struct macb *bp = netdev_priv(dev);
473 struct macb_platform_data *pdata;
474 struct phy_device *phydev;
475 struct device_node *np;
476 int phy_irq, ret, i;
477
478 pdata = dev_get_platdata(&bp->pdev->dev);
479 np = bp->pdev->dev.of_node;
480 ret = 0;
481
482 if (np) {
483 if (of_phy_is_fixed_link(np)) {
484 bp->phy_node = of_node_get(np);
485 } else {
486 bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
487
488
489
490
491 if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
492 for (i = 0; i < PHY_MAX_ADDR; i++) {
493 struct phy_device *phydev;
494
495 phydev = mdiobus_scan(bp->mii_bus, i);
496 if (IS_ERR(phydev) &&
497 PTR_ERR(phydev) != -ENODEV) {
498 ret = PTR_ERR(phydev);
499 break;
500 }
501 }
502
503 if (ret)
504 return -ENODEV;
505 }
506 }
507 }
508
509 if (bp->phy_node) {
510 phydev = of_phy_connect(dev, bp->phy_node,
511 &macb_handle_link_change, 0,
512 bp->phy_interface);
513 if (!phydev)
514 return -ENODEV;
515 } else {
516 phydev = phy_find_first(bp->mii_bus);
517 if (!phydev) {
518 netdev_err(dev, "no PHY found\n");
519 return -ENXIO;
520 }
521
522 if (pdata) {
523 if (gpio_is_valid(pdata->phy_irq_pin)) {
524 ret = devm_gpio_request(&bp->pdev->dev,
525 pdata->phy_irq_pin, "phy int");
526 if (!ret) {
527 phy_irq = gpio_to_irq(pdata->phy_irq_pin);
528 phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
529 }
530 } else {
531 phydev->irq = PHY_POLL;
532 }
533 }
534
535
536 ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
537 bp->phy_interface);
538 if (ret) {
539 netdev_err(dev, "Could not attach to PHY\n");
540 return ret;
541 }
542 }
543
544
545 if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
546 phy_set_max_speed(phydev, SPEED_1000);
547 else
548 phy_set_max_speed(phydev, SPEED_100);
549
550 if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
551 phy_remove_link_mode(phydev,
552 ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
553
554 bp->link = 0;
555 bp->speed = 0;
556 bp->duplex = -1;
557
558 return 0;
559}
560
561static int macb_mii_init(struct macb *bp)
562{
563 struct macb_platform_data *pdata;
564 struct device_node *np;
565 int err = -ENXIO;
566
567
568 macb_writel(bp, NCR, MACB_BIT(MPE));
569
570 bp->mii_bus = mdiobus_alloc();
571 if (!bp->mii_bus) {
572 err = -ENOMEM;
573 goto err_out;
574 }
575
576 bp->mii_bus->name = "MACB_mii_bus";
577 bp->mii_bus->read = &macb_mdio_read;
578 bp->mii_bus->write = &macb_mdio_write;
579 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
580 bp->pdev->name, bp->pdev->id);
581 bp->mii_bus->priv = bp;
582 bp->mii_bus->parent = &bp->pdev->dev;
583 pdata = dev_get_platdata(&bp->pdev->dev);
584
585 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
586
587 np = bp->pdev->dev.of_node;
588 if (np && of_phy_is_fixed_link(np)) {
589 if (of_phy_register_fixed_link(np) < 0) {
590 dev_err(&bp->pdev->dev,
591 "broken fixed-link specification %pOF\n", np);
592 goto err_out_free_mdiobus;
593 }
594
595 err = mdiobus_register(bp->mii_bus);
596 } else {
597 if (pdata)
598 bp->mii_bus->phy_mask = pdata->phy_mask;
599
600 err = of_mdiobus_register(bp->mii_bus, np);
601 }
602
603 if (err)
604 goto err_out_free_fixed_link;
605
606 err = macb_mii_probe(bp->dev);
607 if (err)
608 goto err_out_unregister_bus;
609
610 return 0;
611
612err_out_unregister_bus:
613 mdiobus_unregister(bp->mii_bus);
614err_out_free_fixed_link:
615 if (np && of_phy_is_fixed_link(np))
616 of_phy_deregister_fixed_link(np);
617err_out_free_mdiobus:
618 of_node_put(bp->phy_node);
619 mdiobus_free(bp->mii_bus);
620err_out:
621 return err;
622}
623
624static void macb_update_stats(struct macb *bp)
625{
626 u32 *p = &bp->hw_stats.macb.rx_pause_frames;
627 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
628 int offset = MACB_PFR;
629
630 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
631
632 for (; p < end; p++, offset += 4)
633 *p += bp->macb_reg_readl(bp, offset);
634}
635
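/* Request a graceful transmit halt (THALT) and poll the "transmit go" bit
 * in TSR until the frame currently being sent has gone out, giving up
 * after MACB_HALT_TIMEOUT microseconds.
 */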
636static int macb_halt_tx(struct macb *bp)
637{
638 unsigned long halt_time, timeout;
639 u32 status;
640
641 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
642
643 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
644 do {
645 halt_time = jiffies;
646 status = macb_readl(bp, TSR);
647 if (!(status & MACB_BIT(TGO)))
648 return 0;
649
650 usleep_range(10, 250);
651 } while (time_before(halt_time, timeout));
652
653 return -ETIMEDOUT;
654}
655
656static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
657{
658 if (tx_skb->mapping) {
659 if (tx_skb->mapped_as_page)
660 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
661 tx_skb->size, DMA_TO_DEVICE);
662 else
663 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
664 tx_skb->size, DMA_TO_DEVICE);
665 tx_skb->mapping = 0;
666 }
667
668 if (tx_skb->skb) {
669 dev_kfree_skb_any(tx_skb->skb);
670 tx_skb->skb = NULL;
671 }
672}
673
674static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
675{
676#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
677 struct macb_dma_desc_64 *desc_64;
678
679 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
680 desc_64 = macb_64b_desc(bp, desc);
681 desc_64->addrh = upper_32_bits(addr);
682 }
683#endif
684 desc->addr = lower_32_bits(addr);
685}
686
687static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
688{
689 dma_addr_t addr = 0;
690#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
691 struct macb_dma_desc_64 *desc_64;
692
693 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
694 desc_64 = macb_64b_desc(bp, desc);
695 addr = ((u64)(desc_64->addrh) << 32);
696 }
697#endif
698 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
699 return addr;
700}
701
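/* Workqueue handler for TX errors (underrun, retry limit exceeded, ...):
 * stop the transmit queues, halt the transmitter, walk the descriptors
 * between tx_tail and tx_head (accounting frames that did complete and
 * freeing their buffers), reinitialize the ring to an empty state and
 * finally restart transmission.
 */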
702static void macb_tx_error_task(struct work_struct *work)
703{
704 struct macb_queue *queue = container_of(work, struct macb_queue,
705 tx_error_task);
706 struct macb *bp = queue->bp;
707 struct macb_tx_skb *tx_skb;
708 struct macb_dma_desc *desc;
709 struct sk_buff *skb;
710 unsigned int tail;
711 unsigned long flags;
712
713 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
714 (unsigned int)(queue - bp->queues),
715 queue->tx_tail, queue->tx_head);
716
717
718
719
720
721
722
723 spin_lock_irqsave(&bp->lock, flags);
724
725
726 netif_tx_stop_all_queues(bp->dev);
727
728
729
730
731
732 if (macb_halt_tx(bp))
733
734 netdev_err(bp->dev, "BUG: halt tx timed out\n");
735
736
737
738
739 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
740 u32 ctrl;
741
742 desc = macb_tx_desc(queue, tail);
743 ctrl = desc->ctrl;
744 tx_skb = macb_tx_skb(queue, tail);
745 skb = tx_skb->skb;
746
747 if (ctrl & MACB_BIT(TX_USED)) {
748
749 while (!skb) {
750 macb_tx_unmap(bp, tx_skb);
751 tail++;
752 tx_skb = macb_tx_skb(queue, tail);
753 skb = tx_skb->skb;
754 }
755
756
757
758
759 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
760 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
761 macb_tx_ring_wrap(bp, tail),
762 skb->data);
763 bp->dev->stats.tx_packets++;
764 queue->stats.tx_packets++;
765 bp->dev->stats.tx_bytes += skb->len;
766 queue->stats.tx_bytes += skb->len;
767 }
768 } else {
769
770
771
772
773 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
774 netdev_err(bp->dev,
775 "BUG: TX buffers exhausted mid-frame\n");
776
777 desc->ctrl = ctrl | MACB_BIT(TX_USED);
778 }
779
780 macb_tx_unmap(bp, tx_skb);
781 }
782
783
784 desc = macb_tx_desc(queue, 0);
785 macb_set_addr(bp, desc, 0);
786 desc->ctrl = MACB_BIT(TX_USED);
787
788
789 wmb();
790
791
792 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
793#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
794 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
795 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
796#endif
797
798 queue->tx_head = 0;
799 queue->tx_tail = 0;
800
801
802 macb_writel(bp, TSR, macb_readl(bp, TSR));
803 queue_writel(queue, IER, MACB_TX_INT_FLAGS);
804
805
806 netif_tx_start_all_queues(bp->dev);
807 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
808
809 spin_unlock_irqrestore(&bp->lock, flags);
810}
811
812static void macb_tx_interrupt(struct macb_queue *queue)
813{
814 unsigned int tail;
815 unsigned int head;
816 u32 status;
817 struct macb *bp = queue->bp;
818 u16 queue_index = queue - bp->queues;
819
820 status = macb_readl(bp, TSR);
821 macb_writel(bp, TSR, status);
822
823 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
824 queue_writel(queue, ISR, MACB_BIT(TCOMP));
825
826 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
827 (unsigned long)status);
828
829 head = queue->tx_head;
830 for (tail = queue->tx_tail; tail != head; tail++) {
831 struct macb_tx_skb *tx_skb;
832 struct sk_buff *skb;
833 struct macb_dma_desc *desc;
834 u32 ctrl;
835
836 desc = macb_tx_desc(queue, tail);
837
838
839 rmb();
840
841 ctrl = desc->ctrl;
842
843
844
845
846 if (!(ctrl & MACB_BIT(TX_USED)))
847 break;
848
849
850 for (;; tail++) {
851 tx_skb = macb_tx_skb(queue, tail);
852 skb = tx_skb->skb;
853
854
855 if (skb) {
856 if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
857
858
859
860 tx_skb->skb = NULL;
861 }
862 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
863 macb_tx_ring_wrap(bp, tail),
864 skb->data);
865 bp->dev->stats.tx_packets++;
866 queue->stats.tx_packets++;
867 bp->dev->stats.tx_bytes += skb->len;
868 queue->stats.tx_bytes += skb->len;
869 }
870
871
872 macb_tx_unmap(bp, tx_skb);
873
874
875
876
877
878 if (skb)
879 break;
880 }
881 }
882
883 queue->tx_tail = tail;
884 if (__netif_subqueue_stopped(bp->dev, queue_index) &&
885 CIRC_CNT(queue->tx_head, queue->tx_tail,
886 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
887 netif_wake_subqueue(bp->dev, queue_index);
888}
889
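/* Refill the GEM RX ring: for every free slot between rx_prepared_head and
 * rx_tail allocate and DMA-map a fresh skb, then hand the descriptor back
 * to the hardware by writing the buffer address with RX_USED cleared (the
 * last descriptor in the ring also carries the RX_WRAP marker).
 */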
890static void gem_rx_refill(struct macb_queue *queue)
891{
892 unsigned int entry;
893 struct sk_buff *skb;
894 dma_addr_t paddr;
895 struct macb *bp = queue->bp;
896 struct macb_dma_desc *desc;
897
898 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
899 bp->rx_ring_size) > 0) {
900 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
901
902
903 rmb();
904
905 queue->rx_prepared_head++;
906 desc = macb_rx_desc(queue, entry);
907
908 if (!queue->rx_skbuff[entry]) {
909
910 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
911 if (unlikely(!skb)) {
912 netdev_err(bp->dev,
913 "Unable to allocate sk_buff\n");
914 break;
915 }
916
917
918 paddr = dma_map_single(&bp->pdev->dev, skb->data,
919 bp->rx_buffer_size,
920 DMA_FROM_DEVICE);
921 if (dma_mapping_error(&bp->pdev->dev, paddr)) {
922 dev_kfree_skb(skb);
923 break;
924 }
925
926 queue->rx_skbuff[entry] = skb;
927
928 if (entry == bp->rx_ring_size - 1)
929 paddr |= MACB_BIT(RX_WRAP);
930 macb_set_addr(bp, desc, paddr);
931 desc->ctrl = 0;
932
933
934 skb_reserve(skb, NET_IP_ALIGN);
935 } else {
936 desc->addr &= ~MACB_BIT(RX_USED);
937 desc->ctrl = 0;
938 }
939 }
940
941
942 wmb();
943
944 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
945 queue, queue->rx_prepared_head, queue->rx_tail);
946}
947
948
949static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
950 unsigned int end)
951{
952 unsigned int frag;
953
954 for (frag = begin; frag != end; frag++) {
955 struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
956
957 desc->addr &= ~MACB_BIT(RX_USED);
958 }
959
960
961 wmb();
962
963
964
965
966
967}
968
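/* GEM receive path: every frame occupies a single descriptor/buffer, so a
 * used descriptor that does not carry both SOF and EOF indicates a
 * corrupted ring. Completed buffers are unmapped and handed straight to
 * the stack, and the ring is refilled afterwards.
 */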
969static int gem_rx(struct macb_queue *queue, int budget)
970{
971 struct macb *bp = queue->bp;
972 unsigned int len;
973 unsigned int entry;
974 struct sk_buff *skb;
975 struct macb_dma_desc *desc;
976 int count = 0;
977
978 while (count < budget) {
979 u32 ctrl;
980 dma_addr_t addr;
981 bool rxused;
982
983 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
984 desc = macb_rx_desc(queue, entry);
985
986
987 rmb();
988
989 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
990 addr = macb_get_addr(bp, desc);
991 ctrl = desc->ctrl;
992
993 if (!rxused)
994 break;
995
996 queue->rx_tail++;
997 count++;
998
999 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
1000 netdev_err(bp->dev,
1001 "not whole frame pointed by descriptor\n");
1002 bp->dev->stats.rx_dropped++;
1003 queue->stats.rx_dropped++;
1004 break;
1005 }
1006 skb = queue->rx_skbuff[entry];
1007 if (unlikely(!skb)) {
1008 netdev_err(bp->dev,
1009 "inconsistent Rx descriptor chain\n");
1010 bp->dev->stats.rx_dropped++;
1011 queue->stats.rx_dropped++;
1012 break;
1013 }
1014
1015 queue->rx_skbuff[entry] = NULL;
1016 len = ctrl & bp->rx_frm_len_mask;
1017
1018 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
1019
1020 skb_put(skb, len);
1021 dma_unmap_single(&bp->pdev->dev, addr,
1022 bp->rx_buffer_size, DMA_FROM_DEVICE);
1023
1024 skb->protocol = eth_type_trans(skb, bp->dev);
1025 skb_checksum_none_assert(skb);
1026 if (bp->dev->features & NETIF_F_RXCSUM &&
1027 !(bp->dev->flags & IFF_PROMISC) &&
1028 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
1029 skb->ip_summed = CHECKSUM_UNNECESSARY;
1030
1031 bp->dev->stats.rx_packets++;
1032 queue->stats.rx_packets++;
1033 bp->dev->stats.rx_bytes += skb->len;
1034 queue->stats.rx_bytes += skb->len;
1035
1036 gem_ptp_do_rxstamp(bp, skb, desc);
1037
1038#if defined(DEBUG) && defined(VERBOSE_DEBUG)
1039 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1040 skb->len, skb->csum);
1041 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
1042 skb_mac_header(skb), 16, true);
1043 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
1044 skb->data, 32, true);
1045#endif
1046
1047 netif_receive_skb(skb);
1048 }
1049
1050 gem_rx_refill(queue);
1051
1052 return count;
1053}
1054
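/* MACB (10/100) receive path: a frame may span several rx_buffer_size
 * fragments, so the payload is copied piecewise into a freshly allocated
 * skb and the descriptors are given back to the hardware afterwards.
 */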
1055static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
1056 unsigned int last_frag)
1057{
1058 unsigned int len;
1059 unsigned int frag;
1060 unsigned int offset;
1061 struct sk_buff *skb;
1062 struct macb_dma_desc *desc;
1063 struct macb *bp = queue->bp;
1064
1065 desc = macb_rx_desc(queue, last_frag);
1066 len = desc->ctrl & bp->rx_frm_len_mask;
1067
1068 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
1069 macb_rx_ring_wrap(bp, first_frag),
1070 macb_rx_ring_wrap(bp, last_frag), len);
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
1081 if (!skb) {
1082 bp->dev->stats.rx_dropped++;
1083 for (frag = first_frag; ; frag++) {
1084 desc = macb_rx_desc(queue, frag);
1085 desc->addr &= ~MACB_BIT(RX_USED);
1086 if (frag == last_frag)
1087 break;
1088 }
1089
1090
1091 wmb();
1092
1093 return 1;
1094 }
1095
1096 offset = 0;
1097 len += NET_IP_ALIGN;
1098 skb_checksum_none_assert(skb);
1099 skb_put(skb, len);
1100
1101 for (frag = first_frag; ; frag++) {
1102 unsigned int frag_len = bp->rx_buffer_size;
1103
1104 if (offset + frag_len > len) {
1105 if (unlikely(frag != last_frag)) {
1106 dev_kfree_skb_any(skb);
1107 return -1;
1108 }
1109 frag_len = len - offset;
1110 }
1111 skb_copy_to_linear_data_offset(skb, offset,
1112 macb_rx_buffer(queue, frag),
1113 frag_len);
1114 offset += bp->rx_buffer_size;
1115 desc = macb_rx_desc(queue, frag);
1116 desc->addr &= ~MACB_BIT(RX_USED);
1117
1118 if (frag == last_frag)
1119 break;
1120 }
1121
1122
1123 wmb();
1124
1125 __skb_pull(skb, NET_IP_ALIGN);
1126 skb->protocol = eth_type_trans(skb, bp->dev);
1127
1128 bp->dev->stats.rx_packets++;
1129 bp->dev->stats.rx_bytes += skb->len;
1130 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1131 skb->len, skb->csum);
1132 netif_receive_skb(skb);
1133
1134 return 0;
1135}
1136
1137static inline void macb_init_rx_ring(struct macb_queue *queue)
1138{
1139 struct macb *bp = queue->bp;
1140 dma_addr_t addr;
1141 struct macb_dma_desc *desc = NULL;
1142 int i;
1143
1144 addr = queue->rx_buffers_dma;
1145 for (i = 0; i < bp->rx_ring_size; i++) {
1146 desc = macb_rx_desc(queue, i);
1147 macb_set_addr(bp, desc, addr);
1148 desc->ctrl = 0;
1149 addr += bp->rx_buffer_size;
1150 }
1151 desc->addr |= MACB_BIT(RX_WRAP);
1152 queue->rx_tail = 0;
1153}
1154
1155static int macb_rx(struct macb_queue *queue, int budget)
1156{
1157 struct macb *bp = queue->bp;
1158 bool reset_rx_queue = false;
1159 int received = 0;
1160 unsigned int tail;
1161 int first_frag = -1;
1162
1163 for (tail = queue->rx_tail; budget > 0; tail++) {
1164 struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
1165 u32 ctrl;
1166
1167
1168 rmb();
1169
1170 ctrl = desc->ctrl;
1171
1172 if (!(desc->addr & MACB_BIT(RX_USED)))
1173 break;
1174
1175 if (ctrl & MACB_BIT(RX_SOF)) {
1176 if (first_frag != -1)
1177 discard_partial_frame(queue, first_frag, tail);
1178 first_frag = tail;
1179 }
1180
1181 if (ctrl & MACB_BIT(RX_EOF)) {
1182 int dropped;
1183
1184 if (unlikely(first_frag == -1)) {
1185 reset_rx_queue = true;
1186 continue;
1187 }
1188
1189 dropped = macb_rx_frame(queue, first_frag, tail);
1190 first_frag = -1;
1191 if (unlikely(dropped < 0)) {
1192 reset_rx_queue = true;
1193 continue;
1194 }
1195 if (!dropped) {
1196 received++;
1197 budget--;
1198 }
1199 }
1200 }
1201
1202 if (unlikely(reset_rx_queue)) {
1203 unsigned long flags;
1204 u32 ctrl;
1205
1206 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1207
1208 spin_lock_irqsave(&bp->lock, flags);
1209
1210 ctrl = macb_readl(bp, NCR);
1211 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1212
1213 macb_init_rx_ring(queue);
1214 queue_writel(queue, RBQP, queue->rx_ring_dma);
1215
1216 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1217
1218 spin_unlock_irqrestore(&bp->lock, flags);
1219 return received;
1220 }
1221
1222 if (first_frag != -1)
1223 queue->rx_tail = first_frag;
1224 else
1225 queue->rx_tail = tail;
1226
1227 return received;
1228}
1229
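/* NAPI poll handler: process received frames up to the budget and only
 * re-enable RX interrupts once the ring has been drained; a final check of
 * RSR catches packets that arrived between the last poll and interrupt
 * re-enable and reschedules NAPI instead.
 */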
1230static int macb_poll(struct napi_struct *napi, int budget)
1231{
1232 struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
1233 struct macb *bp = queue->bp;
1234 int work_done;
1235 u32 status;
1236
1237 status = macb_readl(bp, RSR);
1238 macb_writel(bp, RSR, status);
1239
1240 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
1241 (unsigned long)status, budget);
1242
1243 work_done = bp->macbgem_ops.mog_rx(queue, budget);
1244 if (work_done < budget) {
1245 napi_complete_done(napi, work_done);
1246
1247
1248 status = macb_readl(bp, RSR);
1249 if (status) {
1250 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1251 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1252 napi_reschedule(napi);
1253 } else {
1254 queue_writel(queue, IER, MACB_RX_INT_FLAGS);
1255 }
1256 }
1257
1258
1259
1260 return work_done;
1261}
1262
1263static void macb_hresp_error_task(unsigned long data)
1264{
1265 struct macb *bp = (struct macb *)data;
1266 struct net_device *dev = bp->dev;
1267 struct macb_queue *queue = bp->queues;
1268 unsigned int q;
1269 u32 ctrl;
1270
1271 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1272 queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
1273 MACB_TX_INT_FLAGS |
1274 MACB_BIT(HRESP));
1275 }
1276 ctrl = macb_readl(bp, NCR);
1277 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
1278 macb_writel(bp, NCR, ctrl);
1279
1280 netif_tx_stop_all_queues(dev);
1281 netif_carrier_off(dev);
1282
1283 bp->macbgem_ops.mog_init_rings(bp);
1284
1285
1286 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1287 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
1288#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1289 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1290 queue_writel(queue, RBQPH,
1291 upper_32_bits(queue->rx_ring_dma));
1292#endif
1293 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1294#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1295 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
1296 queue_writel(queue, TBQPH,
1297 upper_32_bits(queue->tx_ring_dma));
1298#endif
1299
1300
1301 queue_writel(queue, IER,
1302 MACB_RX_INT_FLAGS |
1303 MACB_TX_INT_FLAGS |
1304 MACB_BIT(HRESP));
1305 }
1306
1307 ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
1308 macb_writel(bp, NCR, ctrl);
1309
1310 netif_carrier_on(dev);
1311 netif_tx_start_all_queues(dev);
1312}
1313
1314static irqreturn_t macb_interrupt(int irq, void *dev_id)
1315{
1316 struct macb_queue *queue = dev_id;
1317 struct macb *bp = queue->bp;
1318 struct net_device *dev = bp->dev;
1319 u32 status, ctrl;
1320
1321 status = queue_readl(queue, ISR);
1322
1323 if (unlikely(!status))
1324 return IRQ_NONE;
1325
1326 spin_lock(&bp->lock);
1327
1328 while (status) {
1329
1330 if (unlikely(!netif_running(dev))) {
1331 queue_writel(queue, IDR, -1);
1332 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1333 queue_writel(queue, ISR, -1);
1334 break;
1335 }
1336
1337 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1338 (unsigned int)(queue - bp->queues),
1339 (unsigned long)status);
1340
1341 if (status & MACB_RX_INT_FLAGS) {
1342
1343
1344
1345
1346
1347
1348 queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
1349 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1350 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1351
1352 if (napi_schedule_prep(&queue->napi)) {
1353 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1354 __napi_schedule(&queue->napi);
1355 }
1356 }
1357
1358 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
1359 queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1360 schedule_work(&queue->tx_error_task);
1361
1362 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1363 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1364
1365 break;
1366 }
1367
1368 if (status & MACB_BIT(TCOMP))
1369 macb_tx_interrupt(queue);
1370
		/* Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		/* There is a hardware issue under heavy load where DMA can
		 * stop: the controller raises endless "used buffer descriptor
		 * read" (RXUBR) interrupts, which can be cleared by briefly
		 * disabling and re-enabling the receiver.
		 */
		if (status & MACB_BIT(RXUBR)) {
			ctrl = macb_readl(bp, NCR);
			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
			wmb();
			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));

			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				queue_writel(queue, ISR, MACB_BIT(RXUBR));
		}
1390
1391 if (status & MACB_BIT(ISR_ROVR)) {
1392
1393 if (macb_is_gem(bp))
1394 bp->hw_stats.gem.rx_overruns++;
1395 else
1396 bp->hw_stats.macb.rx_overruns++;
1397
1398 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1399 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1400 }
1401
1402 if (status & MACB_BIT(HRESP)) {
1403 tasklet_schedule(&bp->hresp_err_tasklet);
1404 netdev_err(dev, "DMA bus error: HRESP not OK\n");
1405
1406 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1407 queue_writel(queue, ISR, MACB_BIT(HRESP));
1408 }
1409 status = queue_readl(queue, ISR);
1410 }
1411
1412 spin_unlock(&bp->lock);
1413
1414 return IRQ_HANDLED;
1415}
1416
1417#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
1421static void macb_poll_controller(struct net_device *dev)
1422{
1423 struct macb *bp = netdev_priv(dev);
1424 struct macb_queue *queue;
1425 unsigned long flags;
1426 unsigned int q;
1427
1428 local_irq_save(flags);
1429 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1430 macb_interrupt(dev->irq, queue);
1431 local_irq_restore(flags);
1432}
1433#endif
1434
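/* Map an skb (linear part plus page fragments) onto TX descriptors; buffers
 * longer than bp->max_tx_length are split across several descriptors. The
 * descriptors are filled in backwards, from the last one to the first,
 * because the controller only considers the frame once TX_USED is cleared
 * on the first descriptor, which also carries the LSO/MSS fields.
 */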
1435static unsigned int macb_tx_map(struct macb *bp,
1436 struct macb_queue *queue,
1437 struct sk_buff *skb,
1438 unsigned int hdrlen)
1439{
1440 dma_addr_t mapping;
1441 unsigned int len, entry, i, tx_head = queue->tx_head;
1442 struct macb_tx_skb *tx_skb = NULL;
1443 struct macb_dma_desc *desc;
1444 unsigned int offset, size, count = 0;
1445 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
1446 unsigned int eof = 1, mss_mfs = 0;
1447 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
1448
1449
1450 if (skb_shinfo(skb)->gso_size != 0) {
1451 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1452
1453 lso_ctrl = MACB_LSO_UFO_ENABLE;
1454 else
1455
1456 lso_ctrl = MACB_LSO_TSO_ENABLE;
1457 }
1458
1459
1460 len = skb_headlen(skb);
1461
1462
1463 size = hdrlen;
1464
1465 offset = 0;
1466 while (len) {
1467 entry = macb_tx_ring_wrap(bp, tx_head);
1468 tx_skb = &queue->tx_skb[entry];
1469
1470 mapping = dma_map_single(&bp->pdev->dev,
1471 skb->data + offset,
1472 size, DMA_TO_DEVICE);
1473 if (dma_mapping_error(&bp->pdev->dev, mapping))
1474 goto dma_error;
1475
1476
1477 tx_skb->skb = NULL;
1478 tx_skb->mapping = mapping;
1479 tx_skb->size = size;
1480 tx_skb->mapped_as_page = false;
1481
1482 len -= size;
1483 offset += size;
1484 count++;
1485 tx_head++;
1486
1487 size = min(len, bp->max_tx_length);
1488 }
1489
1490
1491 for (f = 0; f < nr_frags; f++) {
1492 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1493
1494 len = skb_frag_size(frag);
1495 offset = 0;
1496 while (len) {
1497 size = min(len, bp->max_tx_length);
1498 entry = macb_tx_ring_wrap(bp, tx_head);
1499 tx_skb = &queue->tx_skb[entry];
1500
1501 mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
1502 offset, size, DMA_TO_DEVICE);
1503 if (dma_mapping_error(&bp->pdev->dev, mapping))
1504 goto dma_error;
1505
1506
1507 tx_skb->skb = NULL;
1508 tx_skb->mapping = mapping;
1509 tx_skb->size = size;
1510 tx_skb->mapped_as_page = true;
1511
1512 len -= size;
1513 offset += size;
1514 count++;
1515 tx_head++;
1516 }
1517 }
1518
1519
1520 if (unlikely(!tx_skb)) {
1521 netdev_err(bp->dev, "BUG! empty skb!\n");
1522 return 0;
1523 }
1524
1525
1526 tx_skb->skb = skb;
1527
1528
1529
1530
1531
1532
1533
1534
1535 i = tx_head;
1536 entry = macb_tx_ring_wrap(bp, i);
1537 ctrl = MACB_BIT(TX_USED);
1538 desc = macb_tx_desc(queue, entry);
1539 desc->ctrl = ctrl;
1540
1541 if (lso_ctrl) {
1542 if (lso_ctrl == MACB_LSO_UFO_ENABLE)
1543
1544 mss_mfs = skb_shinfo(skb)->gso_size +
1545 skb_transport_offset(skb) +
1546 ETH_FCS_LEN;
1547 else {
1548 mss_mfs = skb_shinfo(skb)->gso_size;
1549
1550
1551
1552 seq_ctrl = 0;
1553 }
1554 }
1555
1556 do {
1557 i--;
1558 entry = macb_tx_ring_wrap(bp, i);
1559 tx_skb = &queue->tx_skb[entry];
1560 desc = macb_tx_desc(queue, entry);
1561
1562 ctrl = (u32)tx_skb->size;
1563 if (eof) {
1564 ctrl |= MACB_BIT(TX_LAST);
1565 eof = 0;
1566 }
1567 if (unlikely(entry == (bp->tx_ring_size - 1)))
1568 ctrl |= MACB_BIT(TX_WRAP);
1569
1570
1571 if (i == queue->tx_head) {
1572 ctrl |= MACB_BF(TX_LSO, lso_ctrl);
1573 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
1574 } else
1575
1576
1577
1578 ctrl |= MACB_BF(MSS_MFS, mss_mfs);
1579
1580
1581 macb_set_addr(bp, desc, tx_skb->mapping);
1582
1583
1584
1585 wmb();
1586 desc->ctrl = ctrl;
1587 } while (i != queue->tx_head);
1588
1589 queue->tx_head = tx_head;
1590
1591 return count;
1592
1593dma_error:
1594 netdev_err(bp->dev, "TX DMA map failed\n");
1595
1596 for (i = queue->tx_head; i != tx_head; i++) {
1597 tx_skb = macb_tx_skb(queue, i);
1598
1599 macb_tx_unmap(bp, tx_skb);
1600 }
1601
1602 return 0;
1603}
1604
1605static netdev_features_t macb_features_check(struct sk_buff *skb,
1606 struct net_device *dev,
1607 netdev_features_t features)
1608{
1609 unsigned int nr_frags, f;
1610 unsigned int hdrlen;
1611
	/* Validate LSO compatibility */

	/* there is only one buffer */
	if (!skb_is_nonlinear(skb))
		return features;

	/* length of header */
	hdrlen = skb_transport_offset(skb);
	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
		hdrlen += tcp_hdrlen(skb);

	/* For LSO:
	 * When software supplies two or more payload buffers all payload buffers
	 * apart from the last must be a multiple of 8 bytes in size.
	 */
	if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
		return features & ~MACB_NETIF_LSO;

	nr_frags = skb_shinfo(skb)->nr_frags;
	/* No need to check last fragment */
	nr_frags--;
1633 for (f = 0; f < nr_frags; f++) {
1634 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1635
1636 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
1637 return features & ~MACB_NETIF_LSO;
1638 }
1639 return features;
1640}
1641
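/* Zero the checksum field inside the header before handing the frame to
 * the hardware checksum offload; at least Zynq otherwise computes wrong
 * UDP checksums for UDP packets with very short payloads.
 */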
1642static inline int macb_clear_csum(struct sk_buff *skb)
1643{
1644
1645 if (skb->ip_summed != CHECKSUM_PARTIAL)
1646 return 0;
1647
1648
1649 if (unlikely(skb_cow_head(skb, 0)))
1650 return -1;
1651
1652
1653
1654
1655
1656 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
1657 return 0;
1658}
1659
1660static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1661{
1662 u16 queue_index = skb_get_queue_mapping(skb);
1663 struct macb *bp = netdev_priv(dev);
1664 struct macb_queue *queue = &bp->queues[queue_index];
1665 unsigned long flags;
1666 unsigned int desc_cnt, nr_frags, frag_size, f;
1667 unsigned int hdrlen;
1668 bool is_lso, is_udp = 0;
1669
1670 is_lso = (skb_shinfo(skb)->gso_size != 0);
1671
1672 if (is_lso) {
1673 is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
1674
1675
1676 if (is_udp)
1677
1678 hdrlen = skb_transport_offset(skb);
1679 else
1680 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
1681 if (skb_headlen(skb) < hdrlen) {
1682 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
1683
1684 return NETDEV_TX_BUSY;
1685 }
1686 } else
1687 hdrlen = min(skb_headlen(skb), bp->max_tx_length);
1688
1689#if defined(DEBUG) && defined(VERBOSE_DEBUG)
1690 netdev_vdbg(bp->dev,
1691 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1692 queue_index, skb->len, skb->head, skb->data,
1693 skb_tail_pointer(skb), skb_end_pointer(skb));
1694 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1695 skb->data, 16, true);
1696#endif
1697
1698
1699
1700
1701
1702 if (is_lso && (skb_headlen(skb) > hdrlen))
1703
1704 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
1705 else
1706 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1707 nr_frags = skb_shinfo(skb)->nr_frags;
1708 for (f = 0; f < nr_frags; f++) {
1709 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1710 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1711 }
1712
1713 spin_lock_irqsave(&bp->lock, flags);
1714
1715
1716 if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
1717 bp->tx_ring_size) < desc_cnt) {
1718 netif_stop_subqueue(dev, queue_index);
1719 spin_unlock_irqrestore(&bp->lock, flags);
1720 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
1721 queue->tx_head, queue->tx_tail);
1722 return NETDEV_TX_BUSY;
1723 }
1724
1725 if (macb_clear_csum(skb)) {
1726 dev_kfree_skb_any(skb);
1727 goto unlock;
1728 }
1729
1730
1731 if (!macb_tx_map(bp, queue, skb, hdrlen)) {
1732 dev_kfree_skb_any(skb);
1733 goto unlock;
1734 }
1735
1736
1737 wmb();
1738 skb_tx_timestamp(skb);
1739
1740 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1741
1742 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
1743 netif_stop_subqueue(dev, queue_index);
1744
1745unlock:
1746 spin_unlock_irqrestore(&bp->lock, flags);
1747
1748 return NETDEV_TX_OK;
1749}
1750
1751static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
1752{
1753 if (!macb_is_gem(bp)) {
1754 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
1755 } else {
1756 bp->rx_buffer_size = size;
1757
1758 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
1759 netdev_dbg(bp->dev,
1760 "RX buffer must be multiple of %d bytes, expanding\n",
1761 RX_BUFFER_MULTIPLE);
1762 bp->rx_buffer_size =
1763 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
1764 }
1765 }
1766
1767 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
1768 bp->dev->mtu, bp->rx_buffer_size);
1769}
1770
1771static void gem_free_rx_buffers(struct macb *bp)
1772{
1773 struct sk_buff *skb;
1774 struct macb_dma_desc *desc;
1775 struct macb_queue *queue;
1776 dma_addr_t addr;
1777 unsigned int q;
1778 int i;
1779
1780 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1781 if (!queue->rx_skbuff)
1782 continue;
1783
1784 for (i = 0; i < bp->rx_ring_size; i++) {
1785 skb = queue->rx_skbuff[i];
1786
1787 if (!skb)
1788 continue;
1789
1790 desc = macb_rx_desc(queue, i);
1791 addr = macb_get_addr(bp, desc);
1792
1793 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1794 DMA_FROM_DEVICE);
1795 dev_kfree_skb_any(skb);
1796 skb = NULL;
1797 }
1798
1799 kfree(queue->rx_skbuff);
1800 queue->rx_skbuff = NULL;
1801 }
1802}
1803
1804static void macb_free_rx_buffers(struct macb *bp)
1805{
1806 struct macb_queue *queue = &bp->queues[0];
1807
1808 if (queue->rx_buffers) {
1809 dma_free_coherent(&bp->pdev->dev,
1810 bp->rx_ring_size * bp->rx_buffer_size,
1811 queue->rx_buffers, queue->rx_buffers_dma);
1812 queue->rx_buffers = NULL;
1813 }
1814}
1815
1816static void macb_free_consistent(struct macb *bp)
1817{
1818 struct macb_queue *queue;
1819 unsigned int q;
1820 int size;
1821
1822 bp->macbgem_ops.mog_free_rx_buffers(bp);
1823
1824 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1825 kfree(queue->tx_skb);
1826 queue->tx_skb = NULL;
1827 if (queue->tx_ring) {
1828 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
1829 dma_free_coherent(&bp->pdev->dev, size,
1830 queue->tx_ring, queue->tx_ring_dma);
1831 queue->tx_ring = NULL;
1832 }
1833 if (queue->rx_ring) {
1834 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
1835 dma_free_coherent(&bp->pdev->dev, size,
1836 queue->rx_ring, queue->rx_ring_dma);
1837 queue->rx_ring = NULL;
1838 }
1839 }
1840}
1841
1842static int gem_alloc_rx_buffers(struct macb *bp)
1843{
1844 struct macb_queue *queue;
1845 unsigned int q;
1846 int size;
1847
1848 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1849 size = bp->rx_ring_size * sizeof(struct sk_buff *);
1850 queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
1851 if (!queue->rx_skbuff)
1852 return -ENOMEM;
1853 else
1854 netdev_dbg(bp->dev,
1855 "Allocated %d RX struct sk_buff entries at %p\n",
1856 bp->rx_ring_size, queue->rx_skbuff);
1857 }
1858 return 0;
1859}
1860
1861static int macb_alloc_rx_buffers(struct macb *bp)
1862{
1863 struct macb_queue *queue = &bp->queues[0];
1864 int size;
1865
1866 size = bp->rx_ring_size * bp->rx_buffer_size;
1867 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
1868 &queue->rx_buffers_dma, GFP_KERNEL);
1869 if (!queue->rx_buffers)
1870 return -ENOMEM;
1871
1872 netdev_dbg(bp->dev,
1873 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1874 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
1875 return 0;
1876}
1877
1878static int macb_alloc_consistent(struct macb *bp)
1879{
1880 struct macb_queue *queue;
1881 unsigned int q;
1882 int size;
1883
1884 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1885 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
1886 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1887 &queue->tx_ring_dma,
1888 GFP_KERNEL);
1889 if (!queue->tx_ring)
1890 goto out_err;
1891 netdev_dbg(bp->dev,
1892 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
1893 q, size, (unsigned long)queue->tx_ring_dma,
1894 queue->tx_ring);
1895
1896 size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
1897 queue->tx_skb = kmalloc(size, GFP_KERNEL);
1898 if (!queue->tx_skb)
1899 goto out_err;
1900
1901 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
1902 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1903 &queue->rx_ring_dma, GFP_KERNEL);
1904 if (!queue->rx_ring)
1905 goto out_err;
1906 netdev_dbg(bp->dev,
1907 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
1908 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
1909 }
1910 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
1911 goto out_err;
1912
1913 return 0;
1914
1915out_err:
1916 macb_free_consistent(bp);
1917 return -ENOMEM;
1918}
1919
1920static void gem_init_rings(struct macb *bp)
1921{
1922 struct macb_queue *queue;
1923 struct macb_dma_desc *desc = NULL;
1924 unsigned int q;
1925 int i;
1926
1927 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1928 for (i = 0; i < bp->tx_ring_size; i++) {
1929 desc = macb_tx_desc(queue, i);
1930 macb_set_addr(bp, desc, 0);
1931 desc->ctrl = MACB_BIT(TX_USED);
1932 }
1933 desc->ctrl |= MACB_BIT(TX_WRAP);
1934 queue->tx_head = 0;
1935 queue->tx_tail = 0;
1936
1937 queue->rx_tail = 0;
1938 queue->rx_prepared_head = 0;
1939
1940 gem_rx_refill(queue);
1941 }
1942
1943}
1944
1945static void macb_init_rings(struct macb *bp)
1946{
1947 int i;
1948 struct macb_dma_desc *desc = NULL;
1949
1950 macb_init_rx_ring(&bp->queues[0]);
1951
1952 for (i = 0; i < bp->tx_ring_size; i++) {
1953 desc = macb_tx_desc(&bp->queues[0], i);
1954 macb_set_addr(bp, desc, 0);
1955 desc->ctrl = MACB_BIT(TX_USED);
1956 }
1957 bp->queues[0].tx_head = 0;
1958 bp->queues[0].tx_tail = 0;
1959 desc->ctrl |= MACB_BIT(TX_WRAP);
1960}
1961
1962static void macb_reset_hw(struct macb *bp)
1963{
1964 struct macb_queue *queue;
1965 unsigned int q;
1966 u32 ctrl = macb_readl(bp, NCR);
1967
1968
1969
1970
1971 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
1972
1973
1974 ctrl |= MACB_BIT(CLRSTAT);
1975
1976 macb_writel(bp, NCR, ctrl);
1977
1978
1979 macb_writel(bp, TSR, -1);
1980 macb_writel(bp, RSR, -1);
1981
1982
1983 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1984 queue_writel(queue, IDR, -1);
1985 queue_readl(queue, ISR);
1986 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1987 queue_writel(queue, ISR, -1);
1988 }
1989}
1990
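/* Select the MDC clock divider so that the management interface clock
 * derived from pclk stays at or below 2.5 MHz; GEM provides a wider range
 * of dividers than the original MACB.
 */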
1991static u32 gem_mdc_clk_div(struct macb *bp)
1992{
1993 u32 config;
1994 unsigned long pclk_hz = clk_get_rate(bp->pclk);
1995
1996 if (pclk_hz <= 20000000)
1997 config = GEM_BF(CLK, GEM_CLK_DIV8);
1998 else if (pclk_hz <= 40000000)
1999 config = GEM_BF(CLK, GEM_CLK_DIV16);
2000 else if (pclk_hz <= 80000000)
2001 config = GEM_BF(CLK, GEM_CLK_DIV32);
2002 else if (pclk_hz <= 120000000)
2003 config = GEM_BF(CLK, GEM_CLK_DIV48);
2004 else if (pclk_hz <= 160000000)
2005 config = GEM_BF(CLK, GEM_CLK_DIV64);
2006 else
2007 config = GEM_BF(CLK, GEM_CLK_DIV96);
2008
2009 return config;
2010}
2011
2012static u32 macb_mdc_clk_div(struct macb *bp)
2013{
2014 u32 config;
2015 unsigned long pclk_hz;
2016
2017 if (macb_is_gem(bp))
2018 return gem_mdc_clk_div(bp);
2019
2020 pclk_hz = clk_get_rate(bp->pclk);
2021 if (pclk_hz <= 20000000)
2022 config = MACB_BF(CLK, MACB_CLK_DIV8);
2023 else if (pclk_hz <= 40000000)
2024 config = MACB_BF(CLK, MACB_CLK_DIV16);
2025 else if (pclk_hz <= 80000000)
2026 config = MACB_BF(CLK, MACB_CLK_DIV32);
2027 else
2028 config = MACB_BF(CLK, MACB_CLK_DIV64);
2029
2030 return config;
2031}
2032
/* Get the DMA bus width field of the network configuration register that we
 * should program.  We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
2037static u32 macb_dbw(struct macb *bp)
2038{
2039 if (!macb_is_gem(bp))
2040 return 0;
2041
2042 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
2043 case 4:
2044 return GEM_BF(DBW, GEM_DBW128);
2045 case 2:
2046 return GEM_BF(DBW, GEM_DBW64);
2047 case 1:
2048 default:
2049 return GEM_BF(DBW, GEM_DBW32);
2050 }
2051}
2052
/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */
2060static void macb_configure_dma(struct macb *bp)
2061{
2062 struct macb_queue *queue;
2063 u32 buffer_size;
2064 unsigned int q;
2065 u32 dmacfg;
2066
2067 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
2068 if (macb_is_gem(bp)) {
2069 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
2070 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2071 if (q)
2072 queue_writel(queue, RBQS, buffer_size);
2073 else
2074 dmacfg |= GEM_BF(RXBS, buffer_size);
2075 }
2076 if (bp->dma_burst_length)
2077 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
2078 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
2079 dmacfg &= ~GEM_BIT(ENDIA_PKT);
2080
2081 if (bp->native_io)
2082 dmacfg &= ~GEM_BIT(ENDIA_DESC);
2083 else
2084 dmacfg |= GEM_BIT(ENDIA_DESC);
2085
2086 if (bp->dev->features & NETIF_F_HW_CSUM)
2087 dmacfg |= GEM_BIT(TXCOEN);
2088 else
2089 dmacfg &= ~GEM_BIT(TXCOEN);
2090
2091#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2092 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2093 dmacfg |= GEM_BIT(ADDR64);
2094#endif
2095#ifdef CONFIG_MACB_USE_HWSTAMP
2096 if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
2097 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
2098#endif
2099 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
2100 dmacfg);
2101 gem_writel(bp, DMACFG, dmacfg);
2102 }
2103}
2104
2105static void macb_init_hw(struct macb *bp)
2106{
2107 struct macb_queue *queue;
2108 unsigned int q;
2109
2110 u32 config;
2111
2112 macb_reset_hw(bp);
2113 macb_set_hwaddr(bp);
2114
2115 config = macb_mdc_clk_div(bp);
2116 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
2117 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
2118 config |= MACB_BF(RBOF, NET_IP_ALIGN);
2119 config |= MACB_BIT(PAE);
2120 config |= MACB_BIT(DRFCS);
2121 if (bp->caps & MACB_CAPS_JUMBO)
2122 config |= MACB_BIT(JFRAME);
2123 else
2124 config |= MACB_BIT(BIG);
2125 if (bp->dev->flags & IFF_PROMISC)
2126 config |= MACB_BIT(CAF);
2127 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
2128 config |= GEM_BIT(RXCOEN);
2129 if (!(bp->dev->flags & IFF_BROADCAST))
2130 config |= MACB_BIT(NBC);
2131 config |= macb_dbw(bp);
2132 macb_writel(bp, NCFGR, config);
2133 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
2134 gem_writel(bp, JML, bp->jumbo_max_len);
2135 bp->speed = SPEED_10;
2136 bp->duplex = DUPLEX_HALF;
2137 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
2138 if (bp->caps & MACB_CAPS_JUMBO)
2139 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
2140
2141 macb_configure_dma(bp);
2142
2143
2144 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2145 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
2146#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2147 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2148 queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
2149#endif
2150 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
2151#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
2152 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
2153 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
2154#endif
2155
2156
2157 queue_writel(queue, IER,
2158 MACB_RX_INT_FLAGS |
2159 MACB_TX_INT_FLAGS |
2160 MACB_BIT(HRESP));
2161 }
2162
2163
2164 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
2165}

/* The hash address register is 64 bits long and takes up two locations in
 * the memory map. The least significant bits are stored in EMAC_HSL and the
 * most significant bits in EMAC_HSH.
 *
 * The unicast and multicast hash enable bits in the network configuration
 * register enable the reception of hash matched frames. The destination
 * address is reduced to a 6 bit index into the 64 bit hash register using
 * the following hash function: the hash is an exclusive or of every sixth
 * bit of the destination address, i.e.
 *
 *	hi[j] = da[j] ^ da[j + 6] ^ da[j + 12] ^ ... ^ da[j + 42],  j = 0..5
 *
 * where da[0] is the least significant bit of the first byte received. If
 * the bit selected by the resulting index is set in the hash register, the
 * frame is accepted (as multicast or unicast depending on da[0] and the
 * corresponding hash enable bit).
 */
2200static inline int hash_bit_value(int bitnr, __u8 *addr)
2201{
2202 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
2203 return 1;
2204 return 0;
2205}

/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}

/* Add multicast addresses to the internal multicast-hash table. */
2224static void macb_sethashtable(struct net_device *dev)
2225{
2226 struct netdev_hw_addr *ha;
2227 unsigned long mc_filter[2];
2228 unsigned int bitnr;
2229 struct macb *bp = netdev_priv(dev);
2230
2231 mc_filter[0] = 0;
2232 mc_filter[1] = 0;
2233
2234 netdev_for_each_mc_addr(ha, dev) {
2235 bitnr = hash_get_index(ha->addr);
2236 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
2237 }
2238
2239 macb_or_gem_writel(bp, HRB, mc_filter[0]);
2240 macb_or_gem_writel(bp, HRT, mc_filter[1]);
2241}

/* Enable/Disable promiscuous and multicast modes. */
2244static void macb_set_rx_mode(struct net_device *dev)
2245{
2246 unsigned long cfg;
2247 struct macb *bp = netdev_priv(dev);
2248
2249 cfg = macb_readl(bp, NCFGR);
2250
2251 if (dev->flags & IFF_PROMISC) {
2252
2253 cfg |= MACB_BIT(CAF);
2254
2255
2256 if (macb_is_gem(bp))
2257 cfg &= ~GEM_BIT(RXCOEN);
2258 } else {
2259
2260 cfg &= ~MACB_BIT(CAF);
2261
2262
2263 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2264 cfg |= GEM_BIT(RXCOEN);
2265 }
2266
2267 if (dev->flags & IFF_ALLMULTI) {
2268
2269 macb_or_gem_writel(bp, HRB, -1);
2270 macb_or_gem_writel(bp, HRT, -1);
2271 cfg |= MACB_BIT(NCFGR_MTI);
2272 } else if (!netdev_mc_empty(dev)) {
2273
2274 macb_sethashtable(dev);
2275 cfg |= MACB_BIT(NCFGR_MTI);
2276 } else if (dev->flags & (~IFF_ALLMULTI)) {
2277
2278 macb_or_gem_writel(bp, HRB, 0);
2279 macb_or_gem_writel(bp, HRT, 0);
2280 cfg &= ~MACB_BIT(NCFGR_MTI);
2281 }
2282
2283 macb_writel(bp, NCFGR, cfg);
2284}
2285
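/* ndo_open: size the RX buffers for the current MTU, allocate the DMA rings
 * and buffers, program the hardware, enable NAPI and start the PHY before
 * letting the stack queue packets.
 */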
2286static int macb_open(struct net_device *dev)
2287{
2288 struct macb *bp = netdev_priv(dev);
2289 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
2290 struct macb_queue *queue;
2291 unsigned int q;
2292 int err;
2293
2294 netdev_dbg(bp->dev, "open\n");
2295
2296
2297 netif_carrier_off(dev);
2298
2299
2300 if (!dev->phydev)
2301 return -EAGAIN;
2302
2303
2304 macb_init_rx_buffer_size(bp, bufsz);
2305
2306 err = macb_alloc_consistent(bp);
2307 if (err) {
2308 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
2309 err);
2310 return err;
2311 }
2312
2313 bp->macbgem_ops.mog_init_rings(bp);
2314 macb_init_hw(bp);
2315
2316 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2317 napi_enable(&queue->napi);
2318
2319
2320 phy_start(dev->phydev);
2321
2322 netif_tx_start_all_queues(dev);
2323
2324 if (bp->ptp_info)
2325 bp->ptp_info->ptp_init(dev);
2326
2327 return 0;
2328}
2329
2330static int macb_close(struct net_device *dev)
2331{
2332 struct macb *bp = netdev_priv(dev);
2333 struct macb_queue *queue;
2334 unsigned long flags;
2335 unsigned int q;
2336
2337 netif_tx_stop_all_queues(dev);
2338
2339 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2340 napi_disable(&queue->napi);
2341
2342 if (dev->phydev)
2343 phy_stop(dev->phydev);
2344
2345 spin_lock_irqsave(&bp->lock, flags);
2346 macb_reset_hw(bp);
2347 netif_carrier_off(dev);
2348 spin_unlock_irqrestore(&bp->lock, flags);
2349
2350 macb_free_consistent(bp);
2351
2352 if (bp->ptp_info)
2353 bp->ptp_info->ptp_remove(dev);
2354
2355 return 0;
2356}
2357
2358static int macb_change_mtu(struct net_device *dev, int new_mtu)
2359{
2360 if (netif_running(dev))
2361 return -EBUSY;
2362
2363 dev->mtu = new_mtu;
2364
2365 return 0;
2366}
2367
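/* Fold the GEM hardware statistics registers into the 64-bit software
 * counters; the octet counters span two registers, with the low word at
 * the base offset and the high word 4 bytes above it.
 */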
2368static void gem_update_stats(struct macb *bp)
2369{
2370 struct macb_queue *queue;
2371 unsigned int i, q, idx;
2372 unsigned long *stat;
2373
2374 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
2375
2376 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
2377 u32 offset = gem_statistics[i].offset;
2378 u64 val = bp->macb_reg_readl(bp, offset);
2379
2380 bp->ethtool_stats[i] += val;
2381 *p += val;
2382
2383 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
2384
2385 val = bp->macb_reg_readl(bp, offset + 4);
2386 bp->ethtool_stats[i] += ((u64)val) << 32;
2387 *(++p) += val;
2388 }
2389 }
2390
2391 idx = GEM_STATS_LEN;
2392 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2393 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
2394 bp->ethtool_stats[idx++] = *stat;
2395}
2396
2397static struct net_device_stats *gem_get_stats(struct macb *bp)
2398{
2399 struct gem_stats *hwstat = &bp->hw_stats.gem;
2400 struct net_device_stats *nstat = &bp->dev->stats;
2401
2402 gem_update_stats(bp);
2403
2404 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
2405 hwstat->rx_alignment_errors +
2406 hwstat->rx_resource_errors +
2407 hwstat->rx_overruns +
2408 hwstat->rx_oversize_frames +
2409 hwstat->rx_jabbers +
2410 hwstat->rx_undersized_frames +
2411 hwstat->rx_length_field_frame_errors);
2412 nstat->tx_errors = (hwstat->tx_late_collisions +
2413 hwstat->tx_excessive_collisions +
2414 hwstat->tx_underrun +
2415 hwstat->tx_carrier_sense_errors);
2416 nstat->multicast = hwstat->rx_multicast_frames;
2417 nstat->collisions = (hwstat->tx_single_collision_frames +
2418 hwstat->tx_multiple_collision_frames +
2419 hwstat->tx_excessive_collisions);
2420 nstat->rx_length_errors = (hwstat->rx_oversize_frames +
2421 hwstat->rx_jabbers +
2422 hwstat->rx_undersized_frames +
2423 hwstat->rx_length_field_frame_errors);
2424 nstat->rx_over_errors = hwstat->rx_resource_errors;
2425 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
2426 nstat->rx_frame_errors = hwstat->rx_alignment_errors;
2427 nstat->rx_fifo_errors = hwstat->rx_overruns;
2428 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
2429 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
2430 nstat->tx_fifo_errors = hwstat->tx_underrun;
2431
2432 return nstat;
2433}
2434
2435static void gem_get_ethtool_stats(struct net_device *dev,
2436 struct ethtool_stats *stats, u64 *data)
2437{
2438 struct macb *bp;
2439
2440 bp = netdev_priv(dev);
2441 gem_update_stats(bp);
2442 memcpy(data, &bp->ethtool_stats, sizeof(u64)
2443 * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
2444}
2445
2446static int gem_get_sset_count(struct net_device *dev, int sset)
2447{
2448 struct macb *bp = netdev_priv(dev);
2449
2450 switch (sset) {
2451 case ETH_SS_STATS:
2452 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
2453 default:
2454 return -EOPNOTSUPP;
2455 }
2456}
2457
2458static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2459{
2460 char stat_string[ETH_GSTRING_LEN];
2461 struct macb *bp = netdev_priv(dev);
2462 struct macb_queue *queue;
2463 unsigned int i;
2464 unsigned int q;
2465
2466 switch (sset) {
2467 case ETH_SS_STATS:
2468 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2469 memcpy(p, gem_statistics[i].stat_string,
2470 ETH_GSTRING_LEN);
2471
2472 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2473 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
2474 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
2475 q, queue_statistics[i].stat_string);
2476 memcpy(p, stat_string, ETH_GSTRING_LEN);
2477 }
2478 }
2479 break;
2480 }
2481}
2482
2483static struct net_device_stats *macb_get_stats(struct net_device *dev)
2484{
2485 struct macb *bp = netdev_priv(dev);
2486 struct net_device_stats *nstat = &bp->dev->stats;
2487 struct macb_stats *hwstat = &bp->hw_stats.macb;
2488
2489 if (macb_is_gem(bp))
2490 return gem_get_stats(bp);
2491
2492
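/* read stats directly from hardware */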
2493 macb_update_stats(bp);
2494
2495
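/* Convert HW stats into netdevice stats */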
2496 nstat->rx_errors = (hwstat->rx_fcs_errors +
2497 hwstat->rx_align_errors +
2498 hwstat->rx_resource_errors +
2499 hwstat->rx_overruns +
2500 hwstat->rx_oversize_pkts +
2501 hwstat->rx_jabbers +
2502 hwstat->rx_undersize_pkts +
2503 hwstat->rx_length_mismatch);
2504 nstat->tx_errors = (hwstat->tx_late_cols +
2505 hwstat->tx_excessive_cols +
2506 hwstat->tx_underruns +
2507 hwstat->tx_carrier_errors +
2508 hwstat->sqe_test_errors);
2509 nstat->collisions = (hwstat->tx_single_cols +
2510 hwstat->tx_multiple_cols +
2511 hwstat->tx_excessive_cols);
2512 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2513 hwstat->rx_jabbers +
2514 hwstat->rx_undersize_pkts +
2515 hwstat->rx_length_mismatch);
2516 nstat->rx_over_errors = hwstat->rx_resource_errors +
2517 hwstat->rx_overruns;
2518 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2519 nstat->rx_frame_errors = hwstat->rx_align_errors;
2520 nstat->rx_fifo_errors = hwstat->rx_overruns;
2521
2522 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2523 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2524 nstat->tx_fifo_errors = hwstat->tx_underruns;
2525
2526
2527 return nstat;
2528}
2529
2530static int macb_get_regs_len(struct net_device *netdev)
2531{
2532 return MACB_GREGS_NBR * sizeof(u32);
2533}
2534
2535static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2536 void *p)
2537{
2538 struct macb *bp = netdev_priv(dev);
2539 unsigned int tail, head;
2540 u32 *regs_buff = p;
2541
2542 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2543 | MACB_GREGS_VERSION;
2544
2545 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
2546 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
2547
2548 regs_buff[0] = macb_readl(bp, NCR);
2549 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
2550 regs_buff[2] = macb_readl(bp, NSR);
2551 regs_buff[3] = macb_readl(bp, TSR);
2552 regs_buff[4] = macb_readl(bp, RBQP);
2553 regs_buff[5] = macb_readl(bp, TBQP);
2554 regs_buff[6] = macb_readl(bp, RSR);
2555 regs_buff[7] = macb_readl(bp, IMR);
2556
2557 regs_buff[8] = tail;
2558 regs_buff[9] = head;
2559 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2560 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
2561
2562 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2563 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
2564 if (macb_is_gem(bp))
2565 regs_buff[13] = gem_readl(bp, DMACFG);
2566}
2567
2568static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2569{
2570 struct macb *bp = netdev_priv(netdev);
2571
2572 wol->supported = 0;
2573 wol->wolopts = 0;
2574
2575 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
2576 wol->supported = WAKE_MAGIC;
2577
2578 if (bp->wol & MACB_WOL_ENABLED)
2579 wol->wolopts |= WAKE_MAGIC;
2580 }
2581}
2582
2583static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2584{
2585 struct macb *bp = netdev_priv(netdev);
2586
2587 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
2588 (wol->wolopts & ~WAKE_MAGIC))
2589 return -EOPNOTSUPP;
2590
2591 if (wol->wolopts & WAKE_MAGIC)
2592 bp->wol |= MACB_WOL_ENABLED;
2593 else
2594 bp->wol &= ~MACB_WOL_ENABLED;
2595
2596 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
2597
2598 return 0;
2599}
2600
2601static void macb_get_ringparam(struct net_device *netdev,
2602 struct ethtool_ringparam *ring)
2603{
2604 struct macb *bp = netdev_priv(netdev);
2605
2606 ring->rx_max_pending = MAX_RX_RING_SIZE;
2607 ring->tx_max_pending = MAX_TX_RING_SIZE;
2608
2609 ring->rx_pending = bp->rx_ring_size;
2610 ring->tx_pending = bp->tx_ring_size;
2611}
2612
2613static int macb_set_ringparam(struct net_device *netdev,
2614 struct ethtool_ringparam *ring)
2615{
2616 struct macb *bp = netdev_priv(netdev);
2617 u32 new_rx_size, new_tx_size;
2618 unsigned int reset = 0;
2619
2620 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2621 return -EINVAL;
2622
2623 new_rx_size = clamp_t(u32, ring->rx_pending,
2624 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
2625 new_rx_size = roundup_pow_of_two(new_rx_size);
2626
2627 new_tx_size = clamp_t(u32, ring->tx_pending,
2628 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
2629 new_tx_size = roundup_pow_of_two(new_tx_size);
2630
2631 if ((new_tx_size == bp->tx_ring_size) &&
2632 (new_rx_size == bp->rx_ring_size)) {
2633
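/* nothing to do */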
2634 return 0;
2635 }
2636
2637 if (netif_running(bp->dev)) {
2638 reset = 1;
2639 macb_close(bp->dev);
2640 }
2641
2642 bp->rx_ring_size = new_rx_size;
2643 bp->tx_ring_size = new_tx_size;
2644
2645 if (reset)
2646 macb_open(bp->dev);
2647
2648 return 0;
2649}
2650
2651#ifdef CONFIG_MACB_USE_HWSTAMP
2652static unsigned int gem_get_tsu_rate(struct macb *bp)
2653{
2654 struct clk *tsu_clk;
2655 unsigned int tsu_rate;
2656
2657 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
2658 if (!IS_ERR(tsu_clk))
2659 tsu_rate = clk_get_rate(tsu_clk);
2660
2661 else if (!IS_ERR(bp->pclk)) {
2662 tsu_clk = bp->pclk;
2663 tsu_rate = clk_get_rate(tsu_clk);
2664 } else
2665 return -ENOTSUPP;
2666 return tsu_rate;
2667}
2668
2669static s32 gem_get_ptp_max_adj(void)
2670{
2671 return 64000000;
2672}
2673
2674static int gem_get_ts_info(struct net_device *dev,
2675 struct ethtool_ts_info *info)
2676{
2677 struct macb *bp = netdev_priv(dev);
2678
2679 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
2680 ethtool_op_get_ts_info(dev, info);
2681 return 0;
2682 }
2683
2684 info->so_timestamping =
2685 SOF_TIMESTAMPING_TX_SOFTWARE |
2686 SOF_TIMESTAMPING_RX_SOFTWARE |
2687 SOF_TIMESTAMPING_SOFTWARE |
2688 SOF_TIMESTAMPING_TX_HARDWARE |
2689 SOF_TIMESTAMPING_RX_HARDWARE |
2690 SOF_TIMESTAMPING_RAW_HARDWARE;
2691 info->tx_types =
2692 (1 << HWTSTAMP_TX_ONESTEP_SYNC) |
2693 (1 << HWTSTAMP_TX_OFF) |
2694 (1 << HWTSTAMP_TX_ON);
2695 info->rx_filters =
2696 (1 << HWTSTAMP_FILTER_NONE) |
2697 (1 << HWTSTAMP_FILTER_ALL);
2698
2699 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
2700
2701 return 0;
2702}
2703
2704static struct macb_ptp_info gem_ptp_info = {
2705 .ptp_init = gem_ptp_init,
2706 .ptp_remove = gem_ptp_remove,
2707 .get_ptp_max_adj = gem_get_ptp_max_adj,
2708 .get_tsu_rate = gem_get_tsu_rate,
2709 .get_ts_info = gem_get_ts_info,
2710 .get_hwtst = gem_get_hwtst,
2711 .set_hwtst = gem_set_hwtst,
2712};
2713#endif
2714
2715static int macb_get_ts_info(struct net_device *netdev,
2716 struct ethtool_ts_info *info)
2717{
2718 struct macb *bp = netdev_priv(netdev);
2719
2720 if (bp->ptp_info)
2721 return bp->ptp_info->get_ts_info(netdev, info);
2722
2723 return ethtool_op_get_ts_info(netdev, info);
2724}
2725
2726static void gem_enable_flow_filters(struct macb *bp, bool enable)
2727{
2728 struct ethtool_rx_fs_item *item;
2729 u32 t2_scr;
2730 int num_t2_scr;
2731
2732 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
2733
2734 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2735 struct ethtool_rx_flow_spec *fs = &item->fs;
2736 struct ethtool_tcpip4_spec *tp4sp_m;
2737
2738 if (fs->location >= num_t2_scr)
2739 continue;
2740
2741 t2_scr = gem_readl_n(bp, SCRT2, fs->location);
2742
2743
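/* enable/disable the rule */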
2744 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
2745
2746
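/* only enable fields with no masking */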
2747 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
2748
2749 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
2750 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
2751 else
2752 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
2753
2754 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
2755 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
2756 else
2757 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
2758
2759 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
2760 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
2761 else
2762 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
2763
2764 gem_writel_n(bp, SCRT2, fs->location, t2_scr);
2765 }
2766}
2767
2768static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
2769{
2770 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
2771 uint16_t index = fs->location;
2772 u32 w0, w1, t2_scr;
2773 bool cmp_a = false;
2774 bool cmp_b = false;
2775 bool cmp_c = false;
2776
2777 tp4sp_v = &(fs->h_u.tcp_ip4_spec);
2778 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
2779
2780
2781 if (tp4sp_m->ip4src == 0xFFFFFFFF) {
2782
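/* 1st compare reg - IP source address */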
2783 w0 = 0;
2784 w1 = 0;
2785 w0 = tp4sp_v->ip4src;
2786 w1 = GEM_BFINS(T2DISMSK, 1, w1);
2787 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
2788 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
2789 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
2790 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
2791 cmp_a = true;
2792 }
2793
2794
2795 if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
2796
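/* 2nd compare reg - IP destination address */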
2797 w0 = 0;
2798 w1 = 0;
2799 w0 = tp4sp_v->ip4dst;
2800 w1 = GEM_BFINS(T2DISMSK, 1, w1);
2801 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
2802 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
2803 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
2804 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
2805 cmp_b = true;
2806 }
2807
2808
2809 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
2810
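/* 3rd compare reg - source port, destination port */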
2811 w0 = 0;
2812 w1 = 0;
2813 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
2814 if (tp4sp_m->psrc == tp4sp_m->pdst) {
2815 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
2816 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
2817 w1 = GEM_BFINS(T2DISMSK, 1, w1);
2818 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
2819 } else {
2820
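/* only one port requested: 16-bit compare at the matching offset */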
2821 w1 = GEM_BFINS(T2DISMSK, 0, w1);
2822 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
2823 if (tp4sp_m->psrc == 0xFFFF) {
2824 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
2825 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
2826 } else {
2827 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
2828 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
2829 }
2830 }
2831 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
2832 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
2833 cmp_c = true;
2834 }
2835
2836 t2_scr = 0;
2837 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
2838 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
2839 if (cmp_a)
2840 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
2841 if (cmp_b)
2842 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
2843 if (cmp_c)
2844 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
2845 gem_writel_n(bp, SCRT2, index, t2_scr);
2846}
2847
2848static int gem_add_flow_filter(struct net_device *netdev,
2849 struct ethtool_rxnfc *cmd)
2850{
2851 struct macb *bp = netdev_priv(netdev);
2852 struct ethtool_rx_flow_spec *fs = &cmd->fs;
2853 struct ethtool_rx_fs_item *item, *newfs;
2854 unsigned long flags;
2855 int ret = -EINVAL;
2856 bool added = false;
2857
2858 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
2859 if (newfs == NULL)
2860 return -ENOMEM;
2861 memcpy(&newfs->fs, fs, sizeof(newfs->fs));
2862
2863 netdev_dbg(netdev,
2864 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
2865 fs->flow_type, (int)fs->ring_cookie, fs->location,
2866 htonl(fs->h_u.tcp_ip4_spec.ip4src),
2867 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
2868 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
2869
2870 spin_lock_irqsave(&bp->rx_fs_lock, flags);
2871
2872
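/* find correct place to add in list */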
2873 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2874 if (item->fs.location > newfs->fs.location) {
2875 list_add_tail(&newfs->list, &item->list);
2876 added = true;
2877 break;
2878 } else if (item->fs.location == fs->location) {
2879 netdev_err(netdev, "Rule not added: location %d not free!\n",
2880 fs->location);
2881 ret = -EBUSY;
2882 goto err;
2883 }
2884 }
2885 if (!added)
2886 list_add_tail(&newfs->list, &bp->rx_fs_list.list);
2887
2888 gem_prog_cmp_regs(bp, fs);
2889 bp->rx_fs_list.count++;
2890
2891 if (netdev->features & NETIF_F_NTUPLE)
2892 gem_enable_flow_filters(bp, 1);
2893
2894 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
2895 return 0;
2896
2897err:
2898 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
2899 kfree(newfs);
2900 return ret;
2901}
2902
2903static int gem_del_flow_filter(struct net_device *netdev,
2904 struct ethtool_rxnfc *cmd)
2905{
2906 struct macb *bp = netdev_priv(netdev);
2907 struct ethtool_rx_fs_item *item;
2908 struct ethtool_rx_flow_spec *fs;
2909 unsigned long flags;
2910
2911 spin_lock_irqsave(&bp->rx_fs_lock, flags);
2912
2913 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2914 if (item->fs.location == cmd->fs.location) {
2915
2916 fs = &(item->fs);
2917 netdev_dbg(netdev,
2918 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
2919 fs->flow_type, (int)fs->ring_cookie, fs->location,
2920 htonl(fs->h_u.tcp_ip4_spec.ip4src),
2921 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
2922 htons(fs->h_u.tcp_ip4_spec.psrc),
2923 htons(fs->h_u.tcp_ip4_spec.pdst));
2924
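/* clear the screener register so the rule no longer matches */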
2925 gem_writel_n(bp, SCRT2, fs->location, 0);
2926
2927 list_del(&item->list);
2928 bp->rx_fs_list.count--;
2929 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
2930 kfree(item);
2931 return 0;
2932 }
2933 }
2934
2935 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
2936 return -EINVAL;
2937}
2938
2939static int gem_get_flow_entry(struct net_device *netdev,
2940 struct ethtool_rxnfc *cmd)
2941{
2942 struct macb *bp = netdev_priv(netdev);
2943 struct ethtool_rx_fs_item *item;
2944
2945 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2946 if (item->fs.location == cmd->fs.location) {
2947 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
2948 return 0;
2949 }
2950 }
2951 return -EINVAL;
2952}
2953
2954static int gem_get_all_flow_entries(struct net_device *netdev,
2955 struct ethtool_rxnfc *cmd, u32 *rule_locs)
2956{
2957 struct macb *bp = netdev_priv(netdev);
2958 struct ethtool_rx_fs_item *item;
2959 uint32_t cnt = 0;
2960
2961 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
2962 if (cnt == cmd->rule_cnt)
2963 return -EMSGSIZE;
2964 rule_locs[cnt] = item->fs.location;
2965 cnt++;
2966 }
2967 cmd->data = bp->max_tuples;
2968 cmd->rule_cnt = cnt;
2969
2970 return 0;
2971}
2972
2973static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
2974 u32 *rule_locs)
2975{
2976 struct macb *bp = netdev_priv(netdev);
2977 int ret = 0;
2978
2979 switch (cmd->cmd) {
2980 case ETHTOOL_GRXRINGS:
2981 cmd->data = bp->num_queues;
2982 break;
2983 case ETHTOOL_GRXCLSRLCNT:
2984 cmd->rule_cnt = bp->rx_fs_list.count;
2985 break;
2986 case ETHTOOL_GRXCLSRULE:
2987 ret = gem_get_flow_entry(netdev, cmd);
2988 break;
2989 case ETHTOOL_GRXCLSRLALL:
2990 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
2991 break;
2992 default:
2993 netdev_err(netdev,
2994 "Command parameter %d is not supported\n", cmd->cmd);
2995 ret = -EOPNOTSUPP;
2996 }
2997
2998 return ret;
2999}
3000
3001static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
3002{
3003 struct macb *bp = netdev_priv(netdev);
3004 int ret;
3005
3006 switch (cmd->cmd) {
3007 case ETHTOOL_SRXCLSRLINS:
3008 if ((cmd->fs.location >= bp->max_tuples)
3009 || (cmd->fs.ring_cookie >= bp->num_queues)) {
3010 ret = -EINVAL;
3011 break;
3012 }
3013 ret = gem_add_flow_filter(netdev, cmd);
3014 break;
3015 case ETHTOOL_SRXCLSRLDEL:
3016 ret = gem_del_flow_filter(netdev, cmd);
3017 break;
3018 default:
3019 netdev_err(netdev,
3020 "Command parameter %d is not supported\n", cmd->cmd);
3021 ret = -EOPNOTSUPP;
3022 }
3023
3024 return ret;
3025}
3026
3027static const struct ethtool_ops macb_ethtool_ops = {
3028 .get_regs_len = macb_get_regs_len,
3029 .get_regs = macb_get_regs,
3030 .get_link = ethtool_op_get_link,
3031 .get_ts_info = ethtool_op_get_ts_info,
3032 .get_wol = macb_get_wol,
3033 .set_wol = macb_set_wol,
3034 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3035 .set_link_ksettings = phy_ethtool_set_link_ksettings,
3036 .get_ringparam = macb_get_ringparam,
3037 .set_ringparam = macb_set_ringparam,
3038};
3039
3040static const struct ethtool_ops gem_ethtool_ops = {
3041 .get_regs_len = macb_get_regs_len,
3042 .get_regs = macb_get_regs,
3043 .get_link = ethtool_op_get_link,
3044 .get_ts_info = macb_get_ts_info,
3045 .get_ethtool_stats = gem_get_ethtool_stats,
3046 .get_strings = gem_get_ethtool_strings,
3047 .get_sset_count = gem_get_sset_count,
3048 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3049 .set_link_ksettings = phy_ethtool_set_link_ksettings,
3050 .get_ringparam = macb_get_ringparam,
3051 .set_ringparam = macb_set_ringparam,
3052 .get_rxnfc = gem_get_rxnfc,
3053 .set_rxnfc = gem_set_rxnfc,
3054};
3055
3056static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3057{
3058 struct phy_device *phydev = dev->phydev;
3059 struct macb *bp = netdev_priv(dev);
3060
3061 if (!netif_running(dev))
3062 return -EINVAL;
3063
3064 if (!phydev)
3065 return -ENODEV;
3066
3067 if (!bp->ptp_info)
3068 return phy_mii_ioctl(phydev, rq, cmd);
3069
3070 switch (cmd) {
3071 case SIOCSHWTSTAMP:
3072 return bp->ptp_info->set_hwtst(dev, rq, cmd);
3073 case SIOCGHWTSTAMP:
3074 return bp->ptp_info->get_hwtst(dev, rq);
3075 default:
3076 return phy_mii_ioctl(phydev, rq, cmd);
3077 }
3078}
3079
3080static int macb_set_features(struct net_device *netdev,
3081 netdev_features_t features)
3082{
3083 struct macb *bp = netdev_priv(netdev);
3084 netdev_features_t changed = features ^ netdev->features;
3085
3086
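/* TX checksum offload */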
3087 if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
3088 u32 dmacfg;
3089
3090 dmacfg = gem_readl(bp, DMACFG);
3091 if (features & NETIF_F_HW_CSUM)
3092 dmacfg |= GEM_BIT(TXCOEN);
3093 else
3094 dmacfg &= ~GEM_BIT(TXCOEN);
3095 gem_writel(bp, DMACFG, dmacfg);
3096 }
3097
3098
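/* RX checksum offload */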
3099 if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
3100 u32 netcfg;
3101
3102 netcfg = gem_readl(bp, NCFGR);
3103 if (features & NETIF_F_RXCSUM &&
3104 !(netdev->flags & IFF_PROMISC))
3105 netcfg |= GEM_BIT(RXCOEN);
3106 else
3107 netcfg &= ~GEM_BIT(RXCOEN);
3108 gem_writel(bp, NCFGR, netcfg);
3109 }
3110
3111
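/* RX Flow Filters */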
3112 if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
3113 bool turn_on = features & NETIF_F_NTUPLE;
3114
3115 gem_enable_flow_filters(bp, turn_on);
3116 }
3117 return 0;
3118}
3119
3120static const struct net_device_ops macb_netdev_ops = {
3121 .ndo_open = macb_open,
3122 .ndo_stop = macb_close,
3123 .ndo_start_xmit = macb_start_xmit,
3124 .ndo_set_rx_mode = macb_set_rx_mode,
3125 .ndo_get_stats = macb_get_stats,
3126 .ndo_do_ioctl = macb_ioctl,
3127 .ndo_validate_addr = eth_validate_addr,
3128 .ndo_change_mtu = macb_change_mtu,
3129 .ndo_set_mac_address = eth_mac_addr,
3130#ifdef CONFIG_NET_POLL_CONTROLLER
3131 .ndo_poll_controller = macb_poll_controller,
3132#endif
3133 .ndo_set_features = macb_set_features,
3134 .ndo_features_check = macb_features_check,
3135};
3136
3137
3138
3139
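/* Configure peripheral capabilities according to device tree
 * and integration options used
 */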
3140static void macb_configure_caps(struct macb *bp,
3141 const struct macb_config *dt_conf)
3142{
3143 u32 dcfg;
3144
3145 if (dt_conf)
3146 bp->caps = dt_conf->caps;
3147
3148 if (hw_is_gem(bp->regs, bp->native_io)) {
3149 bp->caps |= MACB_CAPS_MACB_IS_GEM;
3150
3151 dcfg = gem_readl(bp, DCFG1);
3152 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
3153 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
3154 dcfg = gem_readl(bp, DCFG2);
3155 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
3156 bp->caps |= MACB_CAPS_FIFO_MODE;
3157#ifdef CONFIG_MACB_USE_HWSTAMP
3158 if (gem_has_ptp(bp)) {
3159 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
3160 pr_err("GEM doesn't support hardware ptp.\n");
3161 else {
3162 bp->hw_dma_cap |= HW_DMA_CAP_PTP;
3163 bp->ptp_info = &gem_ptp_info;
3164 }
3165 }
3166#endif
3167 }
3168
3169 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
3170}
3171
3172static void macb_probe_queues(void __iomem *mem,
3173 bool native_io,
3174 unsigned int *queue_mask,
3175 unsigned int *num_queues)
3176{
3177 unsigned int hw_q;
3178
3179 *queue_mask = 0x1;
3180 *num_queues = 1;
3181
3182
3183
3184
3185
3186
3187
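/* Is it macb or gem?
 *
 * We need to read directly from the hardware here because
 * we are early in the probe process and don't have the
 * MACB_CAPS_MACB_IS_GEM flag positioned yet.
 */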
3188 if (!hw_is_gem(mem, native_io))
3189 return;
3190
3191
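/* bit 0 is never set but queue 0 always exists */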
3192 *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
3193
3194 *queue_mask |= 0x1;
3195
3196 for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
3197 if (*queue_mask & (1 << hw_q))
3198 (*num_queues)++;
3199}
3200
3201static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
3202 struct clk **hclk, struct clk **tx_clk,
3203 struct clk **rx_clk)
3204{
3205 struct macb_platform_data *pdata;
3206 int err;
3207
3208 pdata = dev_get_platdata(&pdev->dev);
3209 if (pdata) {
3210 *pclk = pdata->pclk;
3211 *hclk = pdata->hclk;
3212 } else {
3213 *pclk = devm_clk_get(&pdev->dev, "pclk");
3214 *hclk = devm_clk_get(&pdev->dev, "hclk");
3215 }
3216
3217 if (IS_ERR(*pclk)) {
3218 err = PTR_ERR(*pclk);
3219 dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
3220 return err;
3221 }
3222
3223 if (IS_ERR(*hclk)) {
3224 err = PTR_ERR(*hclk);
3225 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
3226 return err;
3227 }
3228
3229 *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
3230 if (IS_ERR(*tx_clk))
3231 *tx_clk = NULL;
3232
3233 *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
3234 if (IS_ERR(*rx_clk))
3235 *rx_clk = NULL;
3236
3237 err = clk_prepare_enable(*pclk);
3238 if (err) {
3239 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
3240 return err;
3241 }
3242
3243 err = clk_prepare_enable(*hclk);
3244 if (err) {
3245 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
3246 goto err_disable_pclk;
3247 }
3248
3249 err = clk_prepare_enable(*tx_clk);
3250 if (err) {
3251 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
3252 goto err_disable_hclk;
3253 }
3254
3255 err = clk_prepare_enable(*rx_clk);
3256 if (err) {
3257 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
3258 goto err_disable_txclk;
3259 }
3260
3261 return 0;
3262
3263err_disable_txclk:
3264 clk_disable_unprepare(*tx_clk);
3265
3266err_disable_hclk:
3267 clk_disable_unprepare(*hclk);
3268
3269err_disable_pclk:
3270 clk_disable_unprepare(*pclk);
3271
3272 return err;
3273}
3274
3275static int macb_init(struct platform_device *pdev)
3276{
3277 struct net_device *dev = platform_get_drvdata(pdev);
3278 unsigned int hw_q, q;
3279 struct macb *bp = netdev_priv(dev);
3280 struct macb_queue *queue;
3281 int err;
3282 u32 val, reg;
3283
3284 bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
3285 bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
3286
3287
3288
3289
3290
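/* Set the queue register mapping once for all: queue0 has a special
 * register mapping but we don't want to test the queue index then
 * compute the corresponding register offset at run time.
 */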
3291 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
3292 if (!(bp->queue_mask & (1 << hw_q)))
3293 continue;
3294
3295 queue = &bp->queues[q];
3296 queue->bp = bp;
3297 netif_napi_add(dev, &queue->napi, macb_poll, 64);
3298 if (hw_q) {
3299 queue->ISR = GEM_ISR(hw_q - 1);
3300 queue->IER = GEM_IER(hw_q - 1);
3301 queue->IDR = GEM_IDR(hw_q - 1);
3302 queue->IMR = GEM_IMR(hw_q - 1);
3303 queue->TBQP = GEM_TBQP(hw_q - 1);
3304 queue->RBQP = GEM_RBQP(hw_q - 1);
3305 queue->RBQS = GEM_RBQS(hw_q - 1);
3306#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3307 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
3308 queue->TBQPH = GEM_TBQPH(hw_q - 1);
3309 queue->RBQPH = GEM_RBQPH(hw_q - 1);
3310 }
3311#endif
3312 } else {
3313
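/* queue0 uses legacy registers */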
3314 queue->ISR = MACB_ISR;
3315 queue->IER = MACB_IER;
3316 queue->IDR = MACB_IDR;
3317 queue->IMR = MACB_IMR;
3318 queue->TBQP = MACB_TBQP;
3319 queue->RBQP = MACB_RBQP;
3320#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3321 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
3322 queue->TBQPH = MACB_TBQPH;
3323 queue->RBQPH = MACB_RBQPH;
3324 }
3325#endif
3326 }
3327
3328
3329
3330
3331
3332
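/* Get the irq: here we use the linux queue index, not the hardware
 * queue index. The queue irq definitions in the device tree
 * must remove the optional gaps that could exist in the
 * hardware queue mask.
 */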
3333 queue->irq = platform_get_irq(pdev, q);
3334 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
3335 IRQF_SHARED, dev->name, queue);
3336 if (err) {
3337 dev_err(&pdev->dev,
3338 "Unable to request IRQ %d (error %d)\n",
3339 queue->irq, err);
3340 return err;
3341 }
3342
3343 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
3344 q++;
3345 }
3346
3347 dev->netdev_ops = &macb_netdev_ops;
3348
3349
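/* setup appropriate routines according to adapter type */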
3350 if (macb_is_gem(bp)) {
3351 bp->max_tx_length = GEM_MAX_TX_LEN;
3352 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
3353 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
3354 bp->macbgem_ops.mog_init_rings = gem_init_rings;
3355 bp->macbgem_ops.mog_rx = gem_rx;
3356 dev->ethtool_ops = &gem_ethtool_ops;
3357 } else {
3358 bp->max_tx_length = MACB_MAX_TX_LEN;
3359 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
3360 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
3361 bp->macbgem_ops.mog_init_rings = macb_init_rings;
3362 bp->macbgem_ops.mog_rx = macb_rx;
3363 dev->ethtool_ops = &macb_ethtool_ops;
3364 }
3365
3366
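/* Set features */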
3367 dev->hw_features = NETIF_F_SG;
3368
3369
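/* Check LSO capability */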
3370 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
3371 dev->hw_features |= MACB_NETIF_LSO;
3372
3373
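/* Checksum offload is only available on gem with packet buffer */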
3374 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
3375 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
3376 if (bp->caps & MACB_CAPS_SG_DISABLED)
3377 dev->hw_features &= ~NETIF_F_SG;
3378 dev->features = dev->hw_features;
3379
3380
3381
3382
3383
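/* Check RX Flow Filters support.
 * Max Rx flows set by availability of screeners & compare regs:
 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs
 */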
3384 reg = gem_readl(bp, DCFG8);
3385 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
3386 GEM_BFEXT(T2SCR, reg));
3387 if (bp->max_tuples > 0) {
3388
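/* also needs one ethtype match to check IPv4 */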
3389 if (GEM_BFEXT(SCR2ETH, reg) > 0) {
3390
3391 reg = 0;
3392 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
3393 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
3394
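/* Filtering is supported in hardware but not enabled by default */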
3395 dev->hw_features |= NETIF_F_NTUPLE;
3396
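/* init Rx flow definitions */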
3397 INIT_LIST_HEAD(&bp->rx_fs_list.list);
3398 bp->rx_fs_list.count = 0;
3399 spin_lock_init(&bp->rx_fs_lock);
3400 } else
3401 bp->max_tuples = 0;
3402 }
3403
3404 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
3405 val = 0;
3406 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
3407 val = GEM_BIT(RGMII);
3408 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
3409 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
3410 val = MACB_BIT(RMII);
3411 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
3412 val = MACB_BIT(MII);
3413
3414 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
3415 val |= MACB_BIT(CLKEN);
3416
3417 macb_or_gem_writel(bp, USRIO, val);
3418 }
3419
3420
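/* Set MII management clock divider */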
3421 val = macb_mdc_clk_div(bp);
3422 val |= macb_dbw(bp);
3423 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
3424 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
3425 macb_writel(bp, NCFGR, val);
3426
3427 return 0;
3428}
3429
3430#if defined(CONFIG_OF)
3431
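/* 1518 rounded up */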
3432#define AT91ETHER_MAX_RBUFF_SZ 0x600
3433
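/* max number of receive buffers */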
3434#define AT91ETHER_MAX_RX_DESCR 9
3435
3436
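/* Initialize and start the Receiver and Transmit subsystems */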
3437static int at91ether_start(struct net_device *dev)
3438{
3439 struct macb *lp = netdev_priv(dev);
3440 struct macb_queue *q = &lp->queues[0];
3441 struct macb_dma_desc *desc;
3442 dma_addr_t addr;
3443 u32 ctl;
3444 int i;
3445
3446 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
3447 (AT91ETHER_MAX_RX_DESCR *
3448 macb_dma_desc_get_size(lp)),
3449 &q->rx_ring_dma, GFP_KERNEL);
3450 if (!q->rx_ring)
3451 return -ENOMEM;
3452
3453 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
3454 AT91ETHER_MAX_RX_DESCR *
3455 AT91ETHER_MAX_RBUFF_SZ,
3456 &q->rx_buffers_dma, GFP_KERNEL);
3457 if (!q->rx_buffers) {
3458 dma_free_coherent(&lp->pdev->dev,
3459 AT91ETHER_MAX_RX_DESCR *
3460 macb_dma_desc_get_size(lp),
3461 q->rx_ring, q->rx_ring_dma);
3462 q->rx_ring = NULL;
3463 return -ENOMEM;
3464 }
3465
3466 addr = q->rx_buffers_dma;
3467 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
3468 desc = macb_rx_desc(q, i);
3469 macb_set_addr(lp, desc, addr);
3470 desc->ctrl = 0;
3471 addr += AT91ETHER_MAX_RBUFF_SZ;
3472 }
3473
3474
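/* Set the Wrap bit on the last descriptor */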
3475 desc->addr |= MACB_BIT(RX_WRAP);
3476
3477
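/* Reset buffer index */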
3478 q->rx_tail = 0;
3479
3480
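/* Program address of descriptor list in Rx Buffer Queue register */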
3481 macb_writel(lp, RBQP, q->rx_ring_dma);
3482
3483
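/* Enable Receive and Transmit */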
3484 ctl = macb_readl(lp, NCR);
3485 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
3486
3487 return 0;
3488}
3489
3490
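/* Open the ethernet interface */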
3491static int at91ether_open(struct net_device *dev)
3492{
3493 struct macb *lp = netdev_priv(dev);
3494 u32 ctl;
3495 int ret;
3496
3497
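/* Clear internal statistics */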
3498 ctl = macb_readl(lp, NCR);
3499 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
3500
3501 macb_set_hwaddr(lp);
3502
3503 ret = at91ether_start(dev);
3504 if (ret)
3505 return ret;
3506
3507
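/* Enable MAC interrupts */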
3508 macb_writel(lp, IER, MACB_BIT(RCOMP) |
3509 MACB_BIT(RXUBR) |
3510 MACB_BIT(ISR_TUND) |
3511 MACB_BIT(ISR_RLE) |
3512 MACB_BIT(TCOMP) |
3513 MACB_BIT(ISR_ROVR) |
3514 MACB_BIT(HRESP));
3515
3516
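/* schedule a link state check */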
3517 phy_start(dev->phydev);
3518
3519 netif_start_queue(dev);
3520
3521 return 0;
3522}
3523
3524
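/* Close the interface */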
3525static int at91ether_close(struct net_device *dev)
3526{
3527 struct macb *lp = netdev_priv(dev);
3528 struct macb_queue *q = &lp->queues[0];
3529 u32 ctl;
3530
3531
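/* Disable Receiver and Transmitter */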
3532 ctl = macb_readl(lp, NCR);
3533 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
3534
3535
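/* Disable MAC interrupts */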
3536 macb_writel(lp, IDR, MACB_BIT(RCOMP) |
3537 MACB_BIT(RXUBR) |
3538 MACB_BIT(ISR_TUND) |
3539 MACB_BIT(ISR_RLE) |
3540 MACB_BIT(TCOMP) |
3541 MACB_BIT(ISR_ROVR) |
3542 MACB_BIT(HRESP));
3543
3544 netif_stop_queue(dev);
3545
3546 dma_free_coherent(&lp->pdev->dev,
3547 AT91ETHER_MAX_RX_DESCR *
3548 macb_dma_desc_get_size(lp),
3549 q->rx_ring, q->rx_ring_dma);
3550 q->rx_ring = NULL;
3551
3552 dma_free_coherent(&lp->pdev->dev,
3553 AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
3554 q->rx_buffers, q->rx_buffers_dma);
3555 q->rx_buffers = NULL;
3556
3557 return 0;
3558}
3559
3560
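/* Transmit packet */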
3561static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
3562{
3563 struct macb *lp = netdev_priv(dev);
3564
3565 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
3566 netif_stop_queue(dev);
3567
3568
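/* Store packet information (to free when Tx completed) */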
3569 lp->skb = skb;
3570 lp->skb_length = skb->len;
3571 lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
3572 DMA_TO_DEVICE);
3573 if (dma_mapping_error(NULL, lp->skb_physaddr)) {
3574 dev_kfree_skb_any(skb);
3575 dev->stats.tx_dropped++;
3576 netdev_err(dev, "%s: DMA mapping error\n", __func__);
3577 return NETDEV_TX_OK;
3578 }
3579
3580
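/* Set address of the data in the Transmit Address register */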
3581 macb_writel(lp, TAR, lp->skb_physaddr);
3582
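/* Set length of the packet in the Transmit Control register */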
3583 macb_writel(lp, TCR, skb->len);
3584
3585 } else {
3586 netdev_err(dev, "%s called, but device is busy!\n", __func__);
3587 return NETDEV_TX_BUSY;
3588 }
3589
3590 return NETDEV_TX_OK;
3591}
3592
3593
3594
3595
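/* Extract received frames from the buffer descriptors and send them to
 * the upper layers. (Called from interrupt context.)
 */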
3596static void at91ether_rx(struct net_device *dev)
3597{
3598 struct macb *lp = netdev_priv(dev);
3599 struct macb_queue *q = &lp->queues[0];
3600 struct macb_dma_desc *desc;
3601 unsigned char *p_recv;
3602 struct sk_buff *skb;
3603 unsigned int pktlen;
3604
3605 desc = macb_rx_desc(q, q->rx_tail);
3606 while (desc->addr & MACB_BIT(RX_USED)) {
3607 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
3608 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
3609 skb = netdev_alloc_skb(dev, pktlen + 2);
3610 if (skb) {
3611 skb_reserve(skb, 2);
3612 skb_put_data(skb, p_recv, pktlen);
3613
3614 skb->protocol = eth_type_trans(skb, dev);
3615 dev->stats.rx_packets++;
3616 dev->stats.rx_bytes += pktlen;
3617 netif_rx(skb);
3618 } else {
3619 dev->stats.rx_dropped++;
3620 }
3621
3622 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
3623 dev->stats.multicast++;
3624
3625
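/* reset ownership bit */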
3626 desc->addr &= ~MACB_BIT(RX_USED);
3627
3628
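/* wrap after last buffer */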
3629 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
3630 q->rx_tail = 0;
3631 else
3632 q->rx_tail++;
3633
3634 desc = macb_rx_desc(q, q->rx_tail);
3635 }
3636}
3637
3638
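/* MAC interrupt handler */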
3639static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
3640{
3641 struct net_device *dev = dev_id;
3642 struct macb *lp = netdev_priv(dev);
3643 u32 intstatus, ctl;
3644
3645
3646
3647
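/* MAC Interrupt Status register indicates what interrupts are pending.
 * It is automatically cleared once read.
 */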
3648 intstatus = macb_readl(lp, ISR);
3649
3650
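/* Receive complete */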
3651 if (intstatus & MACB_BIT(RCOMP))
3652 at91ether_rx(dev);
3653
3654
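/* Transmit complete */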
3655 if (intstatus & MACB_BIT(TCOMP)) {
3656
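/* The TCOMP bit is set even if the transmission failed */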
3657 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
3658 dev->stats.tx_errors++;
3659
3660 if (lp->skb) {
3661 dev_kfree_skb_irq(lp->skb);
3662 lp->skb = NULL;
3663 dma_unmap_single(NULL, lp->skb_physaddr,
3664 lp->skb_length, DMA_TO_DEVICE);
3665 dev->stats.tx_packets++;
3666 dev->stats.tx_bytes += lp->skb_length;
3667 }
3668 netif_wake_queue(dev);
3669 }
3670
3671
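/* Work-around for an EMAC erratum: toggle RE to restart the receiver */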
3672 if (intstatus & MACB_BIT(RXUBR)) {
3673 ctl = macb_readl(lp, NCR);
3674 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
3675 wmb();
3676 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
3677 }
3678
3679 if (intstatus & MACB_BIT(ISR_ROVR))
3680 netdev_err(dev, "ROVR error\n");
3681
3682 return IRQ_HANDLED;
3683}
3684
3685#ifdef CONFIG_NET_POLL_CONTROLLER
3686static void at91ether_poll_controller(struct net_device *dev)
3687{
3688 unsigned long flags;
3689
3690 local_irq_save(flags);
3691 at91ether_interrupt(dev->irq, dev);
3692 local_irq_restore(flags);
3693}
3694#endif
3695
3696static const struct net_device_ops at91ether_netdev_ops = {
3697 .ndo_open = at91ether_open,
3698 .ndo_stop = at91ether_close,
3699 .ndo_start_xmit = at91ether_start_xmit,
3700 .ndo_get_stats = macb_get_stats,
3701 .ndo_set_rx_mode = macb_set_rx_mode,
3702 .ndo_set_mac_address = eth_mac_addr,
3703 .ndo_do_ioctl = macb_ioctl,
3704 .ndo_validate_addr = eth_validate_addr,
3705#ifdef CONFIG_NET_POLL_CONTROLLER
3706 .ndo_poll_controller = at91ether_poll_controller,
3707#endif
3708};
3709
3710static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
3711 struct clk **hclk, struct clk **tx_clk,
3712 struct clk **rx_clk)
3713{
3714 int err;
3715
3716 *hclk = NULL;
3717 *tx_clk = NULL;
3718 *rx_clk = NULL;
3719
3720 *pclk = devm_clk_get(&pdev->dev, "ether_clk");
3721 if (IS_ERR(*pclk))
3722 return PTR_ERR(*pclk);
3723
3724 err = clk_prepare_enable(*pclk);
3725 if (err) {
3726 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
3727 return err;
3728 }
3729
3730 return 0;
3731}
3732
3733static int at91ether_init(struct platform_device *pdev)
3734{
3735 struct net_device *dev = platform_get_drvdata(pdev);
3736 struct macb *bp = netdev_priv(dev);
3737 int err;
3738 u32 reg;
3739
3740 bp->queues[0].bp = bp;
3741
3742 dev->netdev_ops = &at91ether_netdev_ops;
3743 dev->ethtool_ops = &macb_ethtool_ops;
3744
3745 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
3746 0, dev->name, dev);
3747 if (err)
3748 return err;
3749
3750 macb_writel(bp, NCR, 0);
3751
3752 reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
3753 if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
3754 reg |= MACB_BIT(RM9200_RMII);
3755
3756 macb_writel(bp, NCFGR, reg);
3757
3758 return 0;
3759}
3760
3761static const struct macb_config at91sam9260_config = {
3762 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3763 .clk_init = macb_clk_init,
3764 .init = macb_init,
3765};
3766
3767static const struct macb_config pc302gem_config = {
3768 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
3769 .dma_burst_length = 16,
3770 .clk_init = macb_clk_init,
3771 .init = macb_init,
3772};
3773
3774static const struct macb_config sama5d2_config = {
3775 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3776 .dma_burst_length = 16,
3777 .clk_init = macb_clk_init,
3778 .init = macb_init,
3779};
3780
3781static const struct macb_config sama5d3_config = {
3782 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
3783 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
3784 .dma_burst_length = 16,
3785 .clk_init = macb_clk_init,
3786 .init = macb_init,
3787 .jumbo_max_len = 10240,
3788};
3789
3790static const struct macb_config sama5d4_config = {
3791 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3792 .dma_burst_length = 4,
3793 .clk_init = macb_clk_init,
3794 .init = macb_init,
3795};
3796
3797static const struct macb_config emac_config = {
3798 .clk_init = at91ether_clk_init,
3799 .init = at91ether_init,
3800};
3801
3802static const struct macb_config np4_config = {
3803 .caps = MACB_CAPS_USRIO_DISABLED,
3804 .clk_init = macb_clk_init,
3805 .init = macb_init,
3806};
3807
3808static const struct macb_config zynqmp_config = {
3809 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
3810 MACB_CAPS_JUMBO |
3811 MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
3812 .dma_burst_length = 16,
3813 .clk_init = macb_clk_init,
3814 .init = macb_init,
3815 .jumbo_max_len = 10240,
3816};
3817
3818static const struct macb_config zynq_config = {
3819 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
3820 .dma_burst_length = 16,
3821 .clk_init = macb_clk_init,
3822 .init = macb_init,
3823};
3824
3825static const struct of_device_id macb_dt_ids[] = {
3826 { .compatible = "cdns,at32ap7000-macb" },
3827 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
3828 { .compatible = "cdns,macb" },
3829 { .compatible = "cdns,np4-macb", .data = &np4_config },
3830 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
3831 { .compatible = "cdns,gem", .data = &pc302gem_config },
3832 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
3833 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
3834 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
3835 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
3836 { .compatible = "cdns,emac", .data = &emac_config },
3837 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
3838 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
3839 { }
3840};
3841MODULE_DEVICE_TABLE(of, macb_dt_ids);
3842#endif
3843
3844static const struct macb_config default_gem_config = {
3845 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
3846 MACB_CAPS_JUMBO |
3847 MACB_CAPS_GEM_HAS_PTP,
3848 .dma_burst_length = 16,
3849 .clk_init = macb_clk_init,
3850 .init = macb_init,
3851 .jumbo_max_len = 10240,
3852};
3853
3854static int macb_probe(struct platform_device *pdev)
3855{
3856 const struct macb_config *macb_config = &default_gem_config;
3857 int (*clk_init)(struct platform_device *, struct clk **,
3858 struct clk **, struct clk **, struct clk **)
3859 = macb_config->clk_init;
3860 int (*init)(struct platform_device *) = macb_config->init;
3861 struct device_node *np = pdev->dev.of_node;
3862 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
3863 unsigned int queue_mask, num_queues;
3864 struct macb_platform_data *pdata;
3865 bool native_io;
3866 struct phy_device *phydev;
3867 struct net_device *dev;
3868 struct resource *regs;
3869 void __iomem *mem;
3870 const char *mac;
3871 struct macb *bp;
3872 int err, val;
3873
3874 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3875 mem = devm_ioremap_resource(&pdev->dev, regs);
3876 if (IS_ERR(mem))
3877 return PTR_ERR(mem);
3878
3879 if (np) {
3880 const struct of_device_id *match;
3881
3882 match = of_match_node(macb_dt_ids, np);
3883 if (match && match->data) {
3884 macb_config = match->data;
3885 clk_init = macb_config->clk_init;
3886 init = macb_config->init;
3887 }
3888 }
3889
3890 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
3891 if (err)
3892 return err;
3893
3894 native_io = hw_is_native_io(mem);
3895
3896 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
3897 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
3898 if (!dev) {
3899 err = -ENOMEM;
3900 goto err_disable_clocks;
3901 }
3902
3903 dev->base_addr = regs->start;
3904
3905 SET_NETDEV_DEV(dev, &pdev->dev);
3906
3907 bp = netdev_priv(dev);
3908 bp->pdev = pdev;
3909 bp->dev = dev;
3910 bp->regs = mem;
3911 bp->native_io = native_io;
3912 if (native_io) {
3913 bp->macb_reg_readl = hw_readl_native;
3914 bp->macb_reg_writel = hw_writel_native;
3915 } else {
3916 bp->macb_reg_readl = hw_readl;
3917 bp->macb_reg_writel = hw_writel;
3918 }
3919 bp->num_queues = num_queues;
3920 bp->queue_mask = queue_mask;
3921 if (macb_config)
3922 bp->dma_burst_length = macb_config->dma_burst_length;
3923 bp->pclk = pclk;
3924 bp->hclk = hclk;
3925 bp->tx_clk = tx_clk;
3926 bp->rx_clk = rx_clk;
3927 if (macb_config)
3928 bp->jumbo_max_len = macb_config->jumbo_max_len;
3929
3930 bp->wol = 0;
3931 if (of_get_property(np, "magic-packet", NULL))
3932 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
3933 device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
3934
3935 spin_lock_init(&bp->lock);
3936
3937
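/* setup capabilities */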
3938 macb_configure_caps(bp, macb_config);
3939
3940#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3941 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
3942 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
3943 bp->hw_dma_cap |= HW_DMA_CAP_64B;
3944 }
3945#endif
3946 platform_set_drvdata(pdev, dev);
3947
3948 dev->irq = platform_get_irq(pdev, 0);
3949 if (dev->irq < 0) {
3950 err = dev->irq;
3951 goto err_out_free_netdev;
3952 }
3953
3954
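/* MTU range: 68 - 1500 or 10240 */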
3955 dev->min_mtu = GEM_MTU_MIN_SIZE;
3956 if (bp->caps & MACB_CAPS_JUMBO)
3957 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
3958 else
3959 dev->max_mtu = ETH_DATA_LEN;
3960
3961 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
3962 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
3963 if (val)
3964 bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
3965 macb_dma_desc_get_size(bp);
3966
3967 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
3968 if (val)
3969 bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
3970 macb_dma_desc_get_size(bp);
3971 }
3972
3973 mac = of_get_mac_address(np);
3974 if (mac) {
3975 ether_addr_copy(bp->dev->dev_addr, mac);
3976 } else {
3977 err = of_get_nvmem_mac_address(np, bp->dev->dev_addr);
3978 if (err) {
3979 if (err == -EPROBE_DEFER)
3980 goto err_out_free_netdev;
3981 macb_get_hwaddr(bp);
3982 }
3983 }
3984
3985 err = of_get_phy_mode(np);
3986 if (err < 0) {
3987 pdata = dev_get_platdata(&pdev->dev);
3988 if (pdata && pdata->is_rmii)
3989 bp->phy_interface = PHY_INTERFACE_MODE_RMII;
3990 else
3991 bp->phy_interface = PHY_INTERFACE_MODE_MII;
3992 } else {
3993 bp->phy_interface = err;
3994 }
3995
3996
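/* IP specific init */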
3997 err = init(pdev);
3998 if (err)
3999 goto err_out_free_netdev;
4000
4001 err = macb_mii_init(bp);
4002 if (err)
4003 goto err_out_free_netdev;
4004
4005 phydev = dev->phydev;
4006
4007 netif_carrier_off(dev);
4008
4009 err = register_netdev(dev);
4010 if (err) {
4011 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
4012 goto err_out_unregister_mdio;
4013 }
4014
4015 tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
4016 (unsigned long)bp);
4017
4018 phy_attached_info(phydev);
4019
4020 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
4021 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
4022 dev->base_addr, dev->irq, dev->dev_addr);
4023
4024 return 0;
4025
4026err_out_unregister_mdio:
4027 phy_disconnect(dev->phydev);
4028 mdiobus_unregister(bp->mii_bus);
4029 of_node_put(bp->phy_node);
4030 if (np && of_phy_is_fixed_link(np))
4031 of_phy_deregister_fixed_link(np);
4032 mdiobus_free(bp->mii_bus);
4033
4034err_out_free_netdev:
4035 free_netdev(dev);
4036
4037err_disable_clocks:
4038 clk_disable_unprepare(tx_clk);
4039 clk_disable_unprepare(hclk);
4040 clk_disable_unprepare(pclk);
4041 clk_disable_unprepare(rx_clk);
4042
4043 return err;
4044}
4045
4046static int macb_remove(struct platform_device *pdev)
4047{
4048 struct net_device *dev;
4049 struct macb *bp;
4050 struct device_node *np = pdev->dev.of_node;
4051
4052 dev = platform_get_drvdata(pdev);
4053
4054 if (dev) {
4055 bp = netdev_priv(dev);
4056 if (dev->phydev)
4057 phy_disconnect(dev->phydev);
4058 mdiobus_unregister(bp->mii_bus);
4059 if (np && of_phy_is_fixed_link(np))
4060 of_phy_deregister_fixed_link(np);
4061 dev->phydev = NULL;
4062 mdiobus_free(bp->mii_bus);
4063
4064 unregister_netdev(dev);
4065 clk_disable_unprepare(bp->tx_clk);
4066 clk_disable_unprepare(bp->hclk);
4067 clk_disable_unprepare(bp->pclk);
4068 clk_disable_unprepare(bp->rx_clk);
4069 of_node_put(bp->phy_node);
4070 free_netdev(dev);
4071 }
4072
4073 return 0;
4074}
4075
4076static int __maybe_unused macb_suspend(struct device *dev)
4077{
4078 struct platform_device *pdev = to_platform_device(dev);
4079 struct net_device *netdev = platform_get_drvdata(pdev);
4080 struct macb *bp = netdev_priv(netdev);
4081
4082 netif_carrier_off(netdev);
4083 netif_device_detach(netdev);
4084
4085 if (bp->wol & MACB_WOL_ENABLED) {
4086 macb_writel(bp, IER, MACB_BIT(WOL));
4087 macb_writel(bp, WOL, MACB_BIT(MAG));
4088 enable_irq_wake(bp->queues[0].irq);
4089 } else {
4090 clk_disable_unprepare(bp->tx_clk);
4091 clk_disable_unprepare(bp->hclk);
4092 clk_disable_unprepare(bp->pclk);
4093 clk_disable_unprepare(bp->rx_clk);
4094 }
4095
4096 return 0;
4097}
4098
4099static int __maybe_unused macb_resume(struct device *dev)
4100{
4101 struct platform_device *pdev = to_platform_device(dev);
4102 struct net_device *netdev = platform_get_drvdata(pdev);
4103 struct macb *bp = netdev_priv(netdev);
4104
4105 if (bp->wol & MACB_WOL_ENABLED) {
4106 macb_writel(bp, IDR, MACB_BIT(WOL));
4107 macb_writel(bp, WOL, 0);
4108 disable_irq_wake(bp->queues[0].irq);
4109 } else {
4110 clk_prepare_enable(bp->pclk);
4111 clk_prepare_enable(bp->hclk);
4112 clk_prepare_enable(bp->tx_clk);
4113 clk_prepare_enable(bp->rx_clk);
4114 }
4115
4116 netif_device_attach(netdev);
4117
4118 return 0;
4119}
4120
4121static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);
4122
4123static struct platform_driver macb_driver = {
4124 .probe = macb_probe,
4125 .remove = macb_remove,
4126 .driver = {
4127 .name = "macb",
4128 .of_match_table = of_match_ptr(macb_dt_ids),
4129 .pm = &macb_pm_ops,
4130 },
4131};
4132
4133module_platform_driver(macb_driver);
4134
4135MODULE_LICENSE("GPL");
4136MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
4137MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
4138MODULE_ALIAS("platform:macb");
4139