/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include "macb.h"
39
#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
				 | MACB_BIT(ISR_RLE)			\
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
#define GEM_MAX_TX_LEN		((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		(NETIF_F_TSO | NETIF_F_UFO)

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

/* DMA buffer descriptor might be different size depending on hardware
 * configuration:
 * - 32-bit DMA addresses use the basic 2-word descriptor (address + control);
 * - 64-bit DMA addresses append two extra words (upper address + unused).
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
		return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
#endif
	return sizeof(struct macb_dma_desc);
}

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/* DMA buffer descriptor is 4 words long (instead of 2 words)
	 * for 64b GEM, so double the ring index to reach the right entry.
	 */
	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
		idx <<= 1;
#endif
	return idx;
}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
}
#endif

/* Ring buffer accessors */
113static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
114{
115 return index & (bp->tx_ring_size - 1);
116}
117
118static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
119 unsigned int index)
120{
121 index = macb_tx_ring_wrap(queue->bp, index);
122 index = macb_adj_dma_desc_idx(queue->bp, index);
123 return &queue->tx_ring[index];
124}
125
126static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
127 unsigned int index)
128{
129 return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
130}
131
132static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
133{
134 dma_addr_t offset;
135
136 offset = macb_tx_ring_wrap(queue->bp, index) *
137 macb_dma_desc_get_size(queue->bp);
138
139 return queue->tx_ring_dma + offset;
140}
141
142static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
143{
144 return index & (bp->rx_ring_size - 1);
145}
146
147static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
148{
149 index = macb_rx_ring_wrap(bp, index);
150 index = macb_adj_dma_desc_idx(bp, index);
151 return &bp->rx_ring[index];
152}
153
154static void *macb_rx_buffer(struct macb *bp, unsigned int index)
155{
156 return bp->rx_buffers + bp->rx_buffer_size *
157 macb_rx_ring_wrap(bp, index);
158}
159
160
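/* I/O accessors */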
161static u32 hw_readl_native(struct macb *bp, int offset)
162{
163 return __raw_readl(bp->regs + offset);
164}
165
166static void hw_writel_native(struct macb *bp, int offset, u32 value)
167{
168 __raw_writel(value, bp->regs + offset);
169}
170
171static u32 hw_readl(struct macb *bp, int offset)
172{
173 return readl_relaxed(bp->regs + offset);
174}
175
176static void hw_writel(struct macb *bp, int offset, u32 value)
177{
178 writel_relaxed(value, bp->regs + offset);
179}
180
181
182
183
184
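/* Find the CPU endianness by using the loopback bit of the NCR register. When
 * the CPU is in big endian mode we need to program swapped mode for management
 * descriptor access.
 */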
185static bool hw_is_native_io(void __iomem *addr)
186{
187 u32 value = MACB_BIT(LLB);
188
189 __raw_writel(value, addr + MACB_NCR);
190 value = __raw_readl(addr + MACB_NCR);
191
192
193 __raw_writel(0, addr + MACB_NCR);
194
195 return value == MACB_BIT(LLB);
196}
197
198static bool hw_is_gem(void __iomem *addr, bool native_io)
199{
200 u32 id;
201
202 if (native_io)
203 id = __raw_readl(addr + MACB_MID);
204 else
205 id = readl_relaxed(addr + MACB_MID);
206
207 return MACB_BFEXT(IDNUM, id) >= 0x2;
208}
209
210static void macb_set_hwaddr(struct macb *bp)
211{
212 u32 bottom;
213 u16 top;
214
215 bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
216 macb_or_gem_writel(bp, SA1B, bottom);
217 top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
218 macb_or_gem_writel(bp, SA1T, top);
219
220
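	/* Clear unused address register sets */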
221 macb_or_gem_writel(bp, SA2B, 0);
222 macb_or_gem_writel(bp, SA2T, 0);
223 macb_or_gem_writel(bp, SA3B, 0);
224 macb_or_gem_writel(bp, SA3T, 0);
225 macb_or_gem_writel(bp, SA4B, 0);
226 macb_or_gem_writel(bp, SA4T, 0);
227}
228
229static void macb_get_hwaddr(struct macb *bp)
230{
231 struct macb_platform_data *pdata;
232 u32 bottom;
233 u16 top;
234 u8 addr[6];
235 int i;
236
237 pdata = dev_get_platdata(&bp->pdev->dev);
238
239
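	/* Check all 4 address register sets for a valid address */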
240 for (i = 0; i < 4; i++) {
241 bottom = macb_or_gem_readl(bp, SA1B + i * 8);
242 top = macb_or_gem_readl(bp, SA1T + i * 8);
243
244 if (pdata && pdata->rev_eth_addr) {
245 addr[5] = bottom & 0xff;
246 addr[4] = (bottom >> 8) & 0xff;
247 addr[3] = (bottom >> 16) & 0xff;
248 addr[2] = (bottom >> 24) & 0xff;
249 addr[1] = top & 0xff;
250 addr[0] = (top & 0xff00) >> 8;
251 } else {
252 addr[0] = bottom & 0xff;
253 addr[1] = (bottom >> 8) & 0xff;
254 addr[2] = (bottom >> 16) & 0xff;
255 addr[3] = (bottom >> 24) & 0xff;
256 addr[4] = top & 0xff;
257 addr[5] = (top >> 8) & 0xff;
258 }
259
260 if (is_valid_ether_addr(addr)) {
261 memcpy(bp->dev->dev_addr, addr, sizeof(addr));
262 return;
263 }
264 }
265
266 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
267 eth_hw_addr_random(bp->dev);
268}
269
270static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
271{
272 struct macb *bp = bus->priv;
273 int value;
274
275 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
276 | MACB_BF(RW, MACB_MAN_READ)
277 | MACB_BF(PHYA, mii_id)
278 | MACB_BF(REGA, regnum)
279 | MACB_BF(CODE, MACB_MAN_CODE)));
280
281
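	/* wait for end of transfer */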
282 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
283 cpu_relax();
284
285 value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
286
287 return value;
288}
289
290static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
291 u16 value)
292{
293 struct macb *bp = bus->priv;
294
295 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
296 | MACB_BF(RW, MACB_MAN_WRITE)
297 | MACB_BF(PHYA, mii_id)
298 | MACB_BF(REGA, regnum)
299 | MACB_BF(CODE, MACB_MAN_CODE)
300 | MACB_BF(DATA, value)));
301
302
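	/* wait for end of transfer */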
303 while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
304 cpu_relax();
305
306 return 0;
307}
308
309
310
311
312
313
314
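/**
 * macb_set_tx_clk() - Set the tx clock to match the PHY link speed
 * @clk:	Pointer to the tx clock to adjust
 * @speed:	Link speed (SPEED_10, SPEED_100 or SPEED_1000)
 * @dev:	Pointer to the struct net_device
 */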
315static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
316{
317 long ferr, rate, rate_rounded;
318
319 if (!clk)
320 return;
321
322 switch (speed) {
323 case SPEED_10:
324 rate = 2500000;
325 break;
326 case SPEED_100:
327 rate = 25000000;
328 break;
329 case SPEED_1000:
330 rate = 125000000;
331 break;
332 default:
333 return;
334 }
335
336 rate_rounded = clk_round_rate(clk, rate);
337 if (rate_rounded < 0)
338 return;
339
340
341
342
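	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */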
343 ferr = abs(rate_rounded - rate);
344 ferr = DIV_ROUND_UP(ferr, rate / 100000);
345 if (ferr > 5)
346 netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
347 rate);
348
349 if (clk_set_rate(clk, rate_rounded))
350 netdev_err(dev, "adjusting tx_clk failed.\n");
351}
352
353static void macb_handle_link_change(struct net_device *dev)
354{
355 struct macb *bp = netdev_priv(dev);
356 struct phy_device *phydev = dev->phydev;
357 unsigned long flags;
358 int status_change = 0;
359
360 spin_lock_irqsave(&bp->lock, flags);
361
362 if (phydev->link) {
363 if ((bp->speed != phydev->speed) ||
364 (bp->duplex != phydev->duplex)) {
365 u32 reg;
366
367 reg = macb_readl(bp, NCFGR);
368 reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
369 if (macb_is_gem(bp))
370 reg &= ~GEM_BIT(GBE);
371
372 if (phydev->duplex)
373 reg |= MACB_BIT(FD);
374 if (phydev->speed == SPEED_100)
375 reg |= MACB_BIT(SPD);
376 if (phydev->speed == SPEED_1000 &&
377 bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
378 reg |= GEM_BIT(GBE);
379
380 macb_or_gem_writel(bp, NCFGR, reg);
381
382 bp->speed = phydev->speed;
383 bp->duplex = phydev->duplex;
384 status_change = 1;
385 }
386 }
387
388 if (phydev->link != bp->link) {
389 if (!phydev->link) {
390 bp->speed = 0;
391 bp->duplex = -1;
392 }
393 bp->link = phydev->link;
394
395 status_change = 1;
396 }
397
398 spin_unlock_irqrestore(&bp->lock, flags);
399
400 if (status_change) {
401 if (phydev->link) {
402
403
404
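			/* Update the TX clock rate if and only if the link is
			 * up and there has been a link change.
			 */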
405 macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
406
407 netif_carrier_on(dev);
408 netdev_info(dev, "link up (%d/%s)\n",
409 phydev->speed,
410 phydev->duplex == DUPLEX_FULL ?
411 "Full" : "Half");
412 } else {
413 netif_carrier_off(dev);
414 netdev_info(dev, "link down\n");
415 }
416 }
417}
418
419
420static int macb_mii_probe(struct net_device *dev)
421{
422 struct macb *bp = netdev_priv(dev);
423 struct macb_platform_data *pdata;
424 struct phy_device *phydev;
425 int phy_irq;
426 int ret;
427
428 phydev = phy_find_first(bp->mii_bus);
429 if (!phydev) {
430 netdev_err(dev, "no PHY found\n");
431 return -ENXIO;
432 }
433
434 pdata = dev_get_platdata(&bp->pdev->dev);
435 if (pdata) {
436 if (gpio_is_valid(pdata->phy_irq_pin)) {
437 ret = devm_gpio_request(&bp->pdev->dev,
438 pdata->phy_irq_pin, "phy int");
439 if (!ret) {
440 phy_irq = gpio_to_irq(pdata->phy_irq_pin);
441 phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
442 }
443 } else {
444 phydev->irq = PHY_POLL;
445 }
446 }
447
448
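	/* attach the mac to the phy */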
449 ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
450 bp->phy_interface);
451 if (ret) {
452 netdev_err(dev, "Could not attach to PHY\n");
453 return ret;
454 }
455
456
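	/* mask with MAC supported features */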
457 if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
458 phydev->supported &= PHY_GBIT_FEATURES;
459 else
460 phydev->supported &= PHY_BASIC_FEATURES;
461
462 if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
463 phydev->supported &= ~SUPPORTED_1000baseT_Half;
464
465 phydev->advertising = phydev->supported;
466
467 bp->link = 0;
468 bp->speed = 0;
469 bp->duplex = -1;
470
471 return 0;
472}
473
474static int macb_mii_init(struct macb *bp)
475{
476 struct macb_platform_data *pdata;
477 struct device_node *np;
478 int err = -ENXIO, i;
479
480
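	/* Enable management port */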
481 macb_writel(bp, NCR, MACB_BIT(MPE));
482
483 bp->mii_bus = mdiobus_alloc();
484 if (!bp->mii_bus) {
485 err = -ENOMEM;
486 goto err_out;
487 }
488
489 bp->mii_bus->name = "MACB_mii_bus";
490 bp->mii_bus->read = &macb_mdio_read;
491 bp->mii_bus->write = &macb_mdio_write;
492 snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
493 bp->pdev->name, bp->pdev->id);
494 bp->mii_bus->priv = bp;
495 bp->mii_bus->parent = &bp->pdev->dev;
496 pdata = dev_get_platdata(&bp->pdev->dev);
497
498 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
499
500 np = bp->pdev->dev.of_node;
501 if (np) {
502
503 err = of_mdiobus_register(bp->mii_bus, np);
504
505
506
507
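		/* fallback to standard phy registration if no phy were
		 * found during dt phy registration
		 */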
508 if (!err && !phy_find_first(bp->mii_bus)) {
509 for (i = 0; i < PHY_MAX_ADDR; i++) {
510 struct phy_device *phydev;
511
512 phydev = mdiobus_scan(bp->mii_bus, i);
513 if (IS_ERR(phydev) &&
514 PTR_ERR(phydev) != -ENODEV) {
515 err = PTR_ERR(phydev);
516 break;
517 }
518 }
519
520 if (err)
521 goto err_out_unregister_bus;
522 }
523 } else {
524 for (i = 0; i < PHY_MAX_ADDR; i++)
525 bp->mii_bus->irq[i] = PHY_POLL;
526
527 if (pdata)
528 bp->mii_bus->phy_mask = pdata->phy_mask;
529
530 err = mdiobus_register(bp->mii_bus);
531 }
532
533 if (err)
534 goto err_out_free_mdiobus;
535
536 err = macb_mii_probe(bp->dev);
537 if (err)
538 goto err_out_unregister_bus;
539
540 return 0;
541
542err_out_unregister_bus:
543 mdiobus_unregister(bp->mii_bus);
544err_out_free_mdiobus:
545 mdiobus_free(bp->mii_bus);
546err_out:
547 return err;
548}
549
550static void macb_update_stats(struct macb *bp)
551{
552 u32 *p = &bp->hw_stats.macb.rx_pause_frames;
553 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
554 int offset = MACB_PFR;
555
556 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
557
558 for (; p < end; p++, offset += 4)
559 *p += bp->macb_reg_readl(bp, offset);
560}
561
562static int macb_halt_tx(struct macb *bp)
563{
564 unsigned long halt_time, timeout;
565 u32 status;
566
567 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
568
569 timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
570 do {
571 halt_time = jiffies;
572 status = macb_readl(bp, TSR);
573 if (!(status & MACB_BIT(TGO)))
574 return 0;
575
		/* Called under bp->lock with IRQs disabled, so we must not
		 * sleep here; busy-wait between polls instead.
		 */
		udelay(250);
577 } while (time_before(halt_time, timeout));
578
579 return -ETIMEDOUT;
580}
581
582static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
583{
584 if (tx_skb->mapping) {
585 if (tx_skb->mapped_as_page)
586 dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
587 tx_skb->size, DMA_TO_DEVICE);
588 else
589 dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
590 tx_skb->size, DMA_TO_DEVICE);
591 tx_skb->mapping = 0;
592 }
593
594 if (tx_skb->skb) {
595 dev_kfree_skb_any(tx_skb->skb);
596 tx_skb->skb = NULL;
597 }
598}
599
600static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
601{
602#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
603 struct macb_dma_desc_64 *desc_64;
604
605 if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
606 desc_64 = macb_64b_desc(bp, desc);
607 desc_64->addrh = upper_32_bits(addr);
608 }
609#endif
610 desc->addr = lower_32_bits(addr);
611}
612
613static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
614{
615 dma_addr_t addr = 0;
616#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
617 struct macb_dma_desc_64 *desc_64;
618
619 if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
620 desc_64 = macb_64b_desc(bp, desc);
621 addr = ((u64)(desc_64->addrh) << 32);
622 }
623#endif
624 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
625 return addr;
626}
627
628static void macb_tx_error_task(struct work_struct *work)
629{
630 struct macb_queue *queue = container_of(work, struct macb_queue,
631 tx_error_task);
632 struct macb *bp = queue->bp;
633 struct macb_tx_skb *tx_skb;
634 struct macb_dma_desc *desc;
635 struct sk_buff *skb;
636 unsigned int tail;
637 unsigned long flags;
638
639 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
640 (unsigned int)(queue - bp->queues),
641 queue->tx_tail, queue->tx_head);
642
643
644
645
646
647
648
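	/* Stop the queues and halt transmission before touching the TX rings.
	 * Taking bp->lock here also keeps the interrupt handler (which takes
	 * the same lock) from calling macb_tx_interrupt() concurrently.
	 */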
649 spin_lock_irqsave(&bp->lock, flags);
650
651
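	/* Make sure nobody is trying to queue up new packets */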
652 netif_tx_stop_all_queues(bp->dev);
653
654
655
656
657
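	/* Stop transmission now
	 * (in case we have just queued new packets);
	 * macb/gem must be halted to write the TBQP register.
	 */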
658 if (macb_halt_tx(bp))
659
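		/* Just complain for now, reinitializing TX path can be good */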
660 netdev_err(bp->dev, "BUG: halt tx timed out\n");
661
662
663
664
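	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */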
665 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
666 u32 ctrl;
667
668 desc = macb_tx_desc(queue, tail);
669 ctrl = desc->ctrl;
670 tx_skb = macb_tx_skb(queue, tail);
671 skb = tx_skb->skb;
672
673 if (ctrl & MACB_BIT(TX_USED)) {
674
675 while (!skb) {
676 macb_tx_unmap(bp, tx_skb);
677 tail++;
678 tx_skb = macb_tx_skb(queue, tail);
679 skb = tx_skb->skb;
680 }
681
682
683
684
685 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
686 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
687 macb_tx_ring_wrap(bp, tail),
688 skb->data);
689 bp->dev->stats.tx_packets++;
690 bp->dev->stats.tx_bytes += skb->len;
691 }
692 } else {
693
694
695
696
697 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
698 netdev_err(bp->dev,
699 "BUG: TX buffers exhausted mid-frame\n");
700
701 desc->ctrl = ctrl | MACB_BIT(TX_USED);
702 }
703
704 macb_tx_unmap(bp, tx_skb);
705 }
706
707
708 desc = macb_tx_desc(queue, 0);
709 macb_set_addr(bp, desc, 0);
710 desc->ctrl = MACB_BIT(TX_USED);
711
712
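	/* Make descriptor updates visible to hardware */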
713 wmb();
714
715
716 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
717#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
718 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
719 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
720#endif
721
722 queue->tx_head = 0;
723 queue->tx_tail = 0;
724
725
726 macb_writel(bp, TSR, macb_readl(bp, TSR));
727 queue_writel(queue, IER, MACB_TX_INT_FLAGS);
728
729
730 netif_tx_start_all_queues(bp->dev);
731 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
732
733 spin_unlock_irqrestore(&bp->lock, flags);
734}
735
736static void macb_tx_interrupt(struct macb_queue *queue)
737{
738 unsigned int tail;
739 unsigned int head;
740 u32 status;
741 struct macb *bp = queue->bp;
742 u16 queue_index = queue - bp->queues;
743
744 status = macb_readl(bp, TSR);
745 macb_writel(bp, TSR, status);
746
747 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
748 queue_writel(queue, ISR, MACB_BIT(TCOMP));
749
750 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
751 (unsigned long)status);
752
753 head = queue->tx_head;
754 for (tail = queue->tx_tail; tail != head; tail++) {
755 struct macb_tx_skb *tx_skb;
756 struct sk_buff *skb;
757 struct macb_dma_desc *desc;
758 u32 ctrl;
759
760 desc = macb_tx_desc(queue, tail);
761
762
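		/* Make hw descriptor updates visible to CPU */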
763 rmb();
764
765 ctrl = desc->ctrl;
766
767
768
769
770 if (!(ctrl & MACB_BIT(TX_USED)))
771 break;
772
773
774 for (;; tail++) {
775 tx_skb = macb_tx_skb(queue, tail);
776 skb = tx_skb->skb;
777
778
779 if (skb) {
780 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
781 macb_tx_ring_wrap(bp, tail),
782 skb->data);
783 bp->dev->stats.tx_packets++;
784 bp->dev->stats.tx_bytes += skb->len;
785 }
786
787
788 macb_tx_unmap(bp, tx_skb);
789
790
791
792
793
794 if (skb)
795 break;
796 }
797 }
798
799 queue->tx_tail = tail;
800 if (__netif_subqueue_stopped(bp->dev, queue_index) &&
801 CIRC_CNT(queue->tx_head, queue->tx_tail,
802 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
803 netif_wake_subqueue(bp->dev, queue_index);
804}
805
806static void gem_rx_refill(struct macb *bp)
807{
808 unsigned int entry;
809 struct sk_buff *skb;
810 dma_addr_t paddr;
811 struct macb_dma_desc *desc;
812
813 while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
814 bp->rx_ring_size) > 0) {
815 entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);
816
817
818 rmb();
819
820 bp->rx_prepared_head++;
821 desc = macb_rx_desc(bp, entry);
822
823 if (!bp->rx_skbuff[entry]) {
824
825 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
826 if (unlikely(!skb)) {
827 netdev_err(bp->dev,
828 "Unable to allocate sk_buff\n");
829 break;
830 }
831
832
833 paddr = dma_map_single(&bp->pdev->dev, skb->data,
834 bp->rx_buffer_size,
835 DMA_FROM_DEVICE);
836 if (dma_mapping_error(&bp->pdev->dev, paddr)) {
837 dev_kfree_skb(skb);
838 break;
839 }
840
841 bp->rx_skbuff[entry] = skb;
842
843 if (entry == bp->rx_ring_size - 1)
844 paddr |= MACB_BIT(RX_WRAP);
845 macb_set_addr(bp, desc, paddr);
846 desc->ctrl = 0;
847
848
849 skb_reserve(skb, NET_IP_ALIGN);
850 } else {
851 desc->addr &= ~MACB_BIT(RX_USED);
852 desc->ctrl = 0;
853 }
854 }
855
856
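	/* Make descriptor updates visible to hardware */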
857 wmb();
858
859 netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
860 bp->rx_prepared_head, bp->rx_tail);
861}
862
863
864static void discard_partial_frame(struct macb *bp, unsigned int begin,
865 unsigned int end)
866{
867 unsigned int frag;
868
869 for (frag = begin; frag != end; frag++) {
870 struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
871
872 desc->addr &= ~MACB_BIT(RX_USED);
873 }
874
875
876 wmb();
877
878
879
880
881
882}
883
884static int gem_rx(struct macb *bp, int budget)
885{
886 unsigned int len;
887 unsigned int entry;
888 struct sk_buff *skb;
889 struct macb_dma_desc *desc;
890 int count = 0;
891
892 while (count < budget) {
893 u32 ctrl;
894 dma_addr_t addr;
895 bool rxused;
896
897 entry = macb_rx_ring_wrap(bp, bp->rx_tail);
898 desc = macb_rx_desc(bp, entry);
899
900
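		/* Make hw descriptor updates visible to CPU */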
901 rmb();
902
903 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
904 addr = macb_get_addr(bp, desc);
905 ctrl = desc->ctrl;
906
907 if (!rxused)
908 break;
909
910 bp->rx_tail++;
911 count++;
912
913 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
914 netdev_err(bp->dev,
915 "not whole frame pointed by descriptor\n");
916 bp->dev->stats.rx_dropped++;
917 break;
918 }
919 skb = bp->rx_skbuff[entry];
920 if (unlikely(!skb)) {
921 netdev_err(bp->dev,
922 "inconsistent Rx descriptor chain\n");
923 bp->dev->stats.rx_dropped++;
924 break;
925 }
926
927 bp->rx_skbuff[entry] = NULL;
928 len = ctrl & bp->rx_frm_len_mask;
929
930 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
931
932 skb_put(skb, len);
933 dma_unmap_single(&bp->pdev->dev, addr,
934 bp->rx_buffer_size, DMA_FROM_DEVICE);
935
936 skb->protocol = eth_type_trans(skb, bp->dev);
937 skb_checksum_none_assert(skb);
938 if (bp->dev->features & NETIF_F_RXCSUM &&
939 !(bp->dev->flags & IFF_PROMISC) &&
940 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
941 skb->ip_summed = CHECKSUM_UNNECESSARY;
942
943 bp->dev->stats.rx_packets++;
944 bp->dev->stats.rx_bytes += skb->len;
945
946#if defined(DEBUG) && defined(VERBOSE_DEBUG)
947 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
948 skb->len, skb->csum);
949 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
950 skb_mac_header(skb), 16, true);
951 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
952 skb->data, 32, true);
953#endif
954
955 netif_receive_skb(skb);
956 }
957
958 gem_rx_refill(bp);
959
960 return count;
961}
962
963static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
964 unsigned int last_frag)
965{
966 unsigned int len;
967 unsigned int frag;
968 unsigned int offset;
969 struct sk_buff *skb;
970 struct macb_dma_desc *desc;
971
972 desc = macb_rx_desc(bp, last_frag);
973 len = desc->ctrl & bp->rx_frm_len_mask;
974
975 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
976 macb_rx_ring_wrap(bp, first_frag),
977 macb_rx_ring_wrap(bp, last_frag), len);
978
979
980
981
982
983
984
985
986
987 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
988 if (!skb) {
989 bp->dev->stats.rx_dropped++;
990 for (frag = first_frag; ; frag++) {
991 desc = macb_rx_desc(bp, frag);
992 desc->addr &= ~MACB_BIT(RX_USED);
993 if (frag == last_frag)
994 break;
995 }
996
997
998 wmb();
999
1000 return 1;
1001 }
1002
1003 offset = 0;
1004 len += NET_IP_ALIGN;
1005 skb_checksum_none_assert(skb);
1006 skb_put(skb, len);
1007
1008 for (frag = first_frag; ; frag++) {
1009 unsigned int frag_len = bp->rx_buffer_size;
1010
1011 if (offset + frag_len > len) {
1012 if (unlikely(frag != last_frag)) {
1013 dev_kfree_skb_any(skb);
1014 return -1;
1015 }
1016 frag_len = len - offset;
1017 }
1018 skb_copy_to_linear_data_offset(skb, offset,
1019 macb_rx_buffer(bp, frag),
1020 frag_len);
1021 offset += bp->rx_buffer_size;
1022 desc = macb_rx_desc(bp, frag);
1023 desc->addr &= ~MACB_BIT(RX_USED);
1024
1025 if (frag == last_frag)
1026 break;
1027 }
1028
1029
1030 wmb();
1031
1032 __skb_pull(skb, NET_IP_ALIGN);
1033 skb->protocol = eth_type_trans(skb, bp->dev);
1034
1035 bp->dev->stats.rx_packets++;
1036 bp->dev->stats.rx_bytes += skb->len;
1037 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1038 skb->len, skb->csum);
1039 netif_receive_skb(skb);
1040
1041 return 0;
1042}
1043
1044static inline void macb_init_rx_ring(struct macb *bp)
1045{
1046 dma_addr_t addr;
1047 struct macb_dma_desc *desc = NULL;
1048 int i;
1049
1050 addr = bp->rx_buffers_dma;
1051 for (i = 0; i < bp->rx_ring_size; i++) {
1052 desc = macb_rx_desc(bp, i);
1053 macb_set_addr(bp, desc, addr);
1054 desc->ctrl = 0;
1055 addr += bp->rx_buffer_size;
1056 }
1057 desc->addr |= MACB_BIT(RX_WRAP);
1058 bp->rx_tail = 0;
1059}
1060
1061static int macb_rx(struct macb *bp, int budget)
1062{
1063 bool reset_rx_queue = false;
1064 int received = 0;
1065 unsigned int tail;
1066 int first_frag = -1;
1067
1068 for (tail = bp->rx_tail; budget > 0; tail++) {
1069 struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
1070 u32 ctrl;
1071
1072
1073 rmb();
1074
1075 ctrl = desc->ctrl;
1076
1077 if (!(desc->addr & MACB_BIT(RX_USED)))
1078 break;
1079
1080 if (ctrl & MACB_BIT(RX_SOF)) {
1081 if (first_frag != -1)
1082 discard_partial_frame(bp, first_frag, tail);
1083 first_frag = tail;
1084 }
1085
1086 if (ctrl & MACB_BIT(RX_EOF)) {
1087 int dropped;
1088
1089 if (unlikely(first_frag == -1)) {
1090 reset_rx_queue = true;
1091 continue;
1092 }
1093
1094 dropped = macb_rx_frame(bp, first_frag, tail);
1095 first_frag = -1;
1096 if (unlikely(dropped < 0)) {
1097 reset_rx_queue = true;
1098 continue;
1099 }
1100 if (!dropped) {
1101 received++;
1102 budget--;
1103 }
1104 }
1105 }
1106
1107 if (unlikely(reset_rx_queue)) {
1108 unsigned long flags;
1109 u32 ctrl;
1110
1111 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1112
1113 spin_lock_irqsave(&bp->lock, flags);
1114
1115 ctrl = macb_readl(bp, NCR);
1116 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1117
1118 macb_init_rx_ring(bp);
1119 macb_writel(bp, RBQP, bp->rx_ring_dma);
1120
1121 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1122
1123 spin_unlock_irqrestore(&bp->lock, flags);
1124 return received;
1125 }
1126
1127 if (first_frag != -1)
1128 bp->rx_tail = first_frag;
1129 else
1130 bp->rx_tail = tail;
1131
1132 return received;
1133}
1134
1135static int macb_poll(struct napi_struct *napi, int budget)
1136{
1137 struct macb *bp = container_of(napi, struct macb, napi);
1138 int work_done;
1139 u32 status;
1140
1141 status = macb_readl(bp, RSR);
1142 macb_writel(bp, RSR, status);
1143
1144 work_done = 0;
1145
1146 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
1147 (unsigned long)status, budget);
1148
1149 work_done = bp->macbgem_ops.mog_rx(bp, budget);
1150 if (work_done < budget) {
1151 napi_complete_done(napi, work_done);
1152
1153
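		/* Packets received while interrupts were disabled */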
1154 status = macb_readl(bp, RSR);
1155 if (status) {
1156 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1157 macb_writel(bp, ISR, MACB_BIT(RCOMP));
1158 napi_reschedule(napi);
1159 } else {
1160 macb_writel(bp, IER, MACB_RX_INT_FLAGS);
1161 }
1162 }
1163
1164
1165
1166 return work_done;
1167}
1168
1169static irqreturn_t macb_interrupt(int irq, void *dev_id)
1170{
1171 struct macb_queue *queue = dev_id;
1172 struct macb *bp = queue->bp;
1173 struct net_device *dev = bp->dev;
1174 u32 status, ctrl;
1175
1176 status = queue_readl(queue, ISR);
1177
1178 if (unlikely(!status))
1179 return IRQ_NONE;
1180
1181 spin_lock(&bp->lock);
1182
1183 while (status) {
1184
1185 if (unlikely(!netif_running(dev))) {
1186 queue_writel(queue, IDR, -1);
1187 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1188 queue_writel(queue, ISR, -1);
1189 break;
1190 }
1191
1192 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1193 (unsigned int)(queue - bp->queues),
1194 (unsigned long)status);
1195
1196 if (status & MACB_RX_INT_FLAGS) {
1197
1198
1199
1200
1201
1202
1203 queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
1204 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1205 queue_writel(queue, ISR, MACB_BIT(RCOMP));
1206
1207 if (napi_schedule_prep(&bp->napi)) {
1208 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
1209 __napi_schedule(&bp->napi);
1210 }
1211 }
1212
1213 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
1214 queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1215 schedule_work(&queue->tx_error_task);
1216
1217 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1218 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
1219
1220 break;
1221 }
1222
1223 if (status & MACB_BIT(TCOMP))
1224 macb_tx_interrupt(queue);
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
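		/* There is a hardware issue under heavy load where the DMA can
		 * stall; it shows up as endless "used bit read" (RXUBR)
		 * interrupts and is cleared by briefly toggling the receive
		 * enable bit, which is what the code below does.
		 */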
1236 if (status & MACB_BIT(RXUBR)) {
1237 ctrl = macb_readl(bp, NCR);
1238 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1239 wmb();
1240 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1241
1242 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1243 queue_writel(queue, ISR, MACB_BIT(RXUBR));
1244 }
1245
1246 if (status & MACB_BIT(ISR_ROVR)) {
1247
1248 if (macb_is_gem(bp))
1249 bp->hw_stats.gem.rx_overruns++;
1250 else
1251 bp->hw_stats.macb.rx_overruns++;
1252
1253 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1254 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
1255 }
1256
1257 if (status & MACB_BIT(HRESP)) {
1258
1259
1260
1261
1262 netdev_err(dev, "DMA bus error: HRESP not OK\n");
1263
1264 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1265 queue_writel(queue, ISR, MACB_BIT(HRESP));
1266 }
1267
1268 status = queue_readl(queue, ISR);
1269 }
1270
1271 spin_unlock(&bp->lock);
1272
1273 return IRQ_HANDLED;
1274}
1275
1276#ifdef CONFIG_NET_POLL_CONTROLLER
1277
1278
1279
1280static void macb_poll_controller(struct net_device *dev)
1281{
1282 struct macb *bp = netdev_priv(dev);
1283 struct macb_queue *queue;
1284 unsigned long flags;
1285 unsigned int q;
1286
1287 local_irq_save(flags);
1288 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1289 macb_interrupt(dev->irq, queue);
1290 local_irq_restore(flags);
1291}
1292#endif
1293
1294static unsigned int macb_tx_map(struct macb *bp,
1295 struct macb_queue *queue,
1296 struct sk_buff *skb,
1297 unsigned int hdrlen)
1298{
1299 dma_addr_t mapping;
1300 unsigned int len, entry, i, tx_head = queue->tx_head;
1301 struct macb_tx_skb *tx_skb = NULL;
1302 struct macb_dma_desc *desc;
1303 unsigned int offset, size, count = 0;
1304 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
1305 unsigned int eof = 1, mss_mfs = 0;
1306 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
1307
1308
1309 if (skb_shinfo(skb)->gso_size != 0) {
1310 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1311
1312 lso_ctrl = MACB_LSO_UFO_ENABLE;
1313 else
1314
1315 lso_ctrl = MACB_LSO_TSO_ENABLE;
1316 }
1317
1318
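	/* First, map non-paged data */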
1319 len = skb_headlen(skb);
1320
1321
1322 size = hdrlen;
1323
1324 offset = 0;
1325 while (len) {
1326 entry = macb_tx_ring_wrap(bp, tx_head);
1327 tx_skb = &queue->tx_skb[entry];
1328
1329 mapping = dma_map_single(&bp->pdev->dev,
1330 skb->data + offset,
1331 size, DMA_TO_DEVICE);
1332 if (dma_mapping_error(&bp->pdev->dev, mapping))
1333 goto dma_error;
1334
1335
1336 tx_skb->skb = NULL;
1337 tx_skb->mapping = mapping;
1338 tx_skb->size = size;
1339 tx_skb->mapped_as_page = false;
1340
1341 len -= size;
1342 offset += size;
1343 count++;
1344 tx_head++;
1345
1346 size = min(len, bp->max_tx_length);
1347 }
1348
1349
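	/* Then, map paged data from fragments */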
1350 for (f = 0; f < nr_frags; f++) {
1351 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1352
1353 len = skb_frag_size(frag);
1354 offset = 0;
1355 while (len) {
1356 size = min(len, bp->max_tx_length);
1357 entry = macb_tx_ring_wrap(bp, tx_head);
1358 tx_skb = &queue->tx_skb[entry];
1359
1360 mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
1361 offset, size, DMA_TO_DEVICE);
1362 if (dma_mapping_error(&bp->pdev->dev, mapping))
1363 goto dma_error;
1364
1365
1366 tx_skb->skb = NULL;
1367 tx_skb->mapping = mapping;
1368 tx_skb->size = size;
1369 tx_skb->mapped_as_page = true;
1370
1371 len -= size;
1372 offset += size;
1373 count++;
1374 tx_head++;
1375 }
1376 }
1377
1378
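	/* Should never happen */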
1379 if (unlikely(!tx_skb)) {
1380 netdev_err(bp->dev, "BUG! empty skb!\n");
1381 return 0;
1382 }
1383
1384
1385 tx_skb->skb = skb;
1386
1387
1388
1389
1390
1391
1392
1393
1394 i = tx_head;
1395 entry = macb_tx_ring_wrap(bp, i);
1396 ctrl = MACB_BIT(TX_USED);
1397 desc = macb_tx_desc(queue, entry);
1398 desc->ctrl = ctrl;
1399
1400 if (lso_ctrl) {
1401 if (lso_ctrl == MACB_LSO_UFO_ENABLE)
1402
1403 mss_mfs = skb_shinfo(skb)->gso_size +
1404 skb_transport_offset(skb) +
1405 ETH_FCS_LEN;
1406 else {
1407 mss_mfs = skb_shinfo(skb)->gso_size;
1408
1409
1410
1411 seq_ctrl = 0;
1412 }
1413 }
1414
1415 do {
1416 i--;
1417 entry = macb_tx_ring_wrap(bp, i);
1418 tx_skb = &queue->tx_skb[entry];
1419 desc = macb_tx_desc(queue, entry);
1420
1421 ctrl = (u32)tx_skb->size;
1422 if (eof) {
1423 ctrl |= MACB_BIT(TX_LAST);
1424 eof = 0;
1425 }
1426 if (unlikely(entry == (bp->tx_ring_size - 1)))
1427 ctrl |= MACB_BIT(TX_WRAP);
1428
1429
1430 if (i == queue->tx_head) {
1431 ctrl |= MACB_BF(TX_LSO, lso_ctrl);
1432 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
1433 } else
1434
1435
1436
1437 ctrl |= MACB_BF(MSS_MFS, mss_mfs);
1438
1439
1440 macb_set_addr(bp, desc, tx_skb->mapping);
1441
1442
1443
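		/* desc->addr must be visible to hardware before clearing
		 * the 'TX_USED' bit in desc->ctrl.
		 */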
1444 wmb();
1445 desc->ctrl = ctrl;
1446 } while (i != queue->tx_head);
1447
1448 queue->tx_head = tx_head;
1449
1450 return count;
1451
1452dma_error:
1453 netdev_err(bp->dev, "TX DMA map failed\n");
1454
1455 for (i = queue->tx_head; i != tx_head; i++) {
1456 tx_skb = macb_tx_skb(queue, i);
1457
1458 macb_tx_unmap(bp, tx_skb);
1459 }
1460
1461 return 0;
1462}
1463
1464static netdev_features_t macb_features_check(struct sk_buff *skb,
1465 struct net_device *dev,
1466 netdev_features_t features)
1467{
1468 unsigned int nr_frags, f;
1469 unsigned int hdrlen;
1470
1471
1472
1473
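	/* Validate LSO compatibility: a linear skb needs no further checks */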
1474 if (!skb_is_nonlinear(skb))
1475 return features;
1476
1477
1478 hdrlen = skb_transport_offset(skb);
1479 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1480 hdrlen += tcp_hdrlen(skb);
1481
1482
1483
1484
1485
1486 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
1487 return features & ~MACB_NETIF_LSO;
1488
1489 nr_frags = skb_shinfo(skb)->nr_frags;
1490
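	/* No need to check last fragment */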
1491 nr_frags--;
1492 for (f = 0; f < nr_frags; f++) {
1493 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1494
1495 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
1496 return features & ~MACB_NETIF_LSO;
1497 }
1498 return features;
1499}
1500
1501static inline int macb_clear_csum(struct sk_buff *skb)
1502{
1503
1504 if (skb->ip_summed != CHECKSUM_PARTIAL)
1505 return 0;
1506
1507
1508 if (unlikely(skb_cow_head(skb, 0)))
1509 return -1;
1510
1511
1512
1513
1514
1515 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
1516 return 0;
1517}
1518
1519static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1520{
1521 u16 queue_index = skb_get_queue_mapping(skb);
1522 struct macb *bp = netdev_priv(dev);
1523 struct macb_queue *queue = &bp->queues[queue_index];
1524 unsigned long flags;
1525 unsigned int desc_cnt, nr_frags, frag_size, f;
1526 unsigned int hdrlen;
1527 bool is_lso, is_udp = 0;
1528
1529 is_lso = (skb_shinfo(skb)->gso_size != 0);
1530
1531 if (is_lso) {
1532 is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
1533
1534
1535 if (is_udp)
1536
1537 hdrlen = skb_transport_offset(skb);
1538 else
1539 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
1540 if (skb_headlen(skb) < hdrlen) {
1541 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
1542
1543 return NETDEV_TX_BUSY;
1544 }
1545 } else
1546 hdrlen = min(skb_headlen(skb), bp->max_tx_length);
1547
1548#if defined(DEBUG) && defined(VERBOSE_DEBUG)
1549 netdev_vdbg(bp->dev,
1550 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
1551 queue_index, skb->len, skb->head, skb->data,
1552 skb_tail_pointer(skb), skb_end_pointer(skb));
1553 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
1554 skb->data, 16, true);
1555#endif
1556
1557
1558
1559
1560
1561 if (is_lso && (skb_headlen(skb) > hdrlen))
1562
1563 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
1564 else
1565 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1566 nr_frags = skb_shinfo(skb)->nr_frags;
1567 for (f = 0; f < nr_frags; f++) {
1568 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1569 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1570 }
1571
1572 spin_lock_irqsave(&bp->lock, flags);
1573
1574
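	/* This is a hard error, log it. */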
1575 if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
1576 bp->tx_ring_size) < desc_cnt) {
1577 netif_stop_subqueue(dev, queue_index);
1578 spin_unlock_irqrestore(&bp->lock, flags);
1579 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
1580 queue->tx_head, queue->tx_tail);
1581 return NETDEV_TX_BUSY;
1582 }
1583
1584 if (macb_clear_csum(skb)) {
1585 dev_kfree_skb_any(skb);
1586 goto unlock;
1587 }
1588
1589
1590 if (!macb_tx_map(bp, queue, skb, hdrlen)) {
1591 dev_kfree_skb_any(skb);
1592 goto unlock;
1593 }
1594
1595
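	/* Make newly initialized descriptors visible to hardware */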
1596 wmb();
1597
1598 skb_tx_timestamp(skb);
1599
1600 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1601
1602 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
1603 netif_stop_subqueue(dev, queue_index);
1604
1605unlock:
1606 spin_unlock_irqrestore(&bp->lock, flags);
1607
1608 return NETDEV_TX_OK;
1609}
1610
1611static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
1612{
1613 if (!macb_is_gem(bp)) {
1614 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
1615 } else {
1616 bp->rx_buffer_size = size;
1617
1618 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
1619 netdev_dbg(bp->dev,
1620 "RX buffer must be multiple of %d bytes, expanding\n",
1621 RX_BUFFER_MULTIPLE);
1622 bp->rx_buffer_size =
1623 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
1624 }
1625 }
1626
1627 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
1628 bp->dev->mtu, bp->rx_buffer_size);
1629}
1630
1631static void gem_free_rx_buffers(struct macb *bp)
1632{
1633 struct sk_buff *skb;
1634 struct macb_dma_desc *desc;
1635 dma_addr_t addr;
1636 int i;
1637
1638 if (!bp->rx_skbuff)
1639 return;
1640
1641 for (i = 0; i < bp->rx_ring_size; i++) {
1642 skb = bp->rx_skbuff[i];
1643
1644 if (!skb)
1645 continue;
1646
1647 desc = macb_rx_desc(bp, i);
1648 addr = macb_get_addr(bp, desc);
1649
1650 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
1651 DMA_FROM_DEVICE);
1652 dev_kfree_skb_any(skb);
1653 skb = NULL;
1654 }
1655
1656 kfree(bp->rx_skbuff);
1657 bp->rx_skbuff = NULL;
1658}
1659
1660static void macb_free_rx_buffers(struct macb *bp)
1661{
1662 if (bp->rx_buffers) {
1663 dma_free_coherent(&bp->pdev->dev,
1664 bp->rx_ring_size * bp->rx_buffer_size,
1665 bp->rx_buffers, bp->rx_buffers_dma);
1666 bp->rx_buffers = NULL;
1667 }
1668}
1669
1670static void macb_free_consistent(struct macb *bp)
1671{
1672 struct macb_queue *queue;
1673 unsigned int q;
1674
1675 bp->macbgem_ops.mog_free_rx_buffers(bp);
1676 if (bp->rx_ring) {
1677 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
1678 bp->rx_ring, bp->rx_ring_dma);
1679 bp->rx_ring = NULL;
1680 }
1681
1682 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1683 kfree(queue->tx_skb);
1684 queue->tx_skb = NULL;
1685 if (queue->tx_ring) {
1686 dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
1687 queue->tx_ring, queue->tx_ring_dma);
1688 queue->tx_ring = NULL;
1689 }
1690 }
1691}
1692
1693static int gem_alloc_rx_buffers(struct macb *bp)
1694{
1695 int size;
1696
1697 size = bp->rx_ring_size * sizeof(struct sk_buff *);
1698 bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
1699 if (!bp->rx_skbuff)
1700 return -ENOMEM;
1701 else
1702 netdev_dbg(bp->dev,
1703 "Allocated %d RX struct sk_buff entries at %p\n",
1704 bp->rx_ring_size, bp->rx_skbuff);
1705 return 0;
1706}
1707
1708static int macb_alloc_rx_buffers(struct macb *bp)
1709{
1710 int size;
1711
1712 size = bp->rx_ring_size * bp->rx_buffer_size;
1713 bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
1714 &bp->rx_buffers_dma, GFP_KERNEL);
1715 if (!bp->rx_buffers)
1716 return -ENOMEM;
1717
1718 netdev_dbg(bp->dev,
1719 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
1720 size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
1721 return 0;
1722}
1723
1724static int macb_alloc_consistent(struct macb *bp)
1725{
1726 struct macb_queue *queue;
1727 unsigned int q;
1728 int size;
1729
1730 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1731 size = TX_RING_BYTES(bp);
1732 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1733 &queue->tx_ring_dma,
1734 GFP_KERNEL);
1735 if (!queue->tx_ring)
1736 goto out_err;
1737 netdev_dbg(bp->dev,
1738 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
1739 q, size, (unsigned long)queue->tx_ring_dma,
1740 queue->tx_ring);
1741
1742 size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
1743 queue->tx_skb = kmalloc(size, GFP_KERNEL);
1744 if (!queue->tx_skb)
1745 goto out_err;
1746 }
1747
1748 size = RX_RING_BYTES(bp);
1749 bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1750 &bp->rx_ring_dma, GFP_KERNEL);
1751 if (!bp->rx_ring)
1752 goto out_err;
1753 netdev_dbg(bp->dev,
1754 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
1755 size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
1756
1757 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
1758 goto out_err;
1759
1760 return 0;
1761
1762out_err:
1763 macb_free_consistent(bp);
1764 return -ENOMEM;
1765}
1766
1767static void gem_init_rings(struct macb *bp)
1768{
1769 struct macb_queue *queue;
1770 struct macb_dma_desc *desc = NULL;
1771 unsigned int q;
1772 int i;
1773
1774 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1775 for (i = 0; i < bp->tx_ring_size; i++) {
1776 desc = macb_tx_desc(queue, i);
1777 macb_set_addr(bp, desc, 0);
1778 desc->ctrl = MACB_BIT(TX_USED);
1779 }
1780 desc->ctrl |= MACB_BIT(TX_WRAP);
1781 queue->tx_head = 0;
1782 queue->tx_tail = 0;
1783 }
1784
1785 bp->rx_tail = 0;
1786 bp->rx_prepared_head = 0;
1787
1788 gem_rx_refill(bp);
1789}
1790
1791static void macb_init_rings(struct macb *bp)
1792{
1793 int i;
1794 struct macb_dma_desc *desc = NULL;
1795
1796 macb_init_rx_ring(bp);
1797
1798 for (i = 0; i < bp->tx_ring_size; i++) {
1799 desc = macb_tx_desc(&bp->queues[0], i);
1800 macb_set_addr(bp, desc, 0);
1801 desc->ctrl = MACB_BIT(TX_USED);
1802 }
1803 bp->queues[0].tx_head = 0;
1804 bp->queues[0].tx_tail = 0;
1805 desc->ctrl |= MACB_BIT(TX_WRAP);
1806}
1807
1808static void macb_reset_hw(struct macb *bp)
1809{
1810 struct macb_queue *queue;
1811 unsigned int q;
1812
1813
1814
1815
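	/* Disable RX and TX */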
1816 macb_writel(bp, NCR, 0);
1817
1818
1819 macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
1820
1821
1822 macb_writel(bp, TSR, -1);
1823 macb_writel(bp, RSR, -1);
1824
1825
1826 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1827 queue_writel(queue, IDR, -1);
1828 queue_readl(queue, ISR);
1829 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1830 queue_writel(queue, ISR, -1);
1831 }
1832}
1833
1834static u32 gem_mdc_clk_div(struct macb *bp)
1835{
1836 u32 config;
1837 unsigned long pclk_hz = clk_get_rate(bp->pclk);
1838
1839 if (pclk_hz <= 20000000)
1840 config = GEM_BF(CLK, GEM_CLK_DIV8);
1841 else if (pclk_hz <= 40000000)
1842 config = GEM_BF(CLK, GEM_CLK_DIV16);
1843 else if (pclk_hz <= 80000000)
1844 config = GEM_BF(CLK, GEM_CLK_DIV32);
1845 else if (pclk_hz <= 120000000)
1846 config = GEM_BF(CLK, GEM_CLK_DIV48);
1847 else if (pclk_hz <= 160000000)
1848 config = GEM_BF(CLK, GEM_CLK_DIV64);
1849 else
1850 config = GEM_BF(CLK, GEM_CLK_DIV96);
1851
1852 return config;
1853}
1854
1855static u32 macb_mdc_clk_div(struct macb *bp)
1856{
1857 u32 config;
1858 unsigned long pclk_hz;
1859
1860 if (macb_is_gem(bp))
1861 return gem_mdc_clk_div(bp);
1862
1863 pclk_hz = clk_get_rate(bp->pclk);
1864 if (pclk_hz <= 20000000)
1865 config = MACB_BF(CLK, MACB_CLK_DIV8);
1866 else if (pclk_hz <= 40000000)
1867 config = MACB_BF(CLK, MACB_CLK_DIV16);
1868 else if (pclk_hz <= 80000000)
1869 config = MACB_BF(CLK, MACB_CLK_DIV32);
1870 else
1871 config = MACB_BF(CLK, MACB_CLK_DIV64);
1872
1873 return config;
1874}
1875
1876
1877
1878
1879
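/* Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */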
1880static u32 macb_dbw(struct macb *bp)
1881{
1882 if (!macb_is_gem(bp))
1883 return 0;
1884
1885 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
1886 case 4:
1887 return GEM_BF(DBW, GEM_DBW128);
1888 case 2:
1889 return GEM_BF(DBW, GEM_DBW64);
1890 case 1:
1891 default:
1892 return GEM_BF(DBW, GEM_DBW32);
1893 }
1894}
1895
1896
1897
1898
1899
1900
1901
1902
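/* Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set best burst length for DMA operations
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */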
1903static void macb_configure_dma(struct macb *bp)
1904{
1905 u32 dmacfg;
1906
1907 if (macb_is_gem(bp)) {
1908 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
1909 dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
1910 if (bp->dma_burst_length)
1911 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
1912 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1913 dmacfg &= ~GEM_BIT(ENDIA_PKT);
1914
1915 if (bp->native_io)
1916 dmacfg &= ~GEM_BIT(ENDIA_DESC);
1917 else
1918 dmacfg |= GEM_BIT(ENDIA_DESC);
1919
1920 if (bp->dev->features & NETIF_F_HW_CSUM)
1921 dmacfg |= GEM_BIT(TXCOEN);
1922 else
1923 dmacfg &= ~GEM_BIT(TXCOEN);
1924
1925#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1926 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1927 dmacfg |= GEM_BIT(ADDR64);
1928#endif
1929 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
1930 dmacfg);
1931 gem_writel(bp, DMACFG, dmacfg);
1932 }
1933}
1934
1935static void macb_init_hw(struct macb *bp)
1936{
1937 struct macb_queue *queue;
1938 unsigned int q;
1939
1940 u32 config;
1941
1942 macb_reset_hw(bp);
1943 macb_set_hwaddr(bp);
1944
1945 config = macb_mdc_clk_div(bp);
1946 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
1947 config |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
1948 config |= MACB_BF(RBOF, NET_IP_ALIGN);
1949 config |= MACB_BIT(PAE);
1950 config |= MACB_BIT(DRFCS);
1951 if (bp->caps & MACB_CAPS_JUMBO)
1952 config |= MACB_BIT(JFRAME);
1953 else
1954 config |= MACB_BIT(BIG);
1955 if (bp->dev->flags & IFF_PROMISC)
1956 config |= MACB_BIT(CAF);
1957 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
1958 config |= GEM_BIT(RXCOEN);
1959 if (!(bp->dev->flags & IFF_BROADCAST))
1960 config |= MACB_BIT(NBC);
1961 config |= macb_dbw(bp);
1962 macb_writel(bp, NCFGR, config);
1963 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
1964 gem_writel(bp, JML, bp->jumbo_max_len);
1965 bp->speed = SPEED_10;
1966 bp->duplex = DUPLEX_HALF;
1967 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
1968 if (bp->caps & MACB_CAPS_JUMBO)
1969 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
1970
1971 macb_configure_dma(bp);
1972
1973
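	/* Initialize TX and RX buffer queues */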
1974 macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
1975#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1976 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1977 macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
1978#endif
1979 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1980 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1981#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1982 if (bp->hw_dma_cap == HW_DMA_CAP_64B)
1983 queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
1984#endif
1985
1986
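		/* Enable interrupts */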
1987 queue_writel(queue, IER,
1988 MACB_RX_INT_FLAGS |
1989 MACB_TX_INT_FLAGS |
1990 MACB_BIT(HRESP));
1991 }
1992
1993
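	/* Enable TX and RX */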
1994 macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
1995}
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
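/* The hash address register is 64 bits long and takes up two locations in
 * the memory map. The least significant bits are stored in HRB and the most
 * significant bits in HRT.
 *
 * The unicast hash enable and the multicast hash enable bits in the network
 * configuration register enable the reception of hash matched frames. The
 * destination address is reduced to a 6 bit index into the 64 bit hash
 * register using the following hash function: an exclusive or of every
 * sixth bit of the destination address.
 */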
2030static inline int hash_bit_value(int bitnr, __u8 *addr)
2031{
2032 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
2033 return 1;
2034 return 0;
2035}
2036
2037
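/* Return the hash index value for the specified address. */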
2038static int hash_get_index(__u8 *addr)
2039{
2040 int i, j, bitval;
2041 int hash_index = 0;
2042
2043 for (j = 0; j < 6; j++) {
2044 for (i = 0, bitval = 0; i < 8; i++)
2045 bitval ^= hash_bit_value(i * 6 + j, addr);
2046
2047 hash_index |= (bitval << j);
2048 }
2049
2050 return hash_index;
2051}
2052
2053
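/* Add multicast addresses to the internal multicast-hash table. */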
2054static void macb_sethashtable(struct net_device *dev)
2055{
2056 struct netdev_hw_addr *ha;
2057 unsigned long mc_filter[2];
2058 unsigned int bitnr;
2059 struct macb *bp = netdev_priv(dev);
2060
2061 mc_filter[0] = 0;
2062 mc_filter[1] = 0;
2063
2064 netdev_for_each_mc_addr(ha, dev) {
2065 bitnr = hash_get_index(ha->addr);
2066 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
2067 }
2068
2069 macb_or_gem_writel(bp, HRB, mc_filter[0]);
2070 macb_or_gem_writel(bp, HRT, mc_filter[1]);
2071}
2072
2073
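/* Enable/Disable promiscuous and multicast modes. */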
2074static void macb_set_rx_mode(struct net_device *dev)
2075{
2076 unsigned long cfg;
2077 struct macb *bp = netdev_priv(dev);
2078
2079 cfg = macb_readl(bp, NCFGR);
2080
2081 if (dev->flags & IFF_PROMISC) {
2082
2083 cfg |= MACB_BIT(CAF);
2084
2085
2086 if (macb_is_gem(bp))
2087 cfg &= ~GEM_BIT(RXCOEN);
2088 } else {
2089
2090 cfg &= ~MACB_BIT(CAF);
2091
2092
2093 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2094 cfg |= GEM_BIT(RXCOEN);
2095 }
2096
2097 if (dev->flags & IFF_ALLMULTI) {
2098
2099 macb_or_gem_writel(bp, HRB, -1);
2100 macb_or_gem_writel(bp, HRT, -1);
2101 cfg |= MACB_BIT(NCFGR_MTI);
2102 } else if (!netdev_mc_empty(dev)) {
2103
2104 macb_sethashtable(dev);
2105 cfg |= MACB_BIT(NCFGR_MTI);
2106 } else if (dev->flags & (~IFF_ALLMULTI)) {
2107
2108 macb_or_gem_writel(bp, HRB, 0);
2109 macb_or_gem_writel(bp, HRT, 0);
2110 cfg &= ~MACB_BIT(NCFGR_MTI);
2111 }
2112
2113 macb_writel(bp, NCFGR, cfg);
2114}
2115
2116static int macb_open(struct net_device *dev)
2117{
2118 struct macb *bp = netdev_priv(dev);
2119 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
2120 int err;
2121
2122 netdev_dbg(bp->dev, "open\n");
2123
2124
2125 netif_carrier_off(dev);
2126
2127
2128 if (!dev->phydev)
2129 return -EAGAIN;
2130
2131
2132 macb_init_rx_buffer_size(bp, bufsz);
2133
2134 err = macb_alloc_consistent(bp);
2135 if (err) {
2136 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
2137 err);
2138 return err;
2139 }
2140
2141 napi_enable(&bp->napi);
2142
2143 bp->macbgem_ops.mog_init_rings(bp);
2144 macb_init_hw(bp);
2145
2146
2147 phy_start(dev->phydev);
2148
2149 netif_tx_start_all_queues(dev);
2150
2151 if (bp->ptp_info)
2152 bp->ptp_info->ptp_init(dev);
2153
2154 return 0;
2155}
2156
2157static int macb_close(struct net_device *dev)
2158{
2159 struct macb *bp = netdev_priv(dev);
2160 unsigned long flags;
2161
2162 netif_tx_stop_all_queues(dev);
2163 napi_disable(&bp->napi);
2164
2165 if (dev->phydev)
2166 phy_stop(dev->phydev);
2167
2168 spin_lock_irqsave(&bp->lock, flags);
2169 macb_reset_hw(bp);
2170 netif_carrier_off(dev);
2171 spin_unlock_irqrestore(&bp->lock, flags);
2172
2173 macb_free_consistent(bp);
2174
2175 if (bp->ptp_info)
2176 bp->ptp_info->ptp_remove(dev);
2177
2178 return 0;
2179}
2180
2181static int macb_change_mtu(struct net_device *dev, int new_mtu)
2182{
2183 if (netif_running(dev))
2184 return -EBUSY;
2185
2186 dev->mtu = new_mtu;
2187
2188 return 0;
2189}
2190
2191static void gem_update_stats(struct macb *bp)
2192{
2193 unsigned int i;
2194 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
2195
2196 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
2197 u32 offset = gem_statistics[i].offset;
2198 u64 val = bp->macb_reg_readl(bp, offset);
2199
2200 bp->ethtool_stats[i] += val;
2201 *p += val;
2202
2203 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
2204
2205 val = bp->macb_reg_readl(bp, offset + 4);
2206 bp->ethtool_stats[i] += ((u64)val) << 32;
2207 *(++p) += val;
2208 }
2209 }
2210}
2211
2212static struct net_device_stats *gem_get_stats(struct macb *bp)
2213{
2214 struct gem_stats *hwstat = &bp->hw_stats.gem;
2215 struct net_device_stats *nstat = &bp->dev->stats;
2216
2217 gem_update_stats(bp);
2218
2219 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
2220 hwstat->rx_alignment_errors +
2221 hwstat->rx_resource_errors +
2222 hwstat->rx_overruns +
2223 hwstat->rx_oversize_frames +
2224 hwstat->rx_jabbers +
2225 hwstat->rx_undersized_frames +
2226 hwstat->rx_length_field_frame_errors);
2227 nstat->tx_errors = (hwstat->tx_late_collisions +
2228 hwstat->tx_excessive_collisions +
2229 hwstat->tx_underrun +
2230 hwstat->tx_carrier_sense_errors);
2231 nstat->multicast = hwstat->rx_multicast_frames;
2232 nstat->collisions = (hwstat->tx_single_collision_frames +
2233 hwstat->tx_multiple_collision_frames +
2234 hwstat->tx_excessive_collisions);
2235 nstat->rx_length_errors = (hwstat->rx_oversize_frames +
2236 hwstat->rx_jabbers +
2237 hwstat->rx_undersized_frames +
2238 hwstat->rx_length_field_frame_errors);
2239 nstat->rx_over_errors = hwstat->rx_resource_errors;
2240 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
2241 nstat->rx_frame_errors = hwstat->rx_alignment_errors;
2242 nstat->rx_fifo_errors = hwstat->rx_overruns;
2243 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
2244 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
2245 nstat->tx_fifo_errors = hwstat->tx_underrun;
2246
2247 return nstat;
2248}
2249
2250static void gem_get_ethtool_stats(struct net_device *dev,
2251 struct ethtool_stats *stats, u64 *data)
2252{
2253 struct macb *bp;
2254
2255 bp = netdev_priv(dev);
2256 gem_update_stats(bp);
2257 memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
2258}
2259
2260static int gem_get_sset_count(struct net_device *dev, int sset)
2261{
2262 switch (sset) {
2263 case ETH_SS_STATS:
2264 return GEM_STATS_LEN;
2265 default:
2266 return -EOPNOTSUPP;
2267 }
2268}
2269
2270static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2271{
2272 unsigned int i;
2273
2274 switch (sset) {
2275 case ETH_SS_STATS:
2276 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2277 memcpy(p, gem_statistics[i].stat_string,
2278 ETH_GSTRING_LEN);
2279 break;
2280 }
2281}
2282
2283static struct net_device_stats *macb_get_stats(struct net_device *dev)
2284{
2285 struct macb *bp = netdev_priv(dev);
2286 struct net_device_stats *nstat = &bp->dev->stats;
2287 struct macb_stats *hwstat = &bp->hw_stats.macb;
2288
2289 if (macb_is_gem(bp))
2290 return gem_get_stats(bp);
2291
2292
2293 macb_update_stats(bp);
2294
2295
2296 nstat->rx_errors = (hwstat->rx_fcs_errors +
2297 hwstat->rx_align_errors +
2298 hwstat->rx_resource_errors +
2299 hwstat->rx_overruns +
2300 hwstat->rx_oversize_pkts +
2301 hwstat->rx_jabbers +
2302 hwstat->rx_undersize_pkts +
2303 hwstat->rx_length_mismatch);
2304 nstat->tx_errors = (hwstat->tx_late_cols +
2305 hwstat->tx_excessive_cols +
2306 hwstat->tx_underruns +
2307 hwstat->tx_carrier_errors +
2308 hwstat->sqe_test_errors);
2309 nstat->collisions = (hwstat->tx_single_cols +
2310 hwstat->tx_multiple_cols +
2311 hwstat->tx_excessive_cols);
2312 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2313 hwstat->rx_jabbers +
2314 hwstat->rx_undersize_pkts +
2315 hwstat->rx_length_mismatch);
2316 nstat->rx_over_errors = hwstat->rx_resource_errors +
2317 hwstat->rx_overruns;
2318 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2319 nstat->rx_frame_errors = hwstat->rx_align_errors;
2320 nstat->rx_fifo_errors = hwstat->rx_overruns;
2321
2322 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2323 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2324 nstat->tx_fifo_errors = hwstat->tx_underruns;
2325
2326
2327 return nstat;
2328}
2329
2330static int macb_get_regs_len(struct net_device *netdev)
2331{
2332 return MACB_GREGS_NBR * sizeof(u32);
2333}
2334
2335static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2336 void *p)
2337{
2338 struct macb *bp = netdev_priv(dev);
2339 unsigned int tail, head;
2340 u32 *regs_buff = p;
2341
2342 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2343 | MACB_GREGS_VERSION;
2344
2345 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
2346 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
2347
2348 regs_buff[0] = macb_readl(bp, NCR);
2349 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
2350 regs_buff[2] = macb_readl(bp, NSR);
2351 regs_buff[3] = macb_readl(bp, TSR);
2352 regs_buff[4] = macb_readl(bp, RBQP);
2353 regs_buff[5] = macb_readl(bp, TBQP);
2354 regs_buff[6] = macb_readl(bp, RSR);
2355 regs_buff[7] = macb_readl(bp, IMR);
2356
2357 regs_buff[8] = tail;
2358 regs_buff[9] = head;
2359 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2360 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
2361
2362 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2363 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
2364 if (macb_is_gem(bp))
2365 regs_buff[13] = gem_readl(bp, DMACFG);
2366}
2367
static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	wol->supported = 0;
	wol->wolopts = 0;

	if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
		wol->supported = WAKE_MAGIC;

		if (bp->wol & MACB_WOL_ENABLED)
			wol->wolopts |= WAKE_MAGIC;
	}
}

static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct macb *bp = netdev_priv(netdev);

	if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
	    (wol->wolopts & ~WAKE_MAGIC))
		return -EOPNOTSUPP;

	if (wol->wolopts & WAKE_MAGIC)
		bp->wol |= MACB_WOL_ENABLED;
	else
		bp->wol &= ~MACB_WOL_ENABLED;

	device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);

	return 0;
}

static void macb_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);

	ring->rx_max_pending = MAX_RX_RING_SIZE;
	ring->tx_max_pending = MAX_TX_RING_SIZE;

	ring->rx_pending = bp->rx_ring_size;
	ring->tx_pending = bp->tx_ring_size;
}

static int macb_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct macb *bp = netdev_priv(netdev);
	u32 new_rx_size, new_tx_size;
	unsigned int reset = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

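	/* Clamp the requested sizes to the supported limits and round them up
	 * to a power of two: the ring-wrap helpers assume power-of-two sizes.
	 */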
	new_rx_size = clamp_t(u32, ring->rx_pending,
			      MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
	new_rx_size = roundup_pow_of_two(new_rx_size);

	new_tx_size = clamp_t(u32, ring->tx_pending,
			      MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
	new_tx_size = roundup_pow_of_two(new_tx_size);

	if ((new_tx_size == bp->tx_ring_size) &&
	    (new_rx_size == bp->rx_ring_size)) {
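		/* nothing changed, nothing to do */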
		return 0;
	}

	if (netif_running(bp->dev)) {
		reset = 1;
		macb_close(bp->dev);
	}

	bp->rx_ring_size = new_rx_size;
	bp->tx_ring_size = new_tx_size;

	if (reset)
		macb_open(bp->dev);

	return 0;
}

static int macb_get_ts_info(struct net_device *netdev,
			    struct ethtool_ts_info *info)
{
	struct macb *bp = netdev_priv(netdev);

	if (bp->ptp_info)
		return bp->ptp_info->get_ts_info(netdev, info);

	return ethtool_op_get_ts_info(netdev, info);
}

static const struct ethtool_ops macb_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_wol		= macb_get_wol,
	.set_wol		= macb_set_wol,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
};

static const struct ethtool_ops gem_ethtool_ops = {
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= macb_get_ts_info,
	.get_ethtool_stats	= gem_get_ethtool_stats,
	.get_strings		= gem_get_ethtool_strings,
	.get_sset_count		= gem_get_sset_count,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	.get_ringparam		= macb_get_ringparam,
	.set_ringparam		= macb_set_ringparam,
};

static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;
	struct macb *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	if (!bp->ptp_info)
		return phy_mii_ioctl(phydev, rq, cmd);

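	/* Hardware timestamping requests go to the PTP helpers; everything
	 * else is handled by the PHY layer.
	 */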
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return bp->ptp_info->set_hwtst(dev, rq, cmd);
	case SIOCGHWTSTAMP:
		return bp->ptp_info->get_hwtst(dev, rq);
	default:
		return phy_mii_ioctl(phydev, rq, cmd);
	}
}

static int macb_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct macb *bp = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

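	/* TX checksum offload (GEM only) */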
	if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
		u32 dmacfg;

		dmacfg = gem_readl(bp, DMACFG);
		if (features & NETIF_F_HW_CSUM)
			dmacfg |= GEM_BIT(TXCOEN);
		else
			dmacfg &= ~GEM_BIT(TXCOEN);
		gem_writel(bp, DMACFG, dmacfg);
	}

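	/* RX checksum offload (GEM only) */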
	if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
		u32 netcfg;

		netcfg = gem_readl(bp, NCFGR);
		if (features & NETIF_F_RXCSUM &&
		    !(netdev->flags & IFF_PROMISC))
			netcfg |= GEM_BIT(RXCOEN);
		else
			netcfg &= ~GEM_BIT(RXCOEN);
		gem_writel(bp, NCFGR, netcfg);
	}

	return 0;
}

static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= macb_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
	.ndo_set_features	= macb_set_features,
	.ndo_features_check	= macb_features_check,
};

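/* Configure peripheral capabilities: start from the device-tree provided
 * configuration (if any), then probe the hardware for additional features.
 */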
static void macb_configure_caps(struct macb *bp,
				const struct macb_config *dt_conf)
{
	u32 dcfg;

	if (dt_conf)
		bp->caps = dt_conf->caps;

	if (hw_is_gem(bp->regs, bp->native_io)) {
		bp->caps |= MACB_CAPS_MACB_IS_GEM;

		dcfg = gem_readl(bp, DCFG1);
		if (GEM_BFEXT(IRQCOR, dcfg) == 0)
			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
		dcfg = gem_readl(bp, DCFG2);
		if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
			bp->caps |= MACB_CAPS_FIFO_MODE;
	}

	dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
}

static void macb_probe_queues(void __iomem *mem,
			      bool native_io,
			      unsigned int *queue_mask,
			      unsigned int *num_queues)
{
	unsigned int hw_q;

	*queue_mask = 0x1;
	*num_queues = 1;

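	/* Is it macb or gem?
	 *
	 * We need to read directly from the hardware here because we are
	 * early in the probe process and the MACB_CAPS_MACB_IS_GEM flag is
	 * not set yet.
	 */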
	if (!hw_is_gem(mem, native_io))
		return;

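	/* Bit 0 is never set but queue 0 always exists */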
	*queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;

	*queue_mask |= 0x1;

	for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
		if (*queue_mask & (1 << hw_q))
			(*num_queues)++;
}

static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
			 struct clk **hclk, struct clk **tx_clk,
			 struct clk **rx_clk)
{
	struct macb_platform_data *pdata;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (pdata) {
		*pclk = pdata->pclk;
		*hclk = pdata->hclk;
	} else {
		*pclk = devm_clk_get(&pdev->dev, "pclk");
		*hclk = devm_clk_get(&pdev->dev, "hclk");
	}

	if (IS_ERR(*pclk)) {
		err = PTR_ERR(*pclk);
		dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
		return err;
	}

	if (IS_ERR(*hclk)) {
		err = PTR_ERR(*hclk);
		dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
		return err;
	}

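	/* The TX and RX clocks are optional: treat a missing clock as NULL */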
	*tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
		return err;
	}

	err = clk_prepare_enable(*hclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err);
		goto err_disable_pclk;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
		goto err_disable_hclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
		goto err_disable_txclk;
	}

	return 0;

err_disable_txclk:
	clk_disable_unprepare(*tx_clk);

err_disable_hclk:
	clk_disable_unprepare(*hclk);

err_disable_pclk:
	clk_disable_unprepare(*pclk);

	return err;
}

static int macb_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	unsigned int hw_q, q;
	struct macb *bp = netdev_priv(dev);
	struct macb_queue *queue;
	int err;
	u32 val;

	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;

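	/* Set the queue register mapping once and for all: queue0 has a
	 * special register mapping, but we don't want to test the queue
	 * index and compute the corresponding register offset at run time.
	 */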
	for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
		if (!(bp->queue_mask & (1 << hw_q)))
			continue;

		queue = &bp->queues[q];
		queue->bp = bp;
		if (hw_q) {
			queue->ISR  = GEM_ISR(hw_q - 1);
			queue->IER  = GEM_IER(hw_q - 1);
			queue->IDR  = GEM_IDR(hw_q - 1);
			queue->IMR  = GEM_IMR(hw_q - 1);
			queue->TBQP = GEM_TBQP(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap == HW_DMA_CAP_64B)
				queue->TBQPH = GEM_TBQPH(hw_q - 1);
#endif
		} else {
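			/* queue0 uses the legacy registers */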
			queue->ISR  = MACB_ISR;
			queue->IER  = MACB_IER;
			queue->IDR  = MACB_IDR;
			queue->IMR  = MACB_IMR;
			queue->TBQP = MACB_TBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			if (bp->hw_dma_cap == HW_DMA_CAP_64B)
				queue->TBQPH = MACB_TBQPH;
#endif
		}

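		/* Get the IRQ for this queue: note that we use the Linux
		 * queue index (q), not the hardware queue index, so the IRQ
		 * list in the device tree must not contain gaps for disabled
		 * hardware queues.
		 */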
		queue->irq = platform_get_irq(pdev, q);
		err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
				       IRQF_SHARED, dev->name, queue);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to request IRQ %d (error %d)\n",
				queue->irq, err);
			return err;
		}

		INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
		q++;
	}

	dev->netdev_ops = &macb_netdev_ops;
	netif_napi_add(dev, &bp->napi, macb_poll, 64);

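	/* Setup appropriate routines according to adapter type (MACB or GEM) */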
	if (macb_is_gem(bp)) {
		bp->max_tx_length = GEM_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = gem_init_rings;
		bp->macbgem_ops.mog_rx = gem_rx;
		dev->ethtool_ops = &gem_ethtool_ops;
	} else {
		bp->max_tx_length = MACB_MAX_TX_LEN;
		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
		bp->macbgem_ops.mog_init_rings = macb_init_rings;
		bp->macbgem_ops.mog_rx = macb_rx;
		dev->ethtool_ops = &macb_ethtool_ops;
	}

	dev->hw_features = NETIF_F_SG;

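	/* Check LSO capability */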
	if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
		dev->hw_features |= MACB_NETIF_LSO;

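	/* Checksum offload is only available on GEM with a packet buffer */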
	if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	if (bp->caps & MACB_CAPS_SG_DISABLED)
		dev->hw_features &= ~NETIF_F_SG;
	dev->features = dev->hw_features;

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
		val = 0;
		if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
			val = GEM_BIT(RGMII);
		else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
			 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(RMII);
		else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
			val = MACB_BIT(MII);

		if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
			val |= MACB_BIT(CLKEN);

		macb_or_gem_writel(bp, USRIO, val);
	}

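	/* Set MII management clock divider and data bus width */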
	val = macb_mdc_clk_div(bp);
	val |= macb_dbw(bp);
	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	macb_writel(bp, NCFGR, val);

	return 0;
}

#if defined(CONFIG_OF)
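/* 1518-byte frame rounded up to a multiple of 64 (0x600 = 1536 bytes) */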
#define AT91ETHER_MAX_RBUFF_SZ	0x600

#define AT91ETHER_MAX_RX_DESCR	9

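/* Initialize the RX descriptor ring and start the receiver and transmitter */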
static int at91ether_start(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_dma_desc *desc;
	dma_addr_t addr;
	u32 ctl;
	int i;

	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
					 (AT91ETHER_MAX_RX_DESCR *
					  macb_dma_desc_get_size(lp)),
					 &lp->rx_ring_dma, GFP_KERNEL);
	if (!lp->rx_ring)
		return -ENOMEM;

	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
					    AT91ETHER_MAX_RX_DESCR *
					    AT91ETHER_MAX_RBUFF_SZ,
					    &lp->rx_buffers_dma, GFP_KERNEL);
	if (!lp->rx_buffers) {
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
				  macb_dma_desc_get_size(lp),
				  lp->rx_ring, lp->rx_ring_dma);
		lp->rx_ring = NULL;
		return -ENOMEM;
	}

	addr = lp->rx_buffers_dma;
	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
		desc = macb_rx_desc(lp, i);
		macb_set_addr(lp, desc, addr);
		desc->ctrl = 0;
		addr += AT91ETHER_MAX_RBUFF_SZ;
	}

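	/* Set the Wrap bit on the last descriptor */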
	desc->addr |= MACB_BIT(RX_WRAP);

	lp->rx_tail = 0;

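	/* Program the address of the descriptor list into the RX buffer queue pointer */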
	macb_writel(lp, RBQP, lp->rx_ring_dma);

	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));

	return 0;
}

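/* Open the Ethernet interface */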
static int at91ether_open(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;
	int ret;

	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));

	macb_set_hwaddr(lp);

	ret = at91ether_start(dev);
	if (ret)
		return ret;

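	/* Enable MAC interrupts */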
	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	phy_start(dev->phydev);

	netif_start_queue(dev);

	return 0;
}

static int at91ether_close(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	u32 ctl;

	ctl = macb_readl(lp, NCR);
	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));

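	/* Disable MAC interrupts */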
	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
			     MACB_BIT(RXUBR)	|
			     MACB_BIT(ISR_TUND)	|
			     MACB_BIT(ISR_RLE)	|
			     MACB_BIT(TCOMP)	|
			     MACB_BIT(ISR_ROVR)	|
			     MACB_BIT(HRESP));

	netif_stop_queue(dev);

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR *
			  macb_dma_desc_get_size(lp),
			  lp->rx_ring, lp->rx_ring_dma);
	lp->rx_ring = NULL;

	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
			  lp->rx_buffers, lp->rx_buffers_dma);
	lp->rx_buffers = NULL;

	return 0;
}

static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);

	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
		netif_stop_queue(dev);

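		/* Store packet information (to free it once TX has completed) */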
		lp->skb = skb;
		lp->skb_length = skb->len;
		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
						  DMA_TO_DEVICE);
		if (dma_mapping_error(NULL, lp->skb_physaddr)) {
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			netdev_err(dev, "%s: DMA mapping error\n", __func__);
			return NETDEV_TX_OK;
		}

		macb_writel(lp, TAR, lp->skb_physaddr);
		macb_writel(lp, TCR, skb->len);
	} else {
		netdev_err(dev, "%s called, but device is busy!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	return NETDEV_TX_OK;
}

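/* Extract received frames from the RX descriptor ring and hand them to the
 * network stack (called from interrupt context).
 */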
static void at91ether_rx(struct net_device *dev)
{
	struct macb *lp = netdev_priv(dev);
	struct macb_dma_desc *desc;
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;

	desc = macb_rx_desc(lp, lp->rx_tail);
	while (desc->addr & MACB_BIT(RX_USED)) {
		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, pktlen), p_recv, pktlen);

			skb->protocol = eth_type_trans(skb, dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(skb);
		} else {
			dev->stats.rx_dropped++;
		}

		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
			dev->stats.multicast++;

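		/* Reset the ownership bit so the descriptor can be reused */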
		desc->addr &= ~MACB_BIT(RX_USED);

		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
			lp->rx_tail = 0;
		else
			lp->rx_tail++;

		desc = macb_rx_desc(lp, lp->rx_tail);
	}
}

static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *lp = netdev_priv(dev);
	u32 intstatus, ctl;

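	/* The MAC Interrupt Status register indicates which interrupts are
	 * pending; it is automatically cleared once read.
	 */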
	intstatus = macb_readl(lp, ISR);

	if (intstatus & MACB_BIT(RCOMP))
		at91ether_rx(dev);

	if (intstatus & MACB_BIT(TCOMP)) {
		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
			dev->stats.tx_errors++;

		if (lp->skb) {
			dev_kfree_skb_irq(lp->skb);
			lp->skb = NULL;
			dma_unmap_single(NULL, lp->skb_physaddr,
					 lp->skb_length, DMA_TO_DEVICE);
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += lp->skb_length;
		}
		netif_wake_queue(dev);
	}

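	/* Work around a receiver hang: when the RX used-bit condition (RXUBR)
	 * is signalled, restart the receiver by toggling the RE bit in NCR.
	 */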
	if (intstatus & MACB_BIT(RXUBR)) {
		ctl = macb_readl(lp, NCR);
		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
		wmb();
		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
	}

	if (intstatus & MACB_BIT(ISR_ROVR))
		netdev_err(dev, "ROVR error\n");

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void at91ether_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	at91ether_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif

static const struct net_device_ops at91ether_netdev_ops = {
	.ndo_open		= at91ether_open,
	.ndo_stop		= at91ether_close,
	.ndo_start_xmit		= at91ether_start_xmit,
	.ndo_get_stats		= macb_get_stats,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= at91ether_poll_controller,
#endif
};

static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
			      struct clk **hclk, struct clk **tx_clk,
			      struct clk **rx_clk)
{
	int err;

	*hclk = NULL;
	*tx_clk = NULL;
	*rx_clk = NULL;

	*pclk = devm_clk_get(&pdev->dev, "ether_clk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	err = clk_prepare_enable(*pclk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err);
		return err;
	}

	return 0;
}

static int at91ether_init(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(dev);
	int err;
	u32 reg;

	dev->netdev_ops = &at91ether_netdev_ops;
	dev->ethtool_ops = &macb_ethtool_ops;

	err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
			       0, dev->name, dev);
	if (err)
		return err;

	macb_writel(bp, NCR, 0);

	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
	if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
		reg |= MACB_BIT(RM9200_RMII);

	macb_writel(bp, NCFGR, reg);

	return 0;
}

static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
	      | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
	.dma_burst_length = 4,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config emac_config = {
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
};

static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
	{ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif

static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO,
	.dma_burst_length = 16,
	.clk_init = macb_clk_init,
	.init = macb_init,
	.jumbo_max_len = 10240,
};

static int macb_probe(struct platform_device *pdev)
{
	const struct macb_config *macb_config = &default_gem_config;
	int (*clk_init)(struct platform_device *, struct clk **,
			struct clk **, struct clk **, struct clk **)
					      = macb_config->clk_init;
	int (*init)(struct platform_device *) = macb_config->init;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *phy_node;
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	unsigned int queue_mask, num_queues;
	struct macb_platform_data *pdata;
	bool native_io;
	struct phy_device *phydev;
	struct net_device *dev;
	struct resource *regs;
	void __iomem *mem;
	const char *mac;
	struct macb *bp;
	int err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mem = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	if (np) {
		const struct of_device_id *match;

		match = of_match_node(macb_dt_ids, np);
		if (match && match->data) {
			macb_config = match->data;
			clk_init = macb_config->clk_init;
			init = macb_config->init;
		}
	}

	err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk);
	if (err)
		return err;

	native_io = hw_is_native_io(mem);

	macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->queue_mask = queue_mask;
	if (macb_config)
		bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	if (macb_config)
		bp->jumbo_max_len = macb_config->jumbo_max_len;

	bp->wol = 0;
	if (of_get_property(np, "magic-packet", NULL))
		bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
		bp->hw_dma_cap = HW_DMA_CAP_64B;
	} else
		bp->hw_dma_cap = HW_DMA_CAP_32B;
#endif

	spin_lock_init(&bp->lock);

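	/* Setup capabilities */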
	macb_configure_caps(bp, macb_config);

	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

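	/* MTU range: 68 up to either 1500 or the jumbo limit read from JML */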
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if (bp->caps & MACB_CAPS_JUMBO)
		dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = ETH_DATA_LEN;

	mac = of_get_mac_address(np);
	if (mac)
		ether_addr_copy(bp->dev->dev_addr, mac);
	else
		macb_get_hwaddr(bp);

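	/* Power up the PHY if there is a GPIO reset */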
	phy_node = of_get_next_available_child(np, NULL);
	if (phy_node) {
		int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);

		if (gpio_is_valid(gpio)) {
			bp->reset_gpio = gpio_to_desc(gpio);
			gpiod_direction_output(bp->reset_gpio, 1);
		}
	}
	of_node_put(phy_node);

	err = of_get_phy_mode(np);
	if (err < 0) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

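	/* IP specific init */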
	err = init(pdev);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_free_netdev;

	phydev = dev->phydev;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	phy_attached_info(phydev);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	return 0;

err_out_unregister_mdio:
	phy_disconnect(dev->phydev);
	mdiobus_unregister(bp->mii_bus);
	mdiobus_free(bp->mii_bus);

	if (bp->reset_gpio)
		gpiod_set_value(bp->reset_gpio, 0);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);

	return err;
}

static int macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		dev->phydev = NULL;
		mdiobus_free(bp->mii_bus);

		if (bp->reset_gpio)
			gpiod_set_value(bp->reset_gpio, 0);

		unregister_netdev(dev);
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
		free_netdev(dev);
	}

	return 0;
}

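/* When Wake-on-LAN is enabled, keep the clocks running and arm the
 * magic-packet interrupt before suspending; otherwise gate all clocks.
 */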
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IER, MACB_BIT(WOL));
		macb_writel(bp, WOL, MACB_BIT(MAG));
		enable_irq_wake(bp->queues[0].irq);
	} else {
		clk_disable_unprepare(bp->tx_clk);
		clk_disable_unprepare(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_disable_unprepare(bp->rx_clk);
	}

	return 0;
}

static int __maybe_unused macb_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	if (bp->wol & MACB_WOL_ENABLED) {
		macb_writel(bp, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		disable_irq_wake(bp->queues[0].irq);
	} else {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
	}

	netif_device_attach(netdev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(macb_pm_ops, macb_suspend, macb_resume);

static struct platform_driver macb_driver = {
	.probe		= macb_probe,
	.remove		= macb_remove,
	.driver		= {
		.name		= "macb",
		.of_match_table	= of_match_ptr(macb_dt_ids),
		.pm	= &macb_pm_ops,
	},
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");