1
2
3
4
5
6
7
8
9
10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12#include <linux/clk.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/circ_buf.h>
18#include <linux/slab.h>
19#include <linux/init.h>
20#include <linux/gpio.h>
21#include <linux/interrupt.h>
22#include <linux/netdevice.h>
23#include <linux/etherdevice.h>
24#include <linux/dma-mapping.h>
25#include <linux/platform_data/macb.h>
26#include <linux/platform_device.h>
27#include <linux/phy.h>
28#include <linux/of.h>
29#include <linux/of_device.h>
30#include <linux/of_mdio.h>
31#include <linux/of_net.h>
32#include <linux/pinctrl/consumer.h>
33
34#include "macb.h"
35
36#define MACB_RX_BUFFER_SIZE 128
37#define RX_BUFFER_MULTIPLE 64
38#define RX_RING_SIZE 512
39#define RX_RING_BYTES (sizeof(struct macb_dma_desc) * RX_RING_SIZE)
40
41#define TX_RING_SIZE 128
42#define TX_RING_BYTES (sizeof(struct macb_dma_desc) * TX_RING_SIZE)
43
44
45#define MACB_TX_WAKEUP_THRESH (3 * TX_RING_SIZE / 4)
46
47#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
48 | MACB_BIT(ISR_ROVR))
49#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
50 | MACB_BIT(ISR_RLE) \
51 | MACB_BIT(TXERR))
52#define MACB_TX_INT_FLAGS (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
53
54
55
56
57
58#define MACB_HALT_TIMEOUT 1230
59
60
61static unsigned int macb_tx_ring_wrap(unsigned int index)
62{
63 return index & (TX_RING_SIZE - 1);
64}
65
/* Return the TX DMA descriptor for a (possibly unwrapped) ring index. */
static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
{
	return &bp->tx_ring[macb_tx_ring_wrap(index)];
}
70
/* Return the per-descriptor skb bookkeeping entry for a TX ring index. */
static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
{
	return &bp->tx_skb[macb_tx_ring_wrap(index)];
}
75
76static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
77{
78 dma_addr_t offset;
79
80 offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
81
82 return bp->tx_ring_dma + offset;
83}
84
85static unsigned int macb_rx_ring_wrap(unsigned int index)
86{
87 return index & (RX_RING_SIZE - 1);
88}
89
/* Return the RX DMA descriptor for a (possibly unwrapped) ring index. */
static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
	return &bp->rx_ring[macb_rx_ring_wrap(index)];
}
94
/* CPU address of the RX data buffer backing a ring slot (MACB copy path:
 * one contiguous coherent allocation, rx_buffer_size bytes per slot).
 */
static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
}
99
/* Program dev->dev_addr into Specific Address register pair 1 and clear the
 * other three specific-address filters so stale entries cannot match.
 */
void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}
EXPORT_SYMBOL_GPL(macb_set_hwaddr);
119
/* Read back a MAC address from the four specific-address register pairs
 * (SA1..SA4, 8 bytes of register space apart), taking the first valid one.
 * Platform data may request byte-reversed decoding (rev_eth_addr).  If no
 * valid address is found, fall back to a random locally-administered one.
 */
void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = dev_get_platdata(&bp->pdev->dev);

	/* Check all 4 address register for valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			/* Byte-reversed layout requested by platform data */
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	netdev_info(bp->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}
EXPORT_SYMBOL_GPL(macb_get_hwaddr);
161
/* MDIO bus read: issue a Clause 22 read frame via the MAN register and
 * busy-wait until the management interface reports idle, then return the
 * 16-bit data field.  Called from phylib with the bus lock held.
 */
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}
181
/* MDIO bus write: issue a Clause 22 write frame via the MAN register and
 * busy-wait for the management interface to go idle.  Always succeeds.
 */
static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

	/* wait for end of transfer */
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}
200
/* MDIO bus reset hook: nothing to do for this controller. */
static int macb_mdio_reset(struct mii_bus *bus)
{
	return 0;
}
205
/* phylib adjust_link callback: reprogram NCFGR speed/duplex bits to match
 * the PHY state under bp->lock, track link state, and report transitions
 * via carrier state and the kernel log (logging done outside the lock).
 */
static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;
	unsigned long flags;

	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			/* Clear old speed/duplex bits before setting new ones */
			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			/* Link went down: invalidate cached speed/duplex */
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}
266
267
/* Find the first PHY on our MDIO bus and attach to it.  If platform data
 * provides a valid PHY-interrupt GPIO, wire it up (falling back to polling
 * when the GPIO has no IRQ).  Advertised features are limited to gigabit
 * on GEM and 10/100 otherwise.  Returns 0 or a negative errno.
 */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	phydev = phy_find_first(bp->mii_bus);
	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -ENXIO;
	}

	pdata = dev_get_platdata(&bp->pdev->dev);
	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
		if (!ret) {
			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
		}
	}

	/* attach the mac to the phy */
	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
				 bp->phy_interface);
	if (ret) {
		netdev_err(dev, "Could not attach to PHY\n");
		return ret;
	}

	/* mask with MAC supported features */
	if (macb_is_gem(bp))
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;
	bp->phy_dev = phydev;

	return 0;
}
314
/* Set up and register the MDIO bus, then probe for the PHY.
 *
 * Enables the management port (MPE), allocates the mii_bus and its per-PHY
 * irq table, registers via the device tree when an of_node is present
 * (manually scanning each address if registration found no PHY) or via the
 * legacy platform path otherwise, and finally attaches to the PHY.
 * Resources are unwound in reverse order on failure.  Returns 0 or a
 * negative errno.
 */
int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	struct device_node *np;
	int err = -ENXIO, i;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	bp->mii_bus->reset = &macb_mdio_reset;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->dev->dev;
	pdata = dev_get_platdata(&bp->pdev->dev);

	bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!bp->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	np = bp->pdev->dev.of_node;
	if (np) {
		/* try dt phy registration */
		err = of_mdiobus_register(bp->mii_bus, np);

		/* fallback to standard phy registration if no phy were
		 * found during dt phy registration
		 */
		if (!err && !phy_find_first(bp->mii_bus)) {
			for (i = 0; i < PHY_MAX_ADDR; i++) {
				struct phy_device *phydev;

				phydev = mdiobus_scan(bp->mii_bus, i);
				if (IS_ERR(phydev)) {
					err = PTR_ERR(phydev);
					break;
				}
			}

			if (err)
				goto err_out_unregister_bus;
		}
	} else {
		/* Legacy (non-DT) path: poll every PHY address */
		for (i = 0; i < PHY_MAX_ADDR; i++)
			bp->mii_bus->irq[i] = PHY_POLL;

		if (pdata)
			bp->mii_bus->phy_mask = pdata->phy_mask;

		err = mdiobus_register(bp->mii_bus);
	}

	if (err)
		goto err_out_free_mdio_irq;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdio_irq:
	kfree(bp->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}
EXPORT_SYMBOL_GPL(macb_mii_init);
398
/* Accumulate the MACB hardware statistics counters (registers MACB_PFR
 * through MACB_TPF) into the corresponding consecutive u32 fields of
 * bp->hw_stats.macb.  The WARN_ON asserts that the struct layout still
 * mirrors the register block one-to-one.
 */
static void macb_update_stats(struct macb *bp)
{
	u32 __iomem *reg = bp->regs + MACB_PFR;
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for(; p < end; p++, reg++)
		*p += __raw_readl(reg);
}
410
/* Request a transmit halt (THALT) and poll TSR until the transmitter stops
 * (TGO clears) or MACB_HALT_TIMEOUT microseconds elapse.  Returns 0 on
 * success, -ETIMEDOUT otherwise.  Sleeps between polls, so may only be
 * called from process context.
 */
static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		usleep_range(10, 250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}
430
/* Workqueue handler run after a TX error interrupt (the ISR disabled TX
 * interrupts before scheduling us).  Stops the queue, halts the
 * transmitter, reclaims every in-flight descriptor between tx_tail and
 * tx_head (counting completed ones as transmitted, marking the rest used),
 * resets the ring, and re-enables transmission.
 */
static void macb_tx_error_task(struct work_struct *work)
{
	struct macb *bp = container_of(work, struct macb, tx_error_task);
	struct macb_tx_skb *tx_skb;
	struct sk_buff *skb;
	unsigned int tail;

	netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
		    bp->tx_tail, bp->tx_head);

	/* Make sure nobody is trying to queue up new packets */
	netif_stop_queue(bp->dev);

	/* Stop transmission now (we could be transmitting a frame that is
	 * corrupted by the error condition).
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(bp, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(bp, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* Descriptor completed before the error hit */
			netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
				    macb_tx_ring_wrap(tail), skb->data);
			bp->stats.tx_packets++;
			bp->stats.tx_bytes += skb->len;
		} else {
			/* "Buffers exhausted mid-frame" shouldn't happen:
			 * all frames are single-buffer here.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			/* Hand the descriptor back as used */
			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
				 DMA_TO_DEVICE);
		tx_skb->skb = NULL;
		dev_kfree_skb(skb);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	macb_writel(bp, TBQP, bp->tx_ring_dma);
	/* Make TX ring reflect state of hardware */
	bp->tx_head = bp->tx_tail = 0;

	/* Now we are ready to start transmission again */
	netif_wake_queue(bp->dev);

	/* Housework before enabling TX IRQ: clear pending TX status */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	macb_writel(bp, IER, MACB_TX_INT_FLAGS);
}
506
/* TX-complete interrupt handler: acknowledge TSR (and ISR on controllers
 * that clear on write), then walk tx_tail..tx_head reclaiming descriptors
 * the hardware has marked TX_USED, unmapping and freeing their skbs and
 * updating stats.  Wakes the queue once enough ring space is available.
 * Called with bp->lock held from the ISR.
 */
static void macb_tx_interrupt(struct macb *bp)
{
	unsigned int tail;
	unsigned int head;
	u32 status;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		macb_writel(bp, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = bp->tx_head;
	for (tail = bp->tx_tail; tail != head; tail++) {
		struct macb_tx_skb *tx_skb;
		struct sk_buff *skb;
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(bp, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		tx_skb = macb_tx_skb(bp, tail);
		skb = tx_skb->skb;

		netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
			    macb_tx_ring_wrap(tail), skb->data);
		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
				 DMA_TO_DEVICE);
		bp->stats.tx_packets++;
		bp->stats.tx_bytes += skb->len;
		tx_skb->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_tail = tail;
	if (netif_queue_stopped(bp->dev)
	    && CIRC_CNT(bp->tx_head, bp->tx_tail,
			TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);
}
558
/* GEM RX path: replenish the RX ring with freshly allocated, DMA-mapped
 * skbs for every slot between rx_tail and rx_prepared_head that the
 * hardware has already consumed.  Descriptors still marked RX_USED are
 * skipped.  NOTE(review): skb->data is mapped before skb_reserve(); the
 * controller itself offsets writes by NET_IP_ALIGN via the RBOF field set
 * in macb_init_hw(), so the two stay consistent — confirm when changing
 * either side.
 */
static void gem_rx_refill(struct macb *bp)
{
	unsigned int entry;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	dma_addr_t paddr;

	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
		u32 addr, ctrl;

		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
		desc = &bp->rx_ring[entry];

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;
		bp->rx_prepared_head++;

		if ((addr & MACB_BIT(RX_USED)))
			continue;

		if (bp->rx_skbuff[entry] == NULL) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(skb == NULL)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}
			bp->rx_skbuff[entry] = skb;

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size, DMA_FROM_DEVICE);

			/* Last slot carries the ring-wrap marker */
			if (entry == RX_RING_SIZE - 1)
				paddr |= MACB_BIT(RX_WRAP);
			bp->rx_ring[entry].addr = paddr;
			bp->rx_ring[entry].ctrl = 0;

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
		    bp->rx_prepared_head, bp->rx_tail);
}
612
613
/* Mark each descriptor in [begin, end) as unused so the hardware can reuse
 * the fragments of an incomplete frame.  rx_tail is deliberately not
 * advanced here: the hardware may still own some of these descriptors,
 * and whole frames will be flushed on the next macb_rx() pass.
 */
static void discard_partial_frame(struct macb *bp, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this is updated, so we don't have to record
	 * anything.
	 */
}
633
/* GEM RX path: consume up to @budget whole frames from the RX ring.  Each
 * descriptor is expected to hold a complete frame (SOF and EOF set); the
 * attached skb is handed straight to the stack (zero-copy) and the slot is
 * refilled at the end via gem_rx_refill().  Returns the number of frames
 * delivered.
 */
static int gem_rx(struct macb *bp, int budget)
{
	unsigned int len;
	unsigned int entry;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	int count = 0;

	while (count < budget) {
		u32 addr, ctrl;

		entry = macb_rx_ring_wrap(bp->rx_tail);
		desc = &bp->rx_ring[entry];

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		if (!(addr & MACB_BIT(RX_USED)))
			break;

		desc->addr &= ~MACB_BIT(RX_USED);
		bp->rx_tail++;
		count++;

		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
			netdev_err(bp->dev,
				   "not whole frame pointed by descriptor\n");
			bp->stats.rx_dropped++;
			break;
		}
		skb = bp->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(bp->dev,
				   "inconsistent Rx descriptor chain\n");
			bp->stats.rx_dropped++;
			break;
		}
		/* now everything is ready for receiving packet */
		bp->rx_skbuff[entry] = NULL;
		len = MACB_BFEXT(RX_FRMLEN, ctrl);

		netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);

		skb_put(skb, len);
		/* Recover the bus address from the descriptor's address field */
		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
		dma_unmap_single(&bp->pdev->dev, addr,
				 len, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb_checksum_none_assert(skb);

		bp->stats.rx_packets++;
		bp->stats.rx_bytes += skb->len;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
		netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
			    skb->len, skb->csum);
		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->mac_header, 16, true);
		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
			       skb->data, 32, true);
#endif

		netif_receive_skb(skb);
	}

	gem_rx_refill(bp);

	return count;
}
707
/* MACB (copy) RX path: assemble one frame spanning descriptors
 * [first_frag, last_frag] by copying each fragment's data into a newly
 * allocated skb, then hand the descriptors back to hardware and deliver
 * the skb.  Returns 0 on success, 1 if the frame was dropped due to
 * allocation failure.
 */
static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;

	desc = macb_rx_desc(bp, last_frag);
	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		macb_rx_ring_wrap(first_frag),
		macb_rx_ring_wrap(last_frag), len);

	/* The ethernet header starts NET_IP_ALIGN bytes into the first
	 * buffer.  Since the header is 14 bytes, this makes the payload
	 * word-aligned.  Instead of calling skb_reserve(NET_IP_ALIGN) we
	 * copy the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->stats.rx_dropped++;
		/* Allocation failed: give all fragments back to hardware */
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(bp, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

		/* Make descriptor updates visible to hardware */
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = bp->rx_buffer_size;

		if (offset + frag_len > len) {
			/* Only the last fragment may be shorter */
			BUG_ON(frag != last_frag);
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
				macb_rx_buffer(bp, frag), frag_len);
		offset += bp->rx_buffer_size;
		desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->stats.rx_packets++;
	bp->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		   skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}
785
/* MACB (copy) RX path: scan the ring for completed descriptors, tracking
 * the SOF descriptor of the frame in progress; when its EOF is found, hand
 * the fragment span to macb_rx_frame().  A new SOF while a frame is open
 * discards the earlier partial frame.  rx_tail is left at the open frame's
 * start so it can be completed on the next poll.  Returns frames delivered
 * (at most @budget).
 */
static int macb_rx(struct macb *bp, int budget)
{
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = bp->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
		u32 addr, ctrl;

		/* Make hw descriptor updates visible to CPU */
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		if (!(addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;
			BUG_ON(first_frag == -1);

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}
831
/* NAPI poll: acknowledge RX status, process up to @budget frames via the
 * variant-specific RX handler, and when under budget complete NAPI and
 * re-enable RX interrupts — rechecking RSR afterwards to close the race
 * where a frame arrived between the last poll and interrupt re-enable.
 */
static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb *bp = container_of(napi, struct macb, napi);
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	work_done = 0;

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		   (unsigned long)status, budget);

	work_done = bp->macbgem_ops.mog_rx(bp, budget);
	if (work_done < budget) {
		napi_complete(napi);

		/* Packets received while interrupts were disabled */
		macb_writel(bp, IER, MACB_RX_INT_FLAGS);

		/* Recheck for pending frames to avoid missing an interrupt */
		status = macb_readl(bp, RSR);
		if (unlikely(status))
			napi_reschedule(napi);
	}

	/* TODO: Handle errors */

	return work_done;
}
866
867static irqreturn_t macb_interrupt(int irq, void *dev_id)
868{
869 struct net_device *dev = dev_id;
870 struct macb *bp = netdev_priv(dev);
871 u32 status;
872
873 status = macb_readl(bp, ISR);
874
875 if (unlikely(!status))
876 return IRQ_NONE;
877
878 spin_lock(&bp->lock);
879
880 while (status) {
881
882 if (unlikely(!netif_running(dev))) {
883 macb_writel(bp, IDR, -1);
884 break;
885 }
886
887 netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
888
889 if (status & MACB_RX_INT_FLAGS) {
890
891
892
893
894
895
896
897 macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
898 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
899 macb_writel(bp, ISR, MACB_BIT(RCOMP));
900
901 if (napi_schedule_prep(&bp->napi)) {
902 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
903 __napi_schedule(&bp->napi);
904 }
905 }
906
907 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
908 macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
909 schedule_work(&bp->tx_error_task);
910 break;
911 }
912
913 if (status & MACB_BIT(TCOMP))
914 macb_tx_interrupt(bp);
915
916
917
918
919
920
921 if (status & MACB_BIT(ISR_ROVR)) {
922
923 if (macb_is_gem(bp))
924 bp->hw_stats.gem.rx_overruns++;
925 else
926 bp->hw_stats.macb.rx_overruns++;
927 }
928
929 if (status & MACB_BIT(HRESP)) {
930
931
932
933
934
935 netdev_err(dev, "DMA bus error: HRESP not OK\n");
936 }
937
938 status = macb_readl(bp, ISR);
939 }
940
941 spin_unlock(&bp->lock);
942
943 return IRQ_HANDLED;
944}
945
946#ifdef CONFIG_NET_POLL_CONTROLLER
947
948
949
950
/* Netpoll hook: invoke the interrupt handler by hand with local interrupts
 * disabled so netconsole and similar can drain the device without a real
 * IRQ being delivered.
 */
static void macb_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	macb_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
959#endif
960
961static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
962{
963 struct macb *bp = netdev_priv(dev);
964 dma_addr_t mapping;
965 unsigned int len, entry;
966 struct macb_dma_desc *desc;
967 struct macb_tx_skb *tx_skb;
968 u32 ctrl;
969 unsigned long flags;
970
971#if defined(DEBUG) && defined(VERBOSE_DEBUG)
972 netdev_vdbg(bp->dev,
973 "start_xmit: len %u head %p data %p tail %p end %p\n",
974 skb->len, skb->head, skb->data,
975 skb_tail_pointer(skb), skb_end_pointer(skb));
976 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
977 skb->data, 16, true);
978#endif
979
980 len = skb->len;
981 spin_lock_irqsave(&bp->lock, flags);
982
983
984 if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
985 netif_stop_queue(dev);
986 spin_unlock_irqrestore(&bp->lock, flags);
987 netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
988 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
989 bp->tx_head, bp->tx_tail);
990 return NETDEV_TX_BUSY;
991 }
992
993 entry = macb_tx_ring_wrap(bp->tx_head);
994 bp->tx_head++;
995 netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
996 mapping = dma_map_single(&bp->pdev->dev, skb->data,
997 len, DMA_TO_DEVICE);
998
999 tx_skb = &bp->tx_skb[entry];
1000 tx_skb->skb = skb;
1001 tx_skb->mapping = mapping;
1002 netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
1003 skb->data, (unsigned long)mapping);
1004
1005 ctrl = MACB_BF(TX_FRMLEN, len);
1006 ctrl |= MACB_BIT(TX_LAST);
1007 if (entry == (TX_RING_SIZE - 1))
1008 ctrl |= MACB_BIT(TX_WRAP);
1009
1010 desc = &bp->tx_ring[entry];
1011 desc->addr = mapping;
1012 desc->ctrl = ctrl;
1013
1014
1015 wmb();
1016
1017 skb_tx_timestamp(skb);
1018
1019 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1020
1021 if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
1022 netif_stop_queue(dev);
1023
1024 spin_unlock_irqrestore(&bp->lock, flags);
1025
1026 return NETDEV_TX_OK;
1027}
1028
/* Choose the per-slot RX buffer size.  MACB uses the fixed copy-path
 * fragment size; GEM takes the caller's size (derived from the MTU) rounded
 * up to the hardware-required multiple of RX_BUFFER_MULTIPLE bytes.
 */
static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
{
	if (!macb_is_gem(bp)) {
		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
	} else {
		bp->rx_buffer_size = size;

		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
			netdev_dbg(bp->dev,
				    "RX buffer must be multiple of %d bytes, expanding\n",
				    RX_BUFFER_MULTIPLE);
			bp->rx_buffer_size =
				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
		}
	}

	netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%Zu]\n",
		   bp->dev->mtu, bp->rx_buffer_size);
}
1048
1049static void gem_free_rx_buffers(struct macb *bp)
1050{
1051 struct sk_buff *skb;
1052 struct macb_dma_desc *desc;
1053 dma_addr_t addr;
1054 int i;
1055
1056 if (!bp->rx_skbuff)
1057 return;
1058
1059 for (i = 0; i < RX_RING_SIZE; i++) {
1060 skb = bp->rx_skbuff[i];
1061
1062 if (skb == NULL)
1063 continue;
1064
1065 desc = &bp->rx_ring[i];
1066 addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
1067 dma_unmap_single(&bp->pdev->dev, addr, skb->len,
1068 DMA_FROM_DEVICE);
1069 dev_kfree_skb_any(skb);
1070 skb = NULL;
1071 }
1072
1073 kfree(bp->rx_skbuff);
1074 bp->rx_skbuff = NULL;
1075}
1076
/* Free the single coherent RX data buffer block used by the MACB copy path
 * (allocated in macb_alloc_rx_buffers()); no-op if never allocated.
 */
static void macb_free_rx_buffers(struct macb *bp)
{
	if (bp->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  RX_RING_SIZE * bp->rx_buffer_size,
				  bp->rx_buffers, bp->rx_buffers_dma);
		bp->rx_buffers = NULL;
	}
}
1086
1087static void macb_free_consistent(struct macb *bp)
1088{
1089 if (bp->tx_skb) {
1090 kfree(bp->tx_skb);
1091 bp->tx_skb = NULL;
1092 }
1093 bp->macbgem_ops.mog_free_rx_buffers(bp);
1094 if (bp->rx_ring) {
1095 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
1096 bp->rx_ring, bp->rx_ring_dma);
1097 bp->rx_ring = NULL;
1098 }
1099 if (bp->tx_ring) {
1100 dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
1101 bp->tx_ring, bp->tx_ring_dma);
1102 bp->tx_ring = NULL;
1103 }
1104}
1105
1106static int gem_alloc_rx_buffers(struct macb *bp)
1107{
1108 int size;
1109
1110 size = RX_RING_SIZE * sizeof(struct sk_buff *);
1111 bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
1112 if (!bp->rx_skbuff)
1113 return -ENOMEM;
1114 else
1115 netdev_dbg(bp->dev,
1116 "Allocated %d RX struct sk_buff entries at %p\n",
1117 RX_RING_SIZE, bp->rx_skbuff);
1118 return 0;
1119}
1120
/* Allocate one contiguous coherent block holding all RX data buffers for
 * the MACB copy path.  Returns 0 on success, -ENOMEM on failure.
 */
static int macb_alloc_rx_buffers(struct macb *bp)
{
	int size;

	size = RX_RING_SIZE * bp->rx_buffer_size;
	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					    &bp->rx_buffers_dma, GFP_KERNEL);
	if (!bp->rx_buffers)
		return -ENOMEM;
	else
		netdev_dbg(bp->dev,
			   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
			   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
	return 0;
}
1136
/* Allocate all DMA-related state: the TX skb bookkeeping array, both
 * coherent descriptor rings, and the variant-specific RX buffers.  On any
 * failure everything already allocated is freed via macb_free_consistent().
 * Returns 0 or -ENOMEM.
 */
static int macb_alloc_consistent(struct macb *bp)
{
	int size;

	size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
	bp->tx_skb = kmalloc(size, GFP_KERNEL);
	if (!bp->tx_skb)
		goto out_err;

	size = RX_RING_BYTES;
	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->rx_ring_dma, GFP_KERNEL);
	if (!bp->rx_ring)
		goto out_err;
	netdev_dbg(bp->dev,
		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

	size = TX_RING_BYTES;
	bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->tx_ring_dma, GFP_KERNEL);
	if (!bp->tx_ring)
		goto out_err;
	netdev_dbg(bp->dev,
		   "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);

	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
		goto out_err;

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}
1173
/* Initialize the rings for GEM: mark every TX descriptor used (nothing
 * queued), set the TX wrap bit on the last slot, reset all ring indices,
 * and populate the RX ring with skbs via gem_rx_refill().
 */
static void gem_init_rings(struct macb *bp)
{
	int i;

	for (i = 0; i < TX_RING_SIZE; i++) {
		bp->tx_ring[i].addr = 0;
		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
	}
	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);

	bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0;

	gem_rx_refill(bp);
}
1188
/* Initialize the rings for MACB: point each RX descriptor at its slice of
 * the contiguous coherent RX buffer block, mark every TX descriptor used,
 * set the wrap bits on the last slots, and reset the ring indices.
 */
static void macb_init_rings(struct macb *bp)
{
	int i;
	dma_addr_t addr;

	addr = bp->rx_buffers_dma;
	for (i = 0; i < RX_RING_SIZE; i++) {
		bp->rx_ring[i].addr = addr;
		bp->rx_ring[i].ctrl = 0;
		addr += bp->rx_buffer_size;
	}
	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);

	for (i = 0; i < TX_RING_SIZE; i++) {
		bp->tx_ring[i].addr = 0;
		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
	}
	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);

	bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
}
1210
/* Put the controller into a quiescent state: disable RX/TX, clear the
 * statistics registers, acknowledge all pending TX/RX status, and mask
 * every interrupt (reading ISR to flush any level-pending cause).
 */
static void macb_reset_hw(struct macb *bp)
{
	/* Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */
	macb_writel(bp, NCR, 0);

	/* Clear the stats registers (XXX: Update stats first?) */
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

	/* Clear all status flags */
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

	/* Disable all interrupts */
	macb_writel(bp, IDR, -1);
	macb_readl(bp, ISR);
}
1230
/* Pick the GEM MDC clock divider (NCFGR CLK field) from the peripheral
 * clock rate so the management clock stays within spec.
 */
static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}
1251
/* Pick the MDC clock divider for the NCFGR CLK field; GEM has a wider
 * divider range and is handled by gem_mdc_clk_div().
 */
static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}
1272
1273
1274
1275
1276
1277
/* Return the NCFGR data-bus-width field for GEM, decoded from the DCFG1
 * design-configuration register; plain MACB has no such field (returns 0).
 */
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}
1293
1294
1295
1296
1297
1298
1299
1300
1301
/* Configure GEM DMA: program the RX buffer size (in RX_BUFFER_MULTIPLE
 * units), a 16-beat TX burst, full packet-buffer memory, and little-endian
 * descriptor access.  Plain MACB has no DMACFG register, so it is a no-op.
 */
static void macb_configure_dma(struct macb *bp)
{
	u32 dmacfg;

	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
		dmacfg |= GEM_BF(FBLDO, 16);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA);
		gem_writel(bp, DMACFG, dmacfg);
	}
}
1315
1316
1317
1318
/* Derive runtime capability flags from the GEM design-configuration
 * registers; DCFG1.IRQCOR == 0 means the ISR is clear-on-write rather than
 * clear-on-read.
 */
static void macb_configure_caps(struct macb *bp)
{
	if (macb_is_gem(bp)) {
		if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
	}
}
1326
/* Bring the controller up: reset it, program the MAC address, build NCFGR
 * (MDC divider, NET_IP_ALIGN receive-buffer offset, pause, FCS strip,
 * jumbo/big frames, promiscuous/broadcast policy, bus width), configure
 * DMA and caps, load the ring base addresses, enable RX/TX and the
 * management port, and unmask the interrupts we handle.
 */
static void macb_init_hw(struct macb *bp)
{
	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
	config |= MACB_BIT(PAE);		/* PAuse Enable */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	config |= MACB_BIT(BIG);		/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	bp->speed = SPEED_10;
	bp->duplex = DUPLEX_HALF;

	macb_configure_dma(bp);
	macb_configure_caps(bp);

	/* Initialize TX and RX buffers */
	macb_writel(bp, RBQP, bp->rx_ring_dma);
	macb_writel(bp, TBQP, bp->tx_ring_dma);

	/* Enable TX and RX */
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));

	/* Enable interrupts */
	macb_writel(bp, IER, (MACB_RX_INT_FLAGS
			      | MACB_TX_INT_FLAGS
			      | MACB_BIT(HRESP)));

}
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399static inline int hash_bit_value(int bitnr, __u8 *addr)
1400{
1401 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
1402 return 1;
1403 return 0;
1404}
1405
1406
1407
1408
/* Compute the 6-bit multicast hash index: output bit j is the XOR of every
 * sixth address bit starting at bit j (the hash function the controller's
 * hash filter implements).
 */
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i*6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}
1423
1424
1425
1426
/* Program the 64-bit multicast hash filter (HRB/HRT) from the device's
 * current multicast address list.
 */
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}
1444
1445
1446
1447
/* net_device_ops.ndo_set_rx_mode: program promiscuous and multicast
 * filtering according to dev->flags and the multicast address list.
 */
void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC)
		/* Enable promiscuous mode (copy all frames) */
		cfg |= MACB_BIT(CAF);
	else if (dev->flags & (~IFF_PROMISC))
		/* Disable promiscuous mode.
		 * NOTE(review): this condition is true whenever any flag
		 * other than IFF_PROMISC is set (e.g. IFF_UP), so it acts
		 * as a plain "else" in practice — confirm the intent.
		 */
		cfg &= ~MACB_BIT(CAF);

	if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
		/* Accept the specific multicast addresses via the hash. */
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
		/* No multicast wanted: clear the hash filter.
		 * NOTE(review): same quasi-"else" condition as above. */
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}
EXPORT_SYMBOL_GPL(macb_set_rx_mode);
1481
/* net_device_ops.ndo_open: size and allocate the DMA rings, start the
 * hardware and the PHY, then let the stack transmit.
 * Returns 0 on success or a negative errno (-EAGAIN if no PHY is
 * attached yet).
 */
static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	/* An RX buffer must hold an MTU-sized frame plus the Ethernet
	 * header, FCS and the NET_IP_ALIGN offset. */
	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
	int err;

	netdev_dbg(bp->dev, "open\n");

	/* Carrier starts down; the PHY state machine raises it later. */
	netif_carrier_off(dev);

	/* If the PHY is not yet registered, ask the caller to retry. */
	if (!bp->phy_dev)
		return -EAGAIN;

	/* Compute the RX buffer size for this MTU. */
	macb_init_rx_buffer_size(bp, bufsz);

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		return err;
	}

	napi_enable(&bp->napi);

	bp->macbgem_ops.mog_init_rings(bp);
	macb_init_hw(bp);

	/* Schedule a link state check. */
	phy_start(bp->phy_dev);

	netif_start_queue(dev);

	return 0;
}
1519
/* net_device_ops.ndo_stop: quiesce the stack, the PHY and the MAC,
 * then release the DMA rings.  Always returns 0.
 */
static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	if (bp->phy_dev)
		phy_stop(bp->phy_dev);

	/* Reset under bp->lock so concurrent paths (interrupt handler,
	 * xmit) never observe a half-reset device. */
	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	return 0;
}
1540
/* Accumulate the GEM hardware statistics registers into the software
 * counters in bp->hw_stats.gem.
 *
 * NOTE(review): this walks struct gem_stats as a flat array of u32 and
 * assumes its members are laid out in exactly the same order as the
 * register block starting at offset GEM_OTX — verify against macb.h
 * before changing either side.
 */
static void gem_update_stats(struct macb *bp)
{
	u32 __iomem *reg = bp->regs + GEM_OTX;
	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
	u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;

	/* Accumulate (+=) so repeated calls sum the hardware counters;
	 * presumably the registers clear on read — confirm. */
	for (; p < end; p++, reg++)
		*p += __raw_readl(reg);
}
1550
/* Refresh the GEM hardware counters and fold them into the generic
 * net_device_stats view expected by the networking core.
 * Returns a pointer to bp->stats.
 */
static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->stats;

	/* Pull the latest values out of the hardware first. */
	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}
1588
/* net_device_ops.ndo_get_stats: return accumulated statistics.
 * GEM variants delegate to gem_get_stats(); plain MACB devices read
 * their own (differently named) counter set here.
 */
struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

	/* Read stats from hardware. */
	macb_update_stats(bp);

	/* Convert the MACB hardware counters into the generic view. */
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->sqe_test_errors +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	/* No per-counter source for rx_dropped/missed in this block. */
	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;

	return nstat;
}
EXPORT_SYMBOL_GPL(macb_get_stats);
1636
1637static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1638{
1639 struct macb *bp = netdev_priv(dev);
1640 struct phy_device *phydev = bp->phy_dev;
1641
1642 if (!phydev)
1643 return -ENODEV;
1644
1645 return phy_ethtool_gset(phydev, cmd);
1646}
1647
1648static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1649{
1650 struct macb *bp = netdev_priv(dev);
1651 struct phy_device *phydev = bp->phy_dev;
1652
1653 if (!phydev)
1654 return -ENODEV;
1655
1656 return phy_ethtool_sset(phydev, cmd);
1657}
1658
/* ethtool get_regs_len: size in bytes of the dump that macb_get_regs()
 * produces (MACB_GREGS_NBR 32-bit words).
 */
static int macb_get_regs_len(struct net_device *netdev)
{
	return MACB_GREGS_NBR * sizeof(u32);
}
1663
/* ethtool get_regs: snapshot a fixed set of MAC registers, plus the
 * current TX ring indices and their descriptor DMA addresses, into the
 * caller-provided buffer p (sized by macb_get_regs_len()).
 */
static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	/* Version = dump format version | hardware revision field. */
	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	tail = macb_tx_ring_wrap(bp->tx_tail);
	head = macb_tx_ring_wrap(bp->tx_head);

	regs_buff[0] = macb_readl(bp, NCR);
	regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2] = macb_readl(bp, NSR);
	regs_buff[3] = macb_readl(bp, TSR);
	regs_buff[4] = macb_readl(bp, RBQP);
	regs_buff[5] = macb_readl(bp, TBQP);
	regs_buff[6] = macb_readl(bp, RSR);
	regs_buff[7] = macb_readl(bp, IMR);

	/* Software TX ring state, for debugging stalls. */
	regs_buff[8] = tail;
	regs_buff[9] = head;
	regs_buff[10] = macb_tx_dma(bp, tail);
	regs_buff[11] = macb_tx_dma(bp, head);

	/* GEM-only registers. */
	if (macb_is_gem(bp)) {
		regs_buff[12] = gem_readl(bp, USRIO);
		regs_buff[13] = gem_readl(bp, DMACFG);
	}
}
1696
/* ethtool operations table, exported for reuse by related drivers. */
const struct ethtool_ops macb_ethtool_ops = {
	.get_settings = macb_get_settings,
	.set_settings = macb_set_settings,
	.get_regs_len = macb_get_regs_len,
	.get_regs = macb_get_regs,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
};
EXPORT_SYMBOL_GPL(macb_ethtool_ops);
1706
1707int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1708{
1709 struct macb *bp = netdev_priv(dev);
1710 struct phy_device *phydev = bp->phy_dev;
1711
1712 if (!netif_running(dev))
1713 return -EINVAL;
1714
1715 if (!phydev)
1716 return -ENODEV;
1717
1718 return phy_mii_ioctl(phydev, rq, cmd);
1719}
1720EXPORT_SYMBOL_GPL(macb_ioctl);
1721
/* net_device callbacks; generic eth_* helpers cover address handling
 * and MTU changes.
 */
static const struct net_device_ops macb_netdev_ops = {
	.ndo_open = macb_open,
	.ndo_stop = macb_close,
	.ndo_start_xmit = macb_start_xmit,
	.ndo_set_rx_mode = macb_set_rx_mode,
	.ndo_get_stats = macb_get_stats,
	.ndo_do_ioctl = macb_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = macb_poll_controller,
#endif
};
1736
#if defined(CONFIG_OF)
/* Device-tree match table: covers both MACB and GEM compatibles. */
static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb" },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,pc302-gem" },
	{ .compatible = "cdns,gem" },
	{ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif
1748
1749static int __init macb_probe(struct platform_device *pdev)
1750{
1751 struct macb_platform_data *pdata;
1752 struct resource *regs;
1753 struct net_device *dev;
1754 struct macb *bp;
1755 struct phy_device *phydev;
1756 u32 config;
1757 int err = -ENXIO;
1758 struct pinctrl *pinctrl;
1759 const char *mac;
1760
1761 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1762 if (!regs) {
1763 dev_err(&pdev->dev, "no mmio resource defined\n");
1764 goto err_out;
1765 }
1766
1767 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
1768 if (IS_ERR(pinctrl)) {
1769 err = PTR_ERR(pinctrl);
1770 if (err == -EPROBE_DEFER)
1771 goto err_out;
1772
1773 dev_warn(&pdev->dev, "No pinctrl provided\n");
1774 }
1775
1776 err = -ENOMEM;
1777 dev = alloc_etherdev(sizeof(*bp));
1778 if (!dev)
1779 goto err_out;
1780
1781 SET_NETDEV_DEV(dev, &pdev->dev);
1782
1783
1784 dev->features |= 0;
1785
1786 bp = netdev_priv(dev);
1787 bp->pdev = pdev;
1788 bp->dev = dev;
1789
1790 spin_lock_init(&bp->lock);
1791 INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
1792
1793 bp->pclk = clk_get(&pdev->dev, "pclk");
1794 if (IS_ERR(bp->pclk)) {
1795 dev_err(&pdev->dev, "failed to get macb_clk\n");
1796 goto err_out_free_dev;
1797 }
1798 clk_prepare_enable(bp->pclk);
1799
1800 bp->hclk = clk_get(&pdev->dev, "hclk");
1801 if (IS_ERR(bp->hclk)) {
1802 dev_err(&pdev->dev, "failed to get hclk\n");
1803 goto err_out_put_pclk;
1804 }
1805 clk_prepare_enable(bp->hclk);
1806
1807 bp->regs = ioremap(regs->start, resource_size(regs));
1808 if (!bp->regs) {
1809 dev_err(&pdev->dev, "failed to map registers, aborting.\n");
1810 err = -ENOMEM;
1811 goto err_out_disable_clocks;
1812 }
1813
1814 dev->irq = platform_get_irq(pdev, 0);
1815 err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
1816 if (err) {
1817 dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
1818 dev->irq, err);
1819 goto err_out_iounmap;
1820 }
1821
1822 dev->netdev_ops = &macb_netdev_ops;
1823 netif_napi_add(dev, &bp->napi, macb_poll, 64);
1824 dev->ethtool_ops = &macb_ethtool_ops;
1825
1826 dev->base_addr = regs->start;
1827
1828
1829 if (macb_is_gem(bp)) {
1830 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
1831 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
1832 bp->macbgem_ops.mog_init_rings = gem_init_rings;
1833 bp->macbgem_ops.mog_rx = gem_rx;
1834 } else {
1835 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
1836 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
1837 bp->macbgem_ops.mog_init_rings = macb_init_rings;
1838 bp->macbgem_ops.mog_rx = macb_rx;
1839 }
1840
1841
1842 config = macb_mdc_clk_div(bp);
1843 config |= macb_dbw(bp);
1844 macb_writel(bp, NCFGR, config);
1845
1846 mac = of_get_mac_address(pdev->dev.of_node);
1847 if (mac)
1848 memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
1849 else
1850 macb_get_hwaddr(bp);
1851
1852 err = of_get_phy_mode(pdev->dev.of_node);
1853 if (err < 0) {
1854 pdata = dev_get_platdata(&pdev->dev);
1855 if (pdata && pdata->is_rmii)
1856 bp->phy_interface = PHY_INTERFACE_MODE_RMII;
1857 else
1858 bp->phy_interface = PHY_INTERFACE_MODE_MII;
1859 } else {
1860 bp->phy_interface = err;
1861 }
1862
1863 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
1864 macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
1865 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
1866#if defined(CONFIG_ARCH_AT91)
1867 macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
1868 MACB_BIT(CLKEN)));
1869#else
1870 macb_or_gem_writel(bp, USRIO, 0);
1871#endif
1872 else
1873#if defined(CONFIG_ARCH_AT91)
1874 macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
1875#else
1876 macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
1877#endif
1878
1879 err = register_netdev(dev);
1880 if (err) {
1881 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
1882 goto err_out_free_irq;
1883 }
1884
1885 err = macb_mii_init(bp);
1886 if (err)
1887 goto err_out_unregister_netdev;
1888
1889 platform_set_drvdata(pdev, dev);
1890
1891 netif_carrier_off(dev);
1892
1893 netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
1894 macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
1895 dev->irq, dev->dev_addr);
1896
1897 phydev = bp->phy_dev;
1898 netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
1899 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
1900
1901 return 0;
1902
1903err_out_unregister_netdev:
1904 unregister_netdev(dev);
1905err_out_free_irq:
1906 free_irq(dev->irq, dev);
1907err_out_iounmap:
1908 iounmap(bp->regs);
1909err_out_disable_clocks:
1910 clk_disable_unprepare(bp->hclk);
1911 clk_put(bp->hclk);
1912 clk_disable_unprepare(bp->pclk);
1913err_out_put_pclk:
1914 clk_put(bp->pclk);
1915err_out_free_dev:
1916 free_netdev(dev);
1917err_out:
1918 return err;
1919}
1920
/* Remove: tear down everything probe set up.  Safe to call when probe
 * never completed (drvdata is then NULL and nothing is touched).
 */
static int __exit macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		/* Detach the PHY and dismantle the MII bus first. */
		if (bp->phy_dev)
			phy_disconnect(bp->phy_dev);
		mdiobus_unregister(bp->mii_bus);
		kfree(bp->mii_bus->irq);	/* irq table allocated at MII init */
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(bp->regs);
		/* Release clocks in reverse order of probe. */
		clk_disable_unprepare(bp->hclk);
		clk_put(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_put(bp->pclk);
		free_netdev(dev);
	}

	return 0;
}
1947
1948#ifdef CONFIG_PM
/* Legacy platform PM suspend: mark the interface detached and gate
 * both clocks.  NOTE(review): the MAC is not explicitly quiesced here;
 * presumably detaching the netdev stops all traffic first — confirm.
 */
static int macb_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

	clk_disable_unprepare(bp->hclk);
	clk_disable_unprepare(bp->pclk);

	return 0;
}
1962
/* Legacy platform PM resume: ungate the clocks (same order as probe)
 * and reattach the interface.
 */
static int macb_resume(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	clk_prepare_enable(bp->pclk);
	clk_prepare_enable(bp->hclk);

	netif_device_attach(netdev);

	return 0;
}
1975#else
1976#define macb_suspend NULL
1977#define macb_resume NULL
1978#endif
1979
/* Platform driver.  probe() is __init and registered through
 * module_platform_driver_probe(), so only .remove and the legacy PM
 * callbacks appear here.
 */
static struct platform_driver macb_driver = {
	.remove = __exit_p(macb_remove),
	.suspend = macb_suspend,
	.resume = macb_resume,
	.driver = {
		.name = "macb",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(macb_dt_ids),
	},
};
1990
/* Register at module init; the _probe variant is used because
 * macb_probe() is __init. */
module_platform_driver_probe(macb_driver, macb_probe);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");
1997