/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
#include <linux/pinctrl/consumer.h>

#include "macb.h"

#define RX_BUFFER_SIZE		128
#define RX_RING_SIZE		512 /* must be power of 2 */
#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)

#define TX_RING_SIZE		128 /* must be power of 2 */
#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)

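/* minimum number of free TX descriptors before waking up TX process */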
#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
				 | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
				 | MACB_BIT(ISR_RLE)			\
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))

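/*
 * Graceful stop timeout in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions).
 */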
#define MACB_HALT_TIMEOUT	1230

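/* Ring buffer accessors */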
static unsigned int macb_tx_ring_wrap(unsigned int index)
{
	return index & (TX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
{
	return &bp->tx_ring[macb_tx_ring_wrap(index)];
}

static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
{
	return &bp->tx_skb[macb_tx_ring_wrap(index)];
}

static dma_addr_t macb_tx_dma(struct macb *bp, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);

	return bp->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(unsigned int index)
{
	return index & (RX_RING_SIZE - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
	return &bp->rx_ring[macb_rx_ring_wrap(index)];
}

static void *macb_rx_buffer(struct macb *bp, unsigned int index)
{
	return bp->rx_buffers + RX_BUFFER_SIZE * macb_rx_ring_wrap(index);
}

void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

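	/* Clear unused address register sets */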
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}
EXPORT_SYMBOL_GPL(macb_set_hwaddr);

void macb_get_hwaddr(struct macb *bp)
{
	struct macb_platform_data *pdata;
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	pdata = bp->pdev->dev.platform_data;

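	/* Check all 4 address register sets for a valid address */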
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		if (pdata && pdata->rev_eth_addr) {
			addr[5] = bottom & 0xff;
			addr[4] = (bottom >> 8) & 0xff;
			addr[3] = (bottom >> 16) & 0xff;
			addr[2] = (bottom >> 24) & 0xff;
			addr[1] = top & 0xff;
			addr[0] = (top & 0xff00) >> 8;
		} else {
			addr[0] = bottom & 0xff;
			addr[1] = (bottom >> 8) & 0xff;
			addr[2] = (bottom >> 16) & 0xff;
			addr[3] = (bottom >> 24) & 0xff;
			addr[4] = top & 0xff;
			addr[5] = (top >> 8) & 0xff;
		}

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	netdev_info(bp->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}
EXPORT_SYMBOL_GPL(macb_get_hwaddr);

static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int value;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)));

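	/* wait for end of transfer */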
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));

	return value;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
			      | MACB_BF(RW, MACB_MAN_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_CODE)
			      | MACB_BF(DATA, value)));

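	/* wait for end of transfer */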
	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
		cpu_relax();

	return 0;
}

static int macb_mdio_reset(struct mii_bus *bus)
{
	return 0;
}

static void macb_handle_link_change(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;
	unsigned long flags;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	if (phydev->link) {
		if ((bp->speed != phydev->speed) ||
		    (bp->duplex != phydev->duplex)) {
			u32 reg;

			reg = macb_readl(bp, NCFGR);
			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			if (macb_is_gem(bp))
				reg &= ~GEM_BIT(GBE);

			if (phydev->duplex)
				reg |= MACB_BIT(FD);
			if (phydev->speed == SPEED_100)
				reg |= MACB_BIT(SPD);
			if (phydev->speed == SPEED_1000)
				reg |= GEM_BIT(GBE);

			macb_or_gem_writel(bp, NCFGR, reg);

			bp->speed = phydev->speed;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}
	}

	if (phydev->link != bp->link) {
		if (!phydev->link) {
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link) {
			netif_carrier_on(dev);
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    phydev->duplex == DUPLEX_FULL ?
				    "Full" : "Half");
		} else {
			netif_carrier_off(dev);
			netdev_info(dev, "link down\n");
		}
	}
}

static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct macb_platform_data *pdata;
	struct phy_device *phydev;
	int phy_irq;
	int ret;

	phydev = phy_find_first(bp->mii_bus);
	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -1;
	}

	pdata = dev_get_platdata(&bp->pdev->dev);
	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
					"phy int");
		if (!ret) {
			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
		}
	}

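	/* attach the mac to the phy */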
	ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
				 bp->phy_interface);
	if (ret) {
		netdev_err(dev, "Could not attach to PHY\n");
		return ret;
	}

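	/* mask with MAC supported features */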
	if (macb_is_gem(bp))
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	phydev->advertising = phydev->supported;

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;
	bp->phy_dev = phydev;

	return 0;
}

int macb_mii_init(struct macb *bp)
{
	struct macb_platform_data *pdata;
	int err = -ENXIO, i;

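	/* Enable management port */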
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	bp->mii_bus->reset = &macb_mdio_reset;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->dev->dev;
	pdata = bp->pdev->dev.platform_data;

	if (pdata)
		bp->mii_bus->phy_mask = pdata->phy_mask;

	bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!bp->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		bp->mii_bus->irq[i] = PHY_POLL;

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	if (mdiobus_register(bp->mii_bus))
		goto err_out_free_mdio_irq;

	if (macb_mii_probe(bp->dev) != 0)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdio_irq:
	kfree(bp->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}
EXPORT_SYMBOL_GPL(macb_mii_init);

static void macb_update_stats(struct macb *bp)
{
	u32 __iomem *reg = bp->regs + MACB_PFR;
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, reg++)
		*p += __raw_readl(reg);
}

static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		usleep_range(10, 250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

static void macb_tx_error_task(struct work_struct *work)
{
	struct macb *bp = container_of(work, struct macb, tx_error_task);
	struct macb_tx_skb *tx_skb;
	struct sk_buff *skb;
	unsigned int tail;

	netdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
		    bp->tx_tail, bp->tx_head);

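	/* Make sure nobody is trying to queue up new packets */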
	netif_stop_queue(bp->dev);

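	/*
	 * Stop transmission now
	 * (in case we have just queued new packets)
	 */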
	if (macb_halt_tx(bp))
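		/* Just complain for now, reinitializing TX path can be good */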
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

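	/*
	 * Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */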
	for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(bp, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(bp, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
				    macb_tx_ring_wrap(tail), skb->data);
			bp->stats.tx_packets++;
			bp->stats.tx_bytes += skb->len;
		} else {
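			/*
			 * "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */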
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
				 DMA_TO_DEVICE);
		tx_skb->skb = NULL;
		dev_kfree_skb(skb);
	}

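	/* Make descriptor updates visible to hardware */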
	wmb();

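	/* Reinitialize the TX desc queue */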
	macb_writel(bp, TBQP, bp->tx_ring_dma);
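	/* Make TX ring reflect state of hardware */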
	bp->tx_head = bp->tx_tail = 0;

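	/* Now we are ready to start transmission again */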
	netif_wake_queue(bp->dev);

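	/* Housework before enabling TX IRQ */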
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	macb_writel(bp, IER, MACB_TX_INT_FLAGS);
}

static void macb_tx_interrupt(struct macb *bp)
{
	unsigned int tail;
	unsigned int head;
	u32 status;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		macb_writel(bp, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = bp->tx_head;
	for (tail = bp->tx_tail; tail != head; tail++) {
		struct macb_tx_skb *tx_skb;
		struct sk_buff *skb;
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(bp, tail);

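		/* Make hw descriptor updates visible to CPU */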
		rmb();

		ctrl = desc->ctrl;

		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		tx_skb = macb_tx_skb(bp, tail);
		skb = tx_skb->skb;

		netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
			    macb_tx_ring_wrap(tail), skb->data);
		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
				 DMA_TO_DEVICE);
		bp->stats.tx_packets++;
		bp->stats.tx_bytes += skb->len;
		tx_skb->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_tail = tail;
	if (netif_queue_stopped(bp->dev)
	    && CIRC_CNT(bp->tx_head, bp->tx_tail,
			TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);
}

static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
			 unsigned int last_frag)
{
	unsigned int len;
	unsigned int frag;
	unsigned int offset;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;

	desc = macb_rx_desc(bp, last_frag);
	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);

	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
		    macb_rx_ring_wrap(first_frag),
		    macb_rx_ring_wrap(last_frag), len);

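	/*
	 * The ethernet header starts NET_IP_ALIGN bytes into the
	 * first buffer. Since the header is 14 bytes, this makes the
	 * payload word-aligned.
	 *
	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
	 * the two padding bytes into the skb so that we avoid hitting
	 * the slowpath in memcpy(), and pull them off afterwards.
	 */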
	skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
	if (!skb) {
		bp->stats.rx_dropped++;
		for (frag = first_frag; ; frag++) {
			desc = macb_rx_desc(bp, frag);
			desc->addr &= ~MACB_BIT(RX_USED);
			if (frag == last_frag)
				break;
		}

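		/* Make descriptor updates visible to hardware */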
		wmb();

		return 1;
	}

	offset = 0;
	len += NET_IP_ALIGN;
	skb_checksum_none_assert(skb);
	skb_put(skb, len);

	for (frag = first_frag; ; frag++) {
		unsigned int frag_len = RX_BUFFER_SIZE;

		if (offset + frag_len > len) {
			BUG_ON(frag != last_frag);
			frag_len = len - offset;
		}
		skb_copy_to_linear_data_offset(skb, offset,
					       macb_rx_buffer(bp, frag),
					       frag_len);
		offset += RX_BUFFER_SIZE;
		desc = macb_rx_desc(bp, frag);
		desc->addr &= ~MACB_BIT(RX_USED);

		if (frag == last_frag)
			break;
	}

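	/* Make descriptor updates visible to hardware */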
	wmb();

	__skb_pull(skb, NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, bp->dev);

	bp->stats.rx_packets++;
	bp->stats.rx_bytes += skb->len;
	netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
		    skb->len, skb->csum);
	netif_receive_skb(skb);

	return 0;
}

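/* Mark DMA descriptors from begin up to and not including end as unused */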
static void discard_partial_frame(struct macb *bp, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

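	/* Make descriptor updates visible to hardware */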
	wmb();

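	/*
	 * When this happens, the hardware stats registers for
	 * whatever caused this are updated, so we don't have to
	 * record anything.
	 */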
}

static int macb_rx(struct macb *bp, int budget)
{
	int received = 0;
	unsigned int tail;
	int first_frag = -1;

	for (tail = bp->rx_tail; budget > 0; tail++) {
		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
		u32 addr, ctrl;

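		/* Make hw descriptor updates visible to CPU */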
		rmb();

		addr = desc->addr;
		ctrl = desc->ctrl;

		if (!(addr & MACB_BIT(RX_USED)))
			break;

		if (ctrl & MACB_BIT(RX_SOF)) {
			if (first_frag != -1)
				discard_partial_frame(bp, first_frag, tail);
			first_frag = tail;
		}

		if (ctrl & MACB_BIT(RX_EOF)) {
			int dropped;

			BUG_ON(first_frag == -1);

			dropped = macb_rx_frame(bp, first_frag, tail);
			first_frag = -1;
			if (!dropped) {
				received++;
				budget--;
			}
		}
	}

	if (first_frag != -1)
		bp->rx_tail = first_frag;
	else
		bp->rx_tail = tail;

	return received;
}

static int macb_poll(struct napi_struct *napi, int budget)
{
	struct macb *bp = container_of(napi, struct macb, napi);
	int work_done;
	u32 status;

	status = macb_readl(bp, RSR);
	macb_writel(bp, RSR, status);

	work_done = 0;

	netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
		    (unsigned long)status, budget);

	work_done = macb_rx(bp, budget);
	if (work_done < budget) {
		napi_complete(napi);

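		/*
		 * We've done what we can to clean the buffers. Make sure we
		 * get notified when new packets arrive.
		 */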
		macb_writel(bp, IER, MACB_RX_INT_FLAGS);

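		/* Packets received while interrupts were disabled */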
		status = macb_readl(bp, RSR);
		if (unlikely(status))
			napi_reschedule(napi);
	}

	return work_done;
}

static irqreturn_t macb_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct macb *bp = netdev_priv(dev);
	u32 status;

	status = macb_readl(bp, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	while (status) {
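		/* close possible race with dev_close */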
		if (unlikely(!netif_running(dev))) {
			macb_writel(bp, IDR, -1);
			break;
		}

		netdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);

		if (status & MACB_RX_INT_FLAGS) {
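			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers. The
			 * scheduling call may fail if the poll routine
			 * is already scheduled, so disable interrupts
			 * now.
			 */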
			macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_BIT(RCOMP));

			if (napi_schedule_prep(&bp->napi)) {
				netdev_vdbg(bp->dev, "scheduling RX softirq\n");
				__napi_schedule(&bp->napi);
			}
		}

		if (unlikely(status & MACB_TX_ERR_FLAGS)) {
			macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&bp->tx_error_task);
			break;
		}

		if (status & MACB_BIT(TCOMP))
			macb_tx_interrupt(bp);

		/*
		 * Link change detection isn't possible with RMII, so we'll
		 * add that if/when we get our hands on a full-blown MII PHY.
		 */

		if (status & MACB_BIT(ISR_ROVR)) {
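			/* We missed at least one packet */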
			if (macb_is_gem(bp))
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;
		}

		if (status & MACB_BIT(HRESP)) {
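			/*
			 * TODO: Reset the hardware, and maybe move the
			 * netdev_err to a lower-priority context as well
			 * (work queue?)
			 */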
			netdev_err(dev, "DMA bus error: HRESP not OK\n");
		}

		status = macb_readl(bp, ISR);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
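/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */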
static void macb_poll_controller(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	macb_interrupt(dev->irq, dev);
	local_irq_restore(flags);
}
#endif

static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	dma_addr_t mapping;
	unsigned int len, entry;
	struct macb_dma_desc *desc;
	struct macb_tx_skb *tx_skb;
	u32 ctrl;
	unsigned long flags;

#if defined(DEBUG) && defined(VERBOSE_DEBUG)
	netdev_vdbg(bp->dev,
		    "start_xmit: len %u head %p data %p tail %p end %p\n",
		    skb->len, skb->head, skb->data,
		    skb_tail_pointer(skb), skb_end_pointer(skb));
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
		       skb->data, 16, true);
#endif

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

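	/* This is a hard error, log it. */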
	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
			   bp->tx_head, bp->tx_tail);
		return NETDEV_TX_BUSY;
	}

	entry = macb_tx_ring_wrap(bp->tx_head);
	bp->tx_head++;
	netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 len, DMA_TO_DEVICE);

	tx_skb = &bp->tx_skb[entry];
	tx_skb->skb = skb;
	tx_skb->mapping = mapping;
	netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
		    skb->data, (unsigned long)mapping);

	ctrl = MACB_BF(TX_FRMLEN, len);
	ctrl |= MACB_BIT(TX_LAST);
	if (entry == (TX_RING_SIZE - 1))
		ctrl |= MACB_BIT(TX_WRAP);

	desc = &bp->tx_ring[entry];
	desc->addr = mapping;
	desc->ctrl = ctrl;

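	/* Make newly initialized descriptor visible to hardware */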
	wmb();

	skb_tx_timestamp(skb);

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

static void macb_free_consistent(struct macb *bp)
{
	if (bp->tx_skb) {
		kfree(bp->tx_skb);
		bp->tx_skb = NULL;
	}
	if (bp->rx_ring) {
		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
				  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}
	if (bp->tx_ring) {
		dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
				  bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
	}
	if (bp->rx_buffers) {
		dma_free_coherent(&bp->pdev->dev,
				  RX_RING_SIZE * RX_BUFFER_SIZE,
				  bp->rx_buffers, bp->rx_buffers_dma);
		bp->rx_buffers = NULL;
	}
}

static int macb_alloc_consistent(struct macb *bp)
{
	int size;

	size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
	bp->tx_skb = kmalloc(size, GFP_KERNEL);
	if (!bp->tx_skb)
		goto out_err;

	size = RX_RING_BYTES;
	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->rx_ring_dma, GFP_KERNEL);
	if (!bp->rx_ring)
		goto out_err;
	netdev_dbg(bp->dev,
		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);

	size = TX_RING_BYTES;
	bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
					 &bp->tx_ring_dma, GFP_KERNEL);
	if (!bp->tx_ring)
		goto out_err;
	netdev_dbg(bp->dev,
		   "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);

	size = RX_RING_SIZE * RX_BUFFER_SIZE;
	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
					    &bp->rx_buffers_dma, GFP_KERNEL);
	if (!bp->rx_buffers)
		goto out_err;
	netdev_dbg(bp->dev,
		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);

	return 0;

out_err:
	macb_free_consistent(bp);
	return -ENOMEM;
}

static void macb_init_rings(struct macb *bp)
{
	int i;
	dma_addr_t addr;

	addr = bp->rx_buffers_dma;
	for (i = 0; i < RX_RING_SIZE; i++) {
		bp->rx_ring[i].addr = addr;
		bp->rx_ring[i].ctrl = 0;
		addr += RX_BUFFER_SIZE;
	}
	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);

	for (i = 0; i < TX_RING_SIZE; i++) {
		bp->tx_ring[i].addr = 0;
		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
	}
	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);

	bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
}

static void macb_reset_hw(struct macb *bp)
{
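	/*
	 * Disable RX and TX (XXX: Should we halt the transmission
	 * more gracefully?)
	 */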
	macb_writel(bp, NCR, 0);

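	/* Clear the stats registers (XXX: Update stats first?) */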
	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));

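	/* Clear all status flags */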
	macb_writel(bp, TSR, -1);
	macb_writel(bp, RSR, -1);

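	/* Disable all interrupts */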
	macb_writel(bp, IDR, -1);
	macb_readl(bp, ISR);
}

static u32 gem_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz = clk_get_rate(bp->pclk);

	if (pclk_hz <= 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (pclk_hz <= 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (pclk_hz <= 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

static u32 macb_mdc_clk_div(struct macb *bp)
{
	u32 config;
	unsigned long pclk_hz;

	if (macb_is_gem(bp))
		return gem_mdc_clk_div(bp);

	pclk_hz = clk_get_rate(bp->pclk);
	if (pclk_hz <= 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (pclk_hz <= 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (pclk_hz <= 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

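/*
 * Get the DMA bus width field of the network configuration register that we
 * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */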
static u32 macb_dbw(struct macb *bp)
{
	if (!macb_is_gem(bp))
		return 0;

	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
	case 4:
		return GEM_BF(DBW, GEM_DBW128);
	case 2:
		return GEM_BF(DBW, GEM_DBW64);
	case 1:
	default:
		return GEM_BF(DBW, GEM_DBW32);
	}
}

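/*
 * Configure the receive DMA engine
 * - use the correct receive buffer size
 * - set the possibility to use INCR16 bursts
 *   (if not supported by FIFO, it will fallback to default)
 * - set both rx/tx packet buffers to full memory size
 * These are configurable parameters for GEM.
 */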
static void macb_configure_dma(struct macb *bp)
{
	u32 dmacfg;

	if (macb_is_gem(bp)) {
		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
		dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
		dmacfg |= GEM_BF(FBLDO, 16);
		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
		dmacfg &= ~GEM_BIT(ENDIA);
		gem_writel(bp, DMACFG, dmacfg);
	}
}

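/*
 * Configure peripheral capabilities according to the integration options
 * used when the hardware was synthesized.
 */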
static void macb_configure_caps(struct macb *bp)
{
	if (macb_is_gem(bp)) {
		if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
	}
}

static void macb_init_hw(struct macb *bp)
{
	u32 config;

	macb_reset_hw(bp);
	macb_set_hwaddr(bp);

	config = macb_mdc_clk_div(bp);
	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
	config |= MACB_BIT(PAE);		/* PAuse Enable */
	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
	config |= MACB_BIT(BIG);		/* Receive oversized frames */
	if (bp->dev->flags & IFF_PROMISC)
		config |= MACB_BIT(CAF);	/* Copy All Frames */
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= MACB_BIT(NBC);	/* No BroadCast */
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);
	bp->speed = SPEED_10;
	bp->duplex = DUPLEX_HALF;

	macb_configure_dma(bp);
	macb_configure_caps(bp);

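	/* Initialize TX and RX buffers */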
	macb_writel(bp, RBQP, bp->rx_ring_dma);
	macb_writel(bp, TBQP, bp->tx_ring_dma);

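	/* Enable TX and RX */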
	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));

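	/* Enable interrupts */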
	macb_writel(bp, IER, (MACB_RX_INT_FLAGS
			      | MACB_TX_INT_FLAGS
			      | MACB_BIT(HRESP)));
}

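/*
 * The hash address register is 64 bits long and takes up two
 * locations in the memory map. The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
 * The unicast hash enable and the multicast hash enable bits in the
 * network configuration register enable the reception of hash matched
 * frames. The destination address is reduced to a 6 bit index into
 * the 64 bit hash register using the following hash function. The
 * hash function is an exclusive or of every sixth bit of the
 * destination address.
 *
 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
 *
 * da[0] represents the least significant bit of the first byte
 * received, that is, the multicast/unicast indicator, and da[47]
 * represents the most significant bit of the last byte received. If
 * the hash index, hi[n], points to a bit that is set in the hash
 * register then the frame will be matched according to whether the
 * frame is multicast or unicast. A multicast match will be signalled
 * if the multicast hash enable bit is set, da[0] is 1 and the hash
 * index points to a bit set in the hash register. A unicast match
 * will be signalled if the unicast hash enable bit is set, da[0] is 0
 * and the hash index points to a bit set in the hash register.
 */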
static inline int hash_bit_value(int bitnr, __u8 *addr)
{
	if (addr[bitnr / 8] & (1 << (bitnr % 8)))
		return 1;
	return 0;
}

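/* Return the hash index value for the specified address. */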
static int hash_get_index(__u8 *addr)
{
	int i, j, bitval;
	int hash_index = 0;

	for (j = 0; j < 6; j++) {
		for (i = 0, bitval = 0; i < 8; i++)
			bitval ^= hash_bit_value(i * 6 + j, addr);

		hash_index |= (bitval << j);
	}

	return hash_index;
}

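/* Add multicast addresses to the internal multicast-hash table. */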
static void macb_sethashtable(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	unsigned long mc_filter[2];
	unsigned int bitnr;
	struct macb *bp = netdev_priv(dev);

	mc_filter[0] = mc_filter[1] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		bitnr = hash_get_index(ha->addr);
		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
	}

	macb_or_gem_writel(bp, HRB, mc_filter[0]);
	macb_or_gem_writel(bp, HRT, mc_filter[1]);
}

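/* Enable/Disable promiscuous and multicast modes. */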
void macb_set_rx_mode(struct net_device *dev)
{
	unsigned long cfg;
	struct macb *bp = netdev_priv(dev);

	cfg = macb_readl(bp, NCFGR);

	if (dev->flags & IFF_PROMISC)
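		/* Enable promiscuous mode */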
		cfg |= MACB_BIT(CAF);
	else if (dev->flags & (~IFF_PROMISC))
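		/* Disable promiscuous mode */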
		cfg &= ~MACB_BIT(CAF);

	if (dev->flags & IFF_ALLMULTI) {
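		/* Enable all multicast mode */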
		macb_or_gem_writel(bp, HRB, -1);
		macb_or_gem_writel(bp, HRT, -1);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (!netdev_mc_empty(dev)) {
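		/* Enable specific multicasts */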
		macb_sethashtable(dev);
		cfg |= MACB_BIT(NCFGR_MTI);
	} else if (dev->flags & (~IFF_ALLMULTI)) {
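		/* Disable all multicast mode */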
		macb_or_gem_writel(bp, HRB, 0);
		macb_or_gem_writel(bp, HRT, 0);
		cfg &= ~MACB_BIT(NCFGR_MTI);
	}

	macb_writel(bp, NCFGR, cfg);
}
EXPORT_SYMBOL_GPL(macb_set_rx_mode);

static int macb_open(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	int err;

	netdev_dbg(bp->dev, "open\n");

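	/* carrier starts down */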
	netif_carrier_off(dev);

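	/* if the phy is not yet registered, retry later */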
	if (!bp->phy_dev)
		return -EAGAIN;

	err = macb_alloc_consistent(bp);
	if (err) {
		netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
			   err);
		return err;
	}

	napi_enable(&bp->napi);

	macb_init_rings(bp);
	macb_init_hw(bp);

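	/* schedule a link state check */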
	phy_start(bp->phy_dev);

	netif_start_queue(dev);

	return 0;
}

static int macb_close(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	if (bp->phy_dev)
		phy_stop(bp->phy_dev);

	spin_lock_irqsave(&bp->lock, flags);
	macb_reset_hw(bp);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&bp->lock, flags);

	macb_free_consistent(bp);

	return 0;
}

static void gem_update_stats(struct macb *bp)
{
	u32 __iomem *reg = bp->regs + GEM_OTX;
	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
	u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;

	for (; p < end; p++, reg++)
		*p += __raw_readl(reg);
}

static struct net_device_stats *gem_get_stats(struct macb *bp)
{
	struct gem_stats *hwstat = &bp->hw_stats.gem;
	struct net_device_stats *nstat = &bp->stats;

	gem_update_stats(bp);

	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
			    hwstat->rx_alignment_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_frames +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersized_frames +
			    hwstat->rx_length_field_frame_errors);
	nstat->tx_errors = (hwstat->tx_late_collisions +
			    hwstat->tx_excessive_collisions +
			    hwstat->tx_underrun +
			    hwstat->tx_carrier_sense_errors);
	nstat->multicast = hwstat->rx_multicast_frames;
	nstat->collisions = (hwstat->tx_single_collision_frames +
			     hwstat->tx_multiple_collision_frames +
			     hwstat->tx_excessive_collisions);
	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersized_frames +
				   hwstat->rx_length_field_frame_errors);
	nstat->rx_over_errors = hwstat->rx_resource_errors;
	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;
	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
	nstat->tx_fifo_errors = hwstat->tx_underrun;

	return nstat;
}

struct net_device_stats *macb_get_stats(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &bp->stats;
	struct macb_stats *hwstat = &bp->hw_stats.macb;

	if (macb_is_gem(bp))
		return gem_get_stats(bp);

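	/* read stats from hardware */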
	macb_update_stats(bp);

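	/* Convert HW stats into netdevice stats */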
	nstat->rx_errors = (hwstat->rx_fcs_errors +
			    hwstat->rx_align_errors +
			    hwstat->rx_resource_errors +
			    hwstat->rx_overruns +
			    hwstat->rx_oversize_pkts +
			    hwstat->rx_jabbers +
			    hwstat->rx_undersize_pkts +
			    hwstat->sqe_test_errors +
			    hwstat->rx_length_mismatch);
	nstat->tx_errors = (hwstat->tx_late_cols +
			    hwstat->tx_excessive_cols +
			    hwstat->tx_underruns +
			    hwstat->tx_carrier_errors);
	nstat->collisions = (hwstat->tx_single_cols +
			     hwstat->tx_multiple_cols +
			     hwstat->tx_excessive_cols);
	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_jabbers +
				   hwstat->rx_undersize_pkts +
				   hwstat->rx_length_mismatch);
	nstat->rx_over_errors = hwstat->rx_resource_errors +
				hwstat->rx_overruns;
	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
	nstat->rx_frame_errors = hwstat->rx_align_errors;
	nstat->rx_fifo_errors = hwstat->rx_overruns;

	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
	nstat->tx_fifo_errors = hwstat->tx_underruns;

	return nstat;
}
EXPORT_SYMBOL_GPL(macb_get_stats);

static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_gset(phydev, cmd);
}

static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_sset(phydev, cmd);
}

static int macb_get_regs_len(struct net_device *netdev)
{
	return MACB_GREGS_NBR * sizeof(u32);
}

static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			  void *p)
{
	struct macb *bp = netdev_priv(dev);
	unsigned int tail, head;
	u32 *regs_buff = p;

	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
			| MACB_GREGS_VERSION;

	tail = macb_tx_ring_wrap(bp->tx_tail);
	head = macb_tx_ring_wrap(bp->tx_head);

	regs_buff[0]  = macb_readl(bp, NCR);
	regs_buff[1]  = macb_or_gem_readl(bp, NCFGR);
	regs_buff[2]  = macb_readl(bp, NSR);
	regs_buff[3]  = macb_readl(bp, TSR);
	regs_buff[4]  = macb_readl(bp, RBQP);
	regs_buff[5]  = macb_readl(bp, TBQP);
	regs_buff[6]  = macb_readl(bp, RSR);
	regs_buff[7]  = macb_readl(bp, IMR);

	regs_buff[8]  = tail;
	regs_buff[9]  = head;
	regs_buff[10] = macb_tx_dma(bp, tail);
	regs_buff[11] = macb_tx_dma(bp, head);

	if (macb_is_gem(bp)) {
		regs_buff[12] = gem_readl(bp, USRIO);
		regs_buff[13] = gem_readl(bp, DMACFG);
	}
}

const struct ethtool_ops macb_ethtool_ops = {
	.get_settings		= macb_get_settings,
	.set_settings		= macb_set_settings,
	.get_regs_len		= macb_get_regs_len,
	.get_regs		= macb_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
};
EXPORT_SYMBOL_GPL(macb_ethtool_ops);

int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct macb *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}
EXPORT_SYMBOL_GPL(macb_ioctl);

static const struct net_device_ops macb_netdev_ops = {
	.ndo_open		= macb_open,
	.ndo_stop		= macb_close,
	.ndo_start_xmit		= macb_start_xmit,
	.ndo_set_rx_mode	= macb_set_rx_mode,
	.ndo_get_stats		= macb_get_stats,
	.ndo_do_ioctl		= macb_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu_rh74	= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= macb_poll_controller,
#endif
};

#if defined(CONFIG_OF)
static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at32ap7000-macb" },
	{ .compatible = "cdns,at91sam9260-macb" },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,pc302-gem" },
	{ .compatible = "cdns,gem" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif

static int __init macb_probe(struct platform_device *pdev)
{
	struct macb_platform_data *pdata;
	struct resource *regs;
	struct net_device *dev;
	struct macb *bp;
	struct phy_device *phydev;
	u32 config;
	int err = -ENXIO;
	struct pinctrl *pinctrl;
	const char *mac;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "no mmio resource defined\n");
		goto err_out;
	}

	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
	if (IS_ERR(pinctrl)) {
		err = PTR_ERR(pinctrl);
		if (err == -EPROBE_DEFER)
			goto err_out;

		dev_warn(&pdev->dev, "No pinctrl provided\n");
	}

	err = -ENOMEM;
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		goto err_out;

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* no optional netdev features are enabled yet */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;

	spin_lock_init(&bp->lock);
	INIT_WORK(&bp->tx_error_task, macb_tx_error_task);

	bp->pclk = clk_get(&pdev->dev, "pclk");
	if (IS_ERR(bp->pclk)) {
		dev_err(&pdev->dev, "failed to get pclk\n");
		goto err_out_free_dev;
	}
	clk_prepare_enable(bp->pclk);

	bp->hclk = clk_get(&pdev->dev, "hclk");
	if (IS_ERR(bp->hclk)) {
		dev_err(&pdev->dev, "failed to get hclk\n");
		goto err_out_put_pclk;
	}
	clk_prepare_enable(bp->hclk);

	bp->regs = ioremap(regs->start, resource_size(regs));
	if (!bp->regs) {
		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
		err = -ENOMEM;
		goto err_out_disable_clocks;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
	if (err) {
		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
			dev->irq, err);
		goto err_out_iounmap;
	}

	dev->netdev_ops = &macb_netdev_ops;
	netif_napi_add(dev, &bp->napi, macb_poll, 64);
	dev->ethtool_ops = &macb_ethtool_ops;

	dev->base_addr = regs->start;

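	/* Set MII management clock divider */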
	config = macb_mdc_clk_div(bp);
	config |= macb_dbw(bp);
	macb_writel(bp, NCFGR, config);

	mac = of_get_mac_address(pdev->dev.of_node);
	if (mac)
		memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
	else
		macb_get_hwaddr(bp);

	err = of_get_phy_mode(pdev->dev.of_node);
	if (err < 0) {
		pdata = pdev->dev.platform_data;
		if (pdata && pdata->is_rmii)
			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
		else
			bp->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		bp->phy_interface = err;
	}

	if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
		macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
	else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
#if defined(CONFIG_ARCH_AT91)
		macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
					       MACB_BIT(CLKEN)));
#else
		macb_or_gem_writel(bp, USRIO, 0);
#endif
	else
#if defined(CONFIG_ARCH_AT91)
		macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
#else
		macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
#endif

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_irq;
	}

	err = macb_mii_init(bp);
	if (err)
		goto err_out_unregister_netdev;

	platform_set_drvdata(pdev, dev);

	netif_carrier_off(dev);

	netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
		    dev->irq, dev->dev_addr);

	phydev = bp->phy_dev;
	phy_attached_info(phydev);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_iounmap:
	iounmap(bp->regs);
err_out_disable_clocks:
	clk_disable_unprepare(bp->hclk);
	clk_put(bp->hclk);
	clk_disable_unprepare(bp->pclk);
err_out_put_pclk:
	clk_put(bp->pclk);
err_out_free_dev:
	free_netdev(dev);
err_out:
	platform_set_drvdata(pdev, NULL);
	return err;
}

static int __exit macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (bp->phy_dev)
			phy_disconnect(bp->phy_dev);
		mdiobus_unregister(bp->mii_bus);
		kfree(bp->mii_bus->irq);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(bp->regs);
		clk_disable_unprepare(bp->hclk);
		clk_put(bp->hclk);
		clk_disable_unprepare(bp->pclk);
		clk_put(bp->pclk);
		free_netdev(dev);
		platform_set_drvdata(pdev, NULL);
	}

	return 0;
}

#ifdef CONFIG_PM
static int macb_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	netif_carrier_off(netdev);
	netif_device_detach(netdev);

	clk_disable_unprepare(bp->hclk);
	clk_disable_unprepare(bp->pclk);

	return 0;
}

static int macb_resume(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);

	clk_prepare_enable(bp->pclk);
	clk_prepare_enable(bp->hclk);

	netif_device_attach(netdev);

	return 0;
}
#else
#define macb_suspend	NULL
#define macb_resume	NULL
#endif

static struct platform_driver macb_driver = {
	.remove		= __exit_p(macb_remove),
	.suspend	= macb_suspend,
	.resume		= macb_resume,
	.driver		= {
		.name		= "macb",
		.owner		= THIS_MODULE,
		.of_match_table	= of_match_ptr(macb_dt_ids),
	},
};

module_platform_driver_probe(macb_driver, macb_probe);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");